List: Commits  « Previous Message | Next Message »
From: Davi Arnaut   Date: February 11 2009 12:16pm
Subject:bzr push into mysql-6.0-bugteam branch (davi:3040 to 3041)
View as plain text  
 3041 Davi Arnaut	2009-02-11 [merge]
      Merge from 6.0 main.
      removed:
        .bzr-mysql.moved/
        .bzr-mysql.moved/default.conf
        mysql-test/suite/ndb/r/ndb_discover_db2.result
        mysql-test/suite/ndb/t/ndb_discover_db2-master.opt
        mysql-test/suite/ndb/t/ndb_discover_db2.test
        mysql-test/suite/ndb/t/ndb_partition_error2-master.opt
        mysql-test/suite/ndb/t/ndb_restore_partition-master.opt
        mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-master.opt
        mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-slave.opt
        mysql-test/suite/rpl_ndb_big/t/rpl_truncate_7ndb_2-master.opt
      added:
        mysql-test/suite/ndb/r/ndb_dd_ddl_grant.result
        mysql-test/suite/ndb/t/ndb_dd_ddl_grant.test
        mysql-test/suite/ndb_binlog/my.cnf
        mysql-test/suite/ndb_team/my.cnf
        mysql-test/suite/rpl_ndb_big/my.cnf
        mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-master.opt
        mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-slave.opt
        storage/ndb/src/mgmapi/mgmapi_error.c
        storage/ndb/test/run-test/conf-upgrade.cnf
        storage/ndb/test/run-test/upgrade-tests.txt
      modified:
        .bzrignore
        Makefile.am
        client/mysqldump.c
        client/mysqltest.cc
        configure.in
        extra/perror.c
        include/hash.h
        include/lf.h
        include/my_sys.h
        mysql-test/Makefile.am
        mysql-test/lib/v1/mysql-test-run.pl
        mysql-test/lib/v1/ndb_config_1_node.ini
        mysql-test/lib/v1/ndb_config_2_node.ini
        mysql-test/mysql-test-run.pl
        mysql-test/r/archive_aio_posix.result
        mysql-test/r/comment_index.result
        mysql-test/r/grant4.result
        mysql-test/r/implicit_commit.result
        mysql-test/r/partition_mgm.result
        mysql-test/r/subselect3.result
        mysql-test/r/subselect3_jcl6.result
        mysql-test/std_data/ndb_config_config.ini
        mysql-test/suite/ddl_lock/r/concurrent_ddl.result
        mysql-test/suite/ddl_lock/t/concurrent_ddl.test
        mysql-test/suite/ndb/my.cnf
        mysql-test/suite/ndb/r/bug36547.result
        mysql-test/suite/ndb/r/ndb_basic.result
        mysql-test/suite/ndb/r/ndb_config.result
        mysql-test/suite/ndb/r/ndb_dbug_lock.result
        mysql-test/suite/ndb/r/ndb_dd_ddl.result
        mysql-test/suite/ndb/r/ndb_discover_db.result
        mysql-test/suite/ndb/r/ndb_read_multi_range.result
        mysql-test/suite/ndb/t/bug36547.test
        mysql-test/suite/ndb/t/ndb_dbug_lock.test
        mysql-test/suite/ndb/t/ndb_dd_ddl.test
        mysql-test/suite/ndb/t/ndb_dd_dump.test
        mysql-test/suite/ndb/t/ndb_discover_db.test
        mysql-test/suite/ndb/t/ndb_read_multi_range.test
        mysql-test/suite/ndb/t/ndb_restore_partition.test
        mysql-test/suite/ndb_binlog/r/ndb_binlog_basic.result
        mysql-test/suite/ndb_binlog/r/ndb_binlog_restore.result
        mysql-test/suite/ndb_binlog/t/ndb_binlog_basic.test
        mysql-test/suite/ndb_team/r/rpl_ndb_extraColMaster.result
        mysql-test/suite/ndb_team/t/rpl_ndb_dd_advance.test
        mysql-test/suite/parts/r/partition_auto_increment_ndb.result
        mysql-test/suite/rpl_ndb/my.cnf
        mysql-test/suite/rpl_ndb_big/r/rpl_ndb_2innodb.result
        mysql-test/suite/rpl_ndb_big/r/rpl_ndb_2myisam.result
        mysql-test/suite/rpl_ndb_big/r/rpl_ndb_sync.result
        mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2innodb-master.opt
        mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2innodb.test
        mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2myisam-master.opt
        mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2myisam.test
        mysql-test/suite/rpl_ndb_big/t/rpl_ndb_apply_status.test
        mysql-test/suite/rpl_ndb_big/t/rpl_truncate_7ndb_2.test
        mysql-test/t/archive_aio_posix.test
        mysql-test/t/comment_index.test
        mysql-test/t/disabled.def
        mysql-test/t/grant4.test
        mysql-test/t/implicit_commit.test
        mysql-test/t/partition_mgm.test
        mysql-test/t/subselect3.test
        mysys/lf_hash.c
        mysys/my_error.c
        mysys/my_init.c
        mysys/my_safehash.c
        mysys/safemalloc.c
        mysys/thr_mutex.c
        scripts/mysql_system_tables.sql
        sql/backup/backup_aux.h
        sql/backup/backup_info.cc
        sql/ha_ndbcluster.cc
        sql/ha_ndbcluster_binlog.cc
        sql/handler.cc
        sql/handler.h
        sql/hash_filo.h
        sql/hostname.cc
        sql/item_create.cc
        sql/item_func.cc
        sql/log.cc
        sql/mdl.cc
        sql/mysqld.cc
        sql/repl_failsafe.cc
        sql/rpl_filter.cc
        sql/rpl_handler.cc
        sql/rpl_tblmap.cc
        sql/rpl_tblmap.h
        sql/set_var.cc
        sql/si_objects.cc
        sql/slave.cc
        sql/sp.cc
        sql/sp_cache.cc
        sql/sp_head.cc
        sql/sql_acl.cc
        sql/sql_base.cc
        sql/sql_cache.cc
        sql/sql_class.cc
        sql/sql_class.h
        sql/sql_connect.cc
        sql/sql_db.cc
        sql/sql_handler.cc
        sql/sql_lex.cc
        sql/sql_parse.cc
        sql/sql_partition.cc
        sql/sql_plugin.cc
        sql/sql_prepare.cc
        sql/sql_repl.cc
        sql/sql_select.cc
        sql/sql_servers.cc
        sql/sql_table.cc
        sql/sql_test.cc
        sql/sql_udf.cc
        sql/table.cc
        sql/transaction.cc
        sql/tztime.cc
        storage/archive/ha_archive.cc
        storage/blackhole/ha_blackhole.cc
        storage/csv/ha_tina.cc
        storage/example/ha_example.cc
        storage/federated/ha_federated.cc
        storage/innobase/handler/ha_innodb.cc
        storage/maria/ha_maria.cc
        storage/maria/ma_init.c
        storage/maria/ma_open.c
        storage/maria/ma_pagecache.c
        storage/maria/ma_recovery.c
        storage/maria/ma_recovery_util.c
        storage/maria/tablockman.c
        storage/myisam/ha_myisam.cc
        storage/myisam/mi_create.c
        storage/myisam/mi_log.c
        storage/myisam/mi_open.c
        storage/myisam/myisam_backup_engine.cc
        storage/ndb/include/mgmapi/mgmapi.h
        storage/ndb/include/mgmapi/mgmapi_config_parameters.h
        storage/ndb/include/mgmapi/mgmapi_error.h
        storage/ndb/include/mgmapi/ndb_logevent.h
        storage/ndb/include/ndbapi/NdbScanOperation.hpp
        storage/ndb/include/util/Bitmask.hpp
        storage/ndb/src/common/portlib/NdbThread.c
        storage/ndb/src/common/util/Bitmask.cpp
        storage/ndb/src/kernel/blocks/ERROR_codes.txt
        storage/ndb/src/kernel/blocks/backup/Backup.cpp
        storage/ndb/src/kernel/blocks/backup/Backup.hpp
        storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
        storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
        storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
        storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
        storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
        storage/ndb/src/kernel/blocks/lgman.cpp
        storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
        storage/ndb/src/kernel/vm/Configuration.cpp
        storage/ndb/src/mgmapi/Makefile.am
        storage/ndb/src/mgmapi/ndb_logevent.cpp
        storage/ndb/src/mgmsrv/MgmtSrvr.cpp
        storage/ndb/src/mgmsrv/MgmtSrvr.hpp
        storage/ndb/src/mgmsrv/Services.cpp
        storage/ndb/src/mgmsrv/Services.hpp
        storage/ndb/src/ndbapi/ClusterMgr.cpp
        storage/ndb/src/ndbapi/ClusterMgr.hpp
        storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
        storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp
        storage/ndb/src/ndbapi/NdbScanOperation.cpp
        storage/ndb/src/ndbapi/Ndbif.cpp
        storage/ndb/src/ndbapi/TransporterFacade.hpp
        storage/ndb/src/ndbapi/ndberror.c
        storage/ndb/test/include/DbUtil.hpp
        storage/ndb/test/ndbapi/testBasic.cpp
        storage/ndb/test/ndbapi/testMgm.cpp
        storage/ndb/test/ndbapi/testNodeRestart.cpp
        storage/ndb/test/ndbapi/testScan.cpp
        storage/ndb/test/ndbapi/testUpgrade.cpp
        storage/ndb/test/run-test/Makefile.am
        storage/ndb/test/run-test/atrt-gather-result.sh
        storage/ndb/test/run-test/atrt.hpp
        storage/ndb/test/run-test/autotest-boot.sh
        storage/ndb/test/run-test/autotest-run.sh
        storage/ndb/test/run-test/command.cpp
        storage/ndb/test/run-test/daily-basic-tests.txt
        storage/ndb/test/run-test/db.cpp
        storage/ndb/test/run-test/files.cpp
        storage/ndb/test/run-test/main.cpp
        storage/ndb/test/run-test/setup.cpp
        storage/ndb/test/src/DbUtil.cpp
        storage/ndb/test/src/HugoTransactions.cpp
        storage/ndb/test/src/NDBT_Tables.cpp
        storage/ndb/test/tools/log_listner.cpp
        storage/ndb/tools/waiter.cpp

 3040 Vladislav Vaintroub	2009-02-11 [merge]
      merge
      removed:
        cmd-line-utils/libedit/TEST/
        cmd-line-utils/libedit/TEST/test.c
        cmd-line-utils/libedit/compat.h
        cmd-line-utils/libedit/compat_conf.h
        cmd-line-utils/libedit/editline.3
        cmd-line-utils/libedit/editrc.5
        cmd-line-utils/libedit/fgetln.c
        cmd-line-utils/libedit/fgetln.h
        cmd-line-utils/libedit/libedit_term.h
        cmd-line-utils/libedit/strlcpy.c
        cmd-line-utils/libedit/strlcpy.h
        cmd-line-utils/libedit/tokenizer.h
        cmd-line-utils/libedit/unvis.c
        cmd-line-utils/libedit/vis.c
        cmd-line-utils/libedit/vis.h
      added:
        cmd-line-utils/libedit/README
        cmd-line-utils/libedit/filecomplete.c
        cmd-line-utils/libedit/filecomplete.h
      modified:
        client/mysql.cc
        cmd-line-utils/libedit/Makefile.am
        cmd-line-utils/libedit/chared.c
        cmd-line-utils/libedit/chared.h
        cmd-line-utils/libedit/common.c
        cmd-line-utils/libedit/config.h
        cmd-line-utils/libedit/el.c
        cmd-line-utils/libedit/el.h
        cmd-line-utils/libedit/el_term.h
        cmd-line-utils/libedit/emacs.c
        cmd-line-utils/libedit/hist.c
        cmd-line-utils/libedit/histedit.h
        cmd-line-utils/libedit/history.c
        cmd-line-utils/libedit/key.c
        cmd-line-utils/libedit/key.h
        cmd-line-utils/libedit/makelist.sh
        cmd-line-utils/libedit/map.c
        cmd-line-utils/libedit/np/fgetln.c
        cmd-line-utils/libedit/np/strlcat.c
        cmd-line-utils/libedit/np/strlcpy.c
        cmd-line-utils/libedit/np/unvis.c
        cmd-line-utils/libedit/np/vis.c
        cmd-line-utils/libedit/np/vis.h
        cmd-line-utils/libedit/parse.c
        cmd-line-utils/libedit/parse.h
        cmd-line-utils/libedit/prompt.c
        cmd-line-utils/libedit/read.c
        cmd-line-utils/libedit/read.h
        cmd-line-utils/libedit/readline.c
        cmd-line-utils/libedit/readline/readline.h
        cmd-line-utils/libedit/refresh.c
        cmd-line-utils/libedit/search.c
        cmd-line-utils/libedit/sig.c
        cmd-line-utils/libedit/sig.h
        cmd-line-utils/libedit/sys.h
        cmd-line-utils/libedit/term.c
        cmd-line-utils/libedit/tokenizer.c
        cmd-line-utils/libedit/tty.c
        cmd-line-utils/libedit/tty.h
        cmd-line-utils/libedit/vi.c
        mysql-test/r/commit_1innodb.result
        mysql-test/r/mysqlbinlog_row_trans.result
        mysql-test/suite/binlog/r/binlog_row_mix_innodb_myisam.result
        mysql-test/suite/binlog/r/binlog_stm_mix_innodb_myisam.result
        mysql-test/suite/binlog/r/binlog_truncate_myisam.result
        mysql-test/suite/binlog/t/binlog_truncate_innodb.test
        mysql-test/suite/binlog/t/binlog_truncate_myisam.test
        mysql-test/suite/rpl/r/rpl_innodb_mixed_dml.result
        mysql-test/suite/rpl/r/rpl_truncate_2myisam.result
        mysql-test/suite/rpl/r/rpl_truncate_3innodb.result
        mysql-test/suite/rpl/r/rpl_truncate_falcon.result
        mysql-test/suite/rpl/t/rpl_truncate_falcon.test
        mysys/thr_lock.c
        sql/lock.cc

=== removed directory '.bzr-mysql.moved'
=== removed file '.bzr-mysql.moved/default.conf'
--- a/.bzr-mysql.moved/default.conf	2008-05-23 00:08:03 +0000
+++ b/.bzr-mysql.moved/default.conf	1970-01-01 00:00:00 +0000
@@ -1,3 +0,0 @@
-[MYSQL]
-post_commit_to = commits@stripped
-tree_name = mysql-6.0

=== modified file '.bzrignore'
--- a/.bzrignore	2008-12-24 10:48:24 +0000
+++ b/.bzrignore	2009-02-05 12:49:39 +0000
@@ -760,6 +760,7 @@ mysql-test/dump.txt
 mysql-test/funcs_1-ps.log
 mysql-test/funcs_1.log
 mysql-test/funcs_1.tar
+mysql-test/gmon.out
 mysql-test/install_test_db
 mysql-test/lib/My/SafeProcess/my_safe_process
 mysql-test/lib/init_db.sql
@@ -2009,3 +2010,4 @@ libmysql/probes.h
 libmysql_r/probes.h
 unittest/tmp
 libmysqld/sql_join_cache.cc
+libmysqld/examples/mysqltest.cc

=== modified file 'Makefile.am'
--- a/Makefile.am	2009-02-07 15:47:14 +0000
+++ b/Makefile.am	2009-02-11 12:11:20 +0000
@@ -132,6 +132,10 @@ smoke:
 	cd mysql-test ; \
 	    @PERL@ ./mysql-test-run.pl --do-test=s
 
+smoke:
+	cd mysql-test ; \
+	    @PERL@ ./mysql-test-run.pl --do-test=s
+
 test-full:	test test-nr test-ps
 
 test-force:

=== modified file 'client/mysqldump.c'
--- a/client/mysqldump.c	2009-01-08 19:06:44 +0000
+++ b/client/mysqldump.c	2009-01-31 16:21:19 +0000
@@ -891,9 +891,9 @@ static int get_options(int *argc, char *
   load_defaults("my",load_default_groups,argc,argv);
   defaults_argv= *argv;
 
-  if (hash_init(&ignore_table, charset_info, 16, 0, 0,
-                (hash_get_key) get_table_key,
-                (hash_free_key) free_table_ent, 0))
+  if (my_hash_init(&ignore_table, charset_info, 16, 0, 0,
+                   (my_hash_get_key) get_table_key,
+                   (my_hash_free_key) free_table_ent, 0))
     return(EX_EOM);
   /* Don't copy internal log tables */
   if (my_hash_insert(&ignore_table,
@@ -1463,8 +1463,8 @@ static void free_resources()
   if (md_result_file && md_result_file != stdout)
     my_fclose(md_result_file, MYF(0));
   my_free(opt_password, MYF(MY_ALLOW_ZERO_PTR));
-  if (hash_inited(&ignore_table))
-    hash_free(&ignore_table);
+  if (my_hash_inited(&ignore_table))
+    my_hash_free(&ignore_table);
   if (extended_insert)
     dynstr_free(&extended_row);
   if (insert_pat_inited)
@@ -4023,7 +4023,7 @@ static int init_dumping(char *database, 
 
 my_bool include_table(const uchar *hash_key, size_t len)
 {
-  return !hash_search(&ignore_table, hash_key, len);
+  return ! my_hash_search(&ignore_table, hash_key, len);
 }
 
 

=== modified file 'client/mysqltest.cc'
--- a/client/mysqltest.cc	2009-02-07 16:00:57 +0000
+++ b/client/mysqltest.cc	2009-02-11 12:11:20 +0000
@@ -1108,7 +1108,7 @@ void free_used_memory()
 
   close_connections();
   close_files();
-  hash_free(&var_hash);
+  my_hash_free(&var_hash);
 
   for (i= 0 ; i < q_lines.elements ; i++)
   {
@@ -1965,8 +1965,8 @@ VAR* var_get(const char *var_name, const
     if (length >= MAX_VAR_NAME_LENGTH)
       die("Too long variable name: %s", save_var_name);
 
-    if (!(v = (VAR*) hash_search(&var_hash, (const uchar*) save_var_name,
-                                            length)))
+    if (!(v = (VAR*) my_hash_search(&var_hash, (const uchar*) save_var_name,
+                                    length)))
     {
       char buff[MAX_VAR_NAME_LENGTH+1];
       strmake(buff, save_var_name, length);
@@ -1997,7 +1997,7 @@ err:
 VAR *var_obtain(const char *name, int len)
 {
   VAR* v;
-  if ((v = (VAR*)hash_search(&var_hash, (const uchar *) name, len)))
+  if ((v = (VAR*)my_hash_search(&var_hash, (const uchar *) name, len)))
     return v;
   v = var_init(0, name, len, "", 0);
   my_hash_insert(&var_hash, (uchar*)v);
@@ -7536,8 +7536,8 @@ int main(int argc, char **argv)
 
   my_init_dynamic_array(&q_lines, sizeof(struct st_command*), 1024, 1024);
 
-  if (hash_init(&var_hash, charset_info,
-                1024, 0, 0, get_var_key, var_free, MYF(0)))
+  if (my_hash_init(&var_hash, charset_info,
+                   1024, 0, 0, get_var_key, var_free, MYF(0)))
     die("Variable hash initialization failed");
 
   var_set_string("$MYSQL_SERVER_VERSION", MYSQL_SERVER_VERSION);

=== modified file 'configure.in'
--- a/configure.in	2009-02-07 16:00:57 +0000
+++ b/configure.in	2009-02-11 12:11:20 +0000
@@ -19,7 +19,7 @@ AC_CONFIG_HEADERS([include/config.h])
 NDB_VERSION_MAJOR=6
 NDB_VERSION_MINOR=2
 NDB_VERSION_BUILD=17
-NDB_VERSION_STATUS="-GA"
+NDB_VERSION_STATUS="-alpha"
 
 PROTOCOL_VERSION=10
 DOT_FRM_VERSION=6

=== modified file 'extra/perror.c'
--- a/extra/perror.c	2008-11-27 13:36:48 +0000
+++ b/extra/perror.c	2009-01-08 11:57:59 +0000
@@ -26,6 +26,7 @@
 #include "../storage/ndb/src/ndbapi/ndberror.c"
 #include "../storage/ndb/src/kernel/error/ndbd_exit_codes.c"
 #include "../storage/ndb/include/mgmapi/mgmapi_error.h"
+#include "../storage/ndb/src/mgmapi/mgmapi_error.c"
 #endif
 
 static my_bool verbose, print_all_codes;

=== modified file 'include/hash.h'
--- a/include/hash.h	2008-12-13 20:48:00 +0000
+++ b/include/hash.h	2009-01-27 02:08:48 +0000
@@ -22,40 +22,6 @@ extern "C" {
 #endif
 
 /*
-  There was a problem on MacOSX with a shared object ha_example.so.
-  It used hash_search(). During build of ha_example.so no libmysys
-  was specified. Since MacOSX had a hash_search() in the system
-  library, it built the shared object so that the dynamic linker
-  linked hash_search() to the system library, which caused a crash
-  when called. To come around this, we renamed hash_search() to
-  my_hash_search(), as we did long ago with hash_insert() and
-  hash_reset(). However, this time we made the move complete with
-  all names. To keep compatibility, we redefine the old names.
-  Since every C and C++ file, that uses HASH, needs to include
-  this file, the change is complete. Both names could be used
-  in the code, but the my_* versions are recommended now.
-*/
-#define hash_get_key    my_hash_get_key
-#define hash_free_key   my_hash_free_key
-#define hash_init       my_hash_init
-#define hash_init2      my_hash_init2
-#define _hash_init      _my_hash_init
-#define hash_free       my_hash_free
-#define hash_reset      my_hash_reset
-#define hash_element    my_hash_element
-#define hash_search     my_hash_search
-#define hash_first      my_hash_first
-#define hash_next       my_hash_next
-#define hash_insert     my_hash_insert
-#define hash_delete     my_hash_delete
-#define hash_update     my_hash_update
-#define hash_replace    my_hash_replace
-#define hash_check      my_hash_check
-#define hash_clear      my_hash_clear
-#define hash_inited     my_hash_inited
-#define hash_init_opt   my_hash_init_opt
-
-/*
   Overhead to store an element in hash
   Can be used to approximate memory consumption for a hash
  */

=== modified file 'include/lf.h'
--- a/include/lf.h	2008-07-29 14:10:24 +0000
+++ b/include/lf.h	2009-01-27 02:08:48 +0000
@@ -224,7 +224,7 @@ lock_wrap(lf_alloc_new, void *,
 typedef struct {
   LF_DYNARRAY array;                    /* hash itself */
   LF_ALLOCATOR alloc;                   /* allocator for elements */
-  hash_get_key get_key;                 /* see HASH */
+  my_hash_get_key get_key;              /* see HASH */
   CHARSET_INFO *charset;                /* see HASH */
   uint key_offset, key_length;          /* see HASH */
   uint element_size;                    /* size of memcpy'ed area on insert */
@@ -234,7 +234,7 @@ typedef struct {
 } LF_HASH;
 
 void lf_hash_init(LF_HASH *hash, uint element_size, uint flags,
-                  uint key_offset, uint key_length, hash_get_key get_key,
+                  uint key_offset, uint key_length, my_hash_get_key get_key,
                   CHARSET_INFO *charset);
 void lf_hash_destroy(LF_HASH *hash);
 int lf_hash_insert(LF_HASH *hash, LF_PINS *pins, const void *data);

=== modified file 'include/my_sys.h'
--- a/include/my_sys.h	2009-02-05 06:27:55 +0000
+++ b/include/my_sys.h	2009-02-11 12:11:20 +0000
@@ -43,6 +43,17 @@ extern int NEAR my_errno;		/* Last error
 #define MYSYS_PROGRAM_DONT_USE_CURSES()  { error_handler_hook = my_message_no_curses; mysys_uses_curses=0;}
 #define MY_INIT(name);		{ my_progname= name; my_init(); }
 
+/**
+  Max length of an error message generated by mysys utilities.
+  Some mysys functions produce error messages. These mostly go
+  to stderr.
+  This constant defines the size of the buffer used to format
+  the message. It should be kept in sync with MYSQL_ERRMSG_SIZE,
+  since sometimes mysys errors are stored in the server diagnostics
+  area, and we would like to avoid unexpected truncation.
+*/
+#define MYSYS_ERRMSG_SIZE   (512)
+
 #define MY_FILE_ERROR	((size_t) -1)
 
 	/* General bitmaps for my_func's */

=== modified file 'mysql-test/Makefile.am'
--- a/mysql-test/Makefile.am	2009-02-07 15:47:14 +0000
+++ b/mysql-test/Makefile.am	2009-02-11 12:11:20 +0000
@@ -95,12 +95,14 @@ TEST_DIRS = t r include std_data std_dat
 	suite/funcs_2/t \
 	suite/jp suite/jp/t suite/jp/r suite/jp/std_data \
 	suite/manual/t suite/manual/r \
-	suite/ndb_team suite/ndb_team/t suite/ndb_team/r \
 	suite/rpl suite/rpl/data suite/rpl/include suite/rpl/r \
 	suite/rpl/t \
 	suite/stress/include suite/stress/t suite/stress/r \
 	suite/ndb suite/ndb/t suite/ndb/r \
+	suite/ndb_binlog suite/ndb_binlog/t suite/ndb_binlog/r \
+	suite/ndb_team suite/ndb_team/t suite/ndb_team/r \
 	suite/rpl_ndb suite/rpl_ndb/t suite/rpl_ndb/r \
+	suite/rpl_ndb_big suite/rpl_ndb_big/t suite/rpl_ndb_big/r \
 	suite/falcon suite/falcon/t suite/falcon/r \
 	suite/falcon_team suite/falcon_team/t suite/falcon_team/r \
 	suite/parts suite/parts/t suite/parts/r suite/parts/inc

=== modified file 'mysql-test/lib/v1/mysql-test-run.pl'
--- a/mysql-test/lib/v1/mysql-test-run.pl	2008-11-14 08:45:32 +0000
+++ b/mysql-test/lib/v1/mysql-test-run.pl	2009-02-01 21:05:19 +0000
@@ -135,7 +135,7 @@ our $default_vardir;
 
 our $opt_usage;
 our $opt_suites;
-our $opt_suites_default= "main,binlog,rpl,rpl_ndb,ndb"; # Default suites to run
+our $opt_suites_default= "ndb,ndb_binlog,rpl_ndb,main,binlog,rpl"; # Default suites to run
 our $opt_script_debug= 0;  # Script debugging, enable with --script-debug
 our $opt_verbose= 0;  # Verbose output, enable with --verbose
 
@@ -410,12 +410,13 @@ sub main () {
       # Check for any extra suites to enable based on the path name
       my %extra_suites=
 	(
-	 "mysql-5.1-new-ndb"              => "ndb_team",
-	 "mysql-5.1-new-ndb-merge"        => "ndb_team",
-	 "mysql-5.1-telco-6.2"            => "ndb_team",
-	 "mysql-5.1-telco-6.2-merge"      => "ndb_team",
-	 "mysql-5.1-telco-6.3"            => "ndb_team",
-	 "mysql-6.0-ndb"                  => "ndb_team",
+	 "bzr_mysql-5.1-ndb"                  => "ndb_team",
+	 "bzr_mysql-5.1-ndb-merge"            => "ndb_team",
+	 "bzr_mysql-5.1-telco-6.2"            => "ndb_team",
+	 "bzr_mysql-5.1-telco-6.2-merge"      => "ndb_team",
+	 "bzr_mysql-5.1-telco-6.3"            => "ndb_team",
+	 "bzr_mysql-5.1-telco-6.4"            => "ndb_team",
+	 "bzr_mysql-6.0-ndb"                  => "ndb_team,rpl_ndb_big",
 	);
 
       foreach my $dir ( reverse splitdir($glob_basedir) )
@@ -1577,16 +1578,22 @@ sub executable_setup_ndb () {
 				"$glob_basedir/storage/ndb",
 				"$glob_basedir/bin");
 
+  # Some might be found in sbin, not bin.
+  my $daemon_path= mtr_file_exists("$glob_basedir/ndb",
+				   "$glob_basedir/storage/ndb",
+				   "$glob_basedir/sbin",
+				   "$glob_basedir/bin");
+
   $exe_ndbd=
     mtr_exe_maybe_exists("$ndb_path/src/kernel/ndbd",
-			 "$ndb_path/ndbd",
+			 "$daemon_path/ndbd",
 			 "$glob_basedir/libexec/ndbd");
   $exe_ndb_mgm=
     mtr_exe_maybe_exists("$ndb_path/src/mgmclient/ndb_mgm",
 			 "$ndb_path/ndb_mgm");
   $exe_ndb_mgmd=
     mtr_exe_maybe_exists("$ndb_path/src/mgmsrv/ndb_mgmd",
-			 "$ndb_path/ndb_mgmd",
+			 "$daemon_path/ndb_mgmd",
 			 "$glob_basedir/libexec/ndb_mgmd");
   $exe_ndb_waiter=
     mtr_exe_maybe_exists("$ndb_path/tools/ndb_waiter",
@@ -2814,7 +2821,7 @@ sub ndbd_start ($$$) {
   mtr_add_arg($args, "$extra_args");
 
   my $nodeid= $cluster->{'ndbds'}->[$idx]->{'nodeid'};
-  my $path_ndbd_log= "$cluster->{'data_dir'}/ndb_${nodeid}.log";
+  my $path_ndbd_log= "$cluster->{'data_dir'}/ndb_${nodeid}_out.log";
   $pid= mtr_spawn($exe_ndbd, $args, "",
 		  $path_ndbd_log,
 		  $path_ndbd_log,
@@ -3973,9 +3980,12 @@ sub mysqld_arguments ($$$$) {
       mtr_add_arg($args, "%s--ndbcluster", $prefix);
       mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix,
 		  $cluster->{'connect_string'});
+      mtr_add_arg($args, "%s--ndb-wait-connected=20", $prefix);
+      mtr_add_arg($args, "%s--ndb-cluster-connection-pool=3", $prefix);
+      mtr_add_arg($args, "%s--slave-allow-batching", $prefix);
       if ( $mysql_version_id >= 50100 )
       {
-	mtr_add_arg($args, "%s--ndb-extra-logging", $prefix);
+	mtr_add_arg($args, "%s--ndb-log-orig", $prefix);
       }
     }
     else
@@ -4046,10 +4056,12 @@ sub mysqld_arguments ($$$$) {
       mtr_add_arg($args, "%s--ndbcluster", $prefix);
       mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix,
 		  $cluster->{'connect_string'});
-
+      mtr_add_arg($args, "%s--ndb-wait-connected=20", $prefix);
+      mtr_add_arg($args, "%s--ndb-cluster-connection-pool=3", $prefix);
+      mtr_add_arg($args, "%s--slave-allow-batching", $prefix);
       if ( $mysql_version_id >= 50100 )
       {
-	mtr_add_arg($args, "%s--ndb-extra-logging", $prefix);
+	mtr_add_arg($args, "%s--ndb-log-orig", $prefix);
       }
     }
     else
@@ -4310,6 +4322,7 @@ sub stop_all_servers () {
   {
     rm_ndbcluster_tables($mysqld->{'path_myddir'});
   }
+
 }
 
 
@@ -4619,22 +4632,6 @@ sub run_testcase_start_servers($) {
 	 $tinfo->{'master_num'} > 1 )
     {
       # Test needs cluster, start an extra mysqld connected to cluster
-
-      if ( $mysql_version_id >= 50100 )
-      {
-	# First wait for first mysql server to have created ndb system
-	# tables ok FIXME This is a workaround so that only one mysqld
-	# create the tables
-	if ( ! sleep_until_file_created(
-		  "$master->[0]->{'path_myddir'}/mysql/ndb_apply_status.ndb",
-					$master->[0]->{'start_timeout'},
-					$master->[0]->{'pid'}))
-	{
-
-	  $tinfo->{'comment'}= "Failed to create 'mysql/ndb_apply_status' table";
-	  return 1;
-	}
-      }
       mysqld_start($master->[1],$tinfo->{'master_opt'},[]);
     }
 

=== modified file 'mysql-test/lib/v1/ndb_config_1_node.ini'
--- a/mysql-test/lib/v1/ndb_config_1_node.ini	2008-11-14 08:45:32 +0000
+++ b/mysql-test/lib/v1/ndb_config_1_node.ini	2009-02-01 21:05:19 +0000
@@ -10,6 +10,7 @@ DataDir= CHOOSE_FILESYSTEM
 MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes
 MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes
 TimeBetweenGlobalCheckpoints= 500
+TimeBetweenEpochs=0
 NoOfFragmentLogFiles= 8
 FragmentLogFileSize= 6M
 DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory 

=== modified file 'mysql-test/lib/v1/ndb_config_2_node.ini'
--- a/mysql-test/lib/v1/ndb_config_2_node.ini	2008-11-14 08:45:32 +0000
+++ b/mysql-test/lib/v1/ndb_config_2_node.ini	2009-02-01 21:05:19 +0000
@@ -1,6 +1,6 @@
 [ndbd default]
 NoOfReplicas= 2
-MaxNoOfConcurrentTransactions= 64
+MaxNoOfConcurrentTransactions= 2048
 MaxNoOfConcurrentOperations= CHOOSE_MaxNoOfConcurrentOperations
 DataMemory= CHOOSE_DataMemory
 IndexMemory= CHOOSE_IndexMemory
@@ -9,7 +9,8 @@ TimeBetweenWatchDogCheck= 30000
 DataDir= CHOOSE_FILESYSTEM
 MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes
 MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes
-TimeBetweenGlobalCheckpoints= 500
+TimeBetweenGlobalCheckpoints= 3000
+TimeBetweenEpochs=100
 NoOfFragmentLogFiles= 4
 FragmentLogFileSize=12M
 DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory
@@ -53,3 +54,16 @@ PortNumber= CHOOSE_PORT_MGM
 [mysqld]
 
 [mysqld]
+NodeId: 63
+
+[mysqld]
+NodeId: 127
+
+[mysqld]
+NodeId: 192
+
+[mysqld]
+NodeId: 228
+
+[mysqld]
+NodeId: 255

=== modified file 'mysql-test/mysql-test-run.pl'
--- a/mysql-test/mysql-test-run.pl	2009-02-10 10:26:04 +0000
+++ b/mysql-test/mysql-test-run.pl	2009-02-11 12:11:20 +0000
@@ -259,17 +259,18 @@ sub main {
     # Check for any extra suites to enable based on the path name
     my %extra_suites=
       (
-       "mysql-5.1-new-ndb"              => "ndb_team",
-       "mysql-5.1-new-ndb-merge"        => "ndb_team",
-       "mysql-5.1-telco-6.2"            => "ndb_team",
-       "mysql-5.1-telco-6.2-merge"      => "ndb_team",
-       "mysql-5.1-telco-6.3"            => "ndb_team",
-       "mysql-6.0-ndb"                  => "ndb_team",
-       "mysql-6.0-falcon"               => "falcon_team",
-       "mysql-6.0-falcon-team"          => "falcon_team",
-       "mysql-6.0-falcon-wlad"          => "falcon_team",
-       "mysql-6.0-falcon-chris"         => "falcon_team",
-       "mysql-6.0-falcon-kevin"         => "falcon_team",
+       "bzr_mysql-5.1-ndb"              => "ndb_team",
+       "bzr_mysql-5.1-ndb-merge"        => "ndb_team",
+       "bzr_mysql-5.1-telco-6.2"        => "ndb_team",
+       "bzr_mysql-5.1-telco-6.2-merge"  => "ndb_team",
+       "bzr_mysql-5.1-telco-6.3"        => "ndb_team",
+       "bzr_mysql-5.1-telco-6.4"        => "ndb_team",
+       "bzr_mysql-6.0-ndb"              => "ndb_team,rpl_ndb_big,ndb_binlog",
+       "bzr_mysql-6.0-falcon"           => "falcon_team",
+       "bzr_mysql-6.0-falcon-team"      => "falcon_team",
+       "bzr_mysql-6.0-falcon-wlad"      => "falcon_team",
+       "bzr_mysql-6.0-falcon-chris"     => "falcon_team",
+       "bzr_mysql-6.0-falcon-kevin"     => "falcon_team",
       );
 
     foreach my $dir ( reverse splitdir($basedir) ) {

=== modified file 'mysql-test/r/archive_aio_posix.result'
--- a/mysql-test/r/archive_aio_posix.result	2007-12-13 12:55:04 +0000
+++ b/mysql-test/r/archive_aio_posix.result	2009-02-04 22:22:32 +0000
@@ -12686,3 +12686,5 @@ check table t1 extended;
 Table	Op	Msg_type	Msg_text
 test.t1	check	status	OK
 drop table t1;
+# Test file cleanup
+SET GLOBAL archive_aio=off;

=== modified file 'mysql-test/r/comment_index.result'
--- a/mysql-test/r/comment_index.result	2007-08-28 11:04:35 +0000
+++ b/mysql-test/r/comment_index.result	2009-02-04 22:22:32 +0000
@@ -483,3 +483,4 @@ t1	CREATE TABLE `t1` (
   KEY `i11` (`c11`) COMMENT 'abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij
 abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcd',
   KEY `c13` (`c13`) COMMENT 'abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij
 abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcd'
 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 COMMENT='ABCDEFGHIJabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghija
 bcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcd'
+DROP TABLE t1;

=== modified file 'mysql-test/r/grant4.result'
--- a/mysql-test/r/grant4.result	2008-03-04 17:35:42 +0000
+++ b/mysql-test/r/grant4.result	2009-02-04 22:22:32 +0000
@@ -120,3 +120,4 @@ SHOW CREATE VIEW v3;
 View	Create View	character_set_client	collation_connection
 v3	CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v3` AS select `t_select_priv`.`a` AS `a`,`t_select_priv`.`b` AS `b` from `t_select_priv`	latin1	latin1_swedish_ci
 drop database mysqltest_db1;
+drop user mysqltest_u1@localhost;

=== modified file 'mysql-test/r/implicit_commit.result'
--- a/mysql-test/r/implicit_commit.result	2009-01-18 23:21:43 +0000
+++ b/mysql-test/r/implicit_commit.result	2009-02-04 22:22:32 +0000
@@ -447,6 +447,7 @@ revoke all on test.t1 from mysqltest_2@l
 CALL db1.test_if_commit();
 IMPLICIT COMMIT
 YES
+drop user mysqltest_2@localhost;
 #
 # SQLCOM_SHOW_GRANTS
 #

=== modified file 'mysql-test/r/partition_mgm.result'
--- a/mysql-test/r/partition_mgm.result	2008-12-10 08:06:58 +0000
+++ b/mysql-test/r/partition_mgm.result	2009-02-04 14:48:13 +0000
@@ -5,6 +5,11 @@ PARTITION BY HASH (a)
 PARTITIONS 1;
 INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
 ALTER TABLE t1 REORGANIZE PARTITION;
+ERROR HY000: REORGANIZE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs
+ALTER ONLINE TABLE t1 REORGANIZE PARTITION;
+ERROR HY000: REORGANIZE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs
+ALTER OFFLINE TABLE t1 REORGANIZE PARTITION;
+ERROR HY000: REORGANIZE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs
 DROP TABLE t1;
 create table t1 (a int)
 partition by range (a)

=== modified file 'mysql-test/r/subselect3.result'
--- a/mysql-test/r/subselect3.result	2009-01-30 14:13:39 +0000
+++ b/mysql-test/r/subselect3.result	2009-02-04 11:27:06 +0000
@@ -1219,6 +1219,21 @@ id	select_type	table	type	possible_keys	
 1	PRIMARY	Z	ALL	NULL	NULL	NULL	NULL	6	End materialize; Using join buffer
 drop table t0,t1,t2;
 
+BUG#37842: Assertion in DsMrr_impl::dsmrr_init, at handler.cc:4307
+
+CREATE TABLE t1 (
+`pk` int(11) NOT NULL AUTO_INCREMENT,
+`int_key` int(11) DEFAULT NULL,
+PRIMARY KEY (`pk`),
+KEY `int_key` (`int_key`)
+) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1,9),(2,3),(3,8),(4,6),(5,9),(6,5),(7,5),(8,9),(9,1),(10,10);
+SELECT `pk` FROM t1 AS OUTR WHERE `int_key` = ALL (
+SELECT `int_key` FROM t1 AS INNR WHERE INNR . `pk` >= 9
+);
+pk
+DROP TABLE t1;
+
 BUG#40118 Crash when running Batched Key Access and requiring one match for each key
 
 create table t0(a int);

=== modified file 'mysql-test/r/subselect3_jcl6.result'
--- a/mysql-test/r/subselect3_jcl6.result	2009-02-03 09:16:53 +0000
+++ b/mysql-test/r/subselect3_jcl6.result	2009-02-04 11:27:06 +0000
@@ -1224,6 +1224,21 @@ id	select_type	table	type	possible_keys	
 1	PRIMARY	Z	ALL	NULL	NULL	NULL	NULL	6	End materialize; Using join buffer
 drop table t0,t1,t2;
 
+BUG#37842: Assertion in DsMrr_impl::dsmrr_init, at handler.cc:4307
+
+CREATE TABLE t1 (
+`pk` int(11) NOT NULL AUTO_INCREMENT,
+`int_key` int(11) DEFAULT NULL,
+PRIMARY KEY (`pk`),
+KEY `int_key` (`int_key`)
+) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1,9),(2,3),(3,8),(4,6),(5,9),(6,5),(7,5),(8,9),(9,1),(10,10);
+SELECT `pk` FROM t1 AS OUTR WHERE `int_key` = ALL (
+SELECT `int_key` FROM t1 AS INNR WHERE INNR . `pk` >= 9
+);
+pk
+DROP TABLE t1;
+
 BUG#40118 Crash when running Batched Key Access and requiring one match for each key
 
 create table t0(a int);

=== modified file 'mysql-test/std_data/ndb_config_config.ini'
--- a/mysql-test/std_data/ndb_config_config.ini	2008-04-25 10:34:28 +0000
+++ b/mysql-test/std_data/ndb_config_config.ini	2009-02-02 15:58:48 +0000
@@ -66,4 +66,3 @@ NodeId: 228
 
 [mysqld]
 NodeId: 255
-

=== modified file 'mysql-test/suite/ddl_lock/r/concurrent_ddl.result'
--- a/mysql-test/suite/ddl_lock/r/concurrent_ddl.result	2008-12-17 10:11:14 +0000
+++ b/mysql-test/suite/ddl_lock/r/concurrent_ddl.result	2009-02-04 09:11:18 +0000
@@ -311,14 +311,15 @@ DROP TABLE t4;
 ##
 CREATE TABLE t1(a INT, b CHAR(100))
 ENGINE=<engine_type>;
+INSERT INTO t1 (a, b) VALUES (1, 'one');
 # Switch to connection locker
-SET DEBUG_SYNC= 'after_lock_tables_takes_lock
-                 SIGNAL locked WAIT_FOR do_unlock';
+SET DEBUG_SYNC= 'locked_table_name
+                 SIGNAL locked WAIT_FOR do_unlock HIT_LIMIT 2';
 # "send" next statement
 ALTER TABLE t1 RENAME t2;
 # Switch to connection waiter
 SET DEBUG_SYNC= 'now WAIT_FOR locked';
-SET DEBUG_SYNC= 'after_start_ddl SIGNAL do_unlock';
+SET DEBUG_SYNC= 'mdl_enter_cond SIGNAL do_unlock';
 # "send" next statement
 ALTER TABLE t2 RENAME t3;
 # Switch to connection locker
@@ -328,10 +329,10 @@ ALTER TABLE t2 RENAME t3;
 # Switch to connection default
 SELECT SUM(a) FROM t3;
 SUM(a)
-NULL
+1
 SELECT COUNT(*) FROM t3;
 COUNT(*)
-0
+1
 SET DEBUG_SYNC= 'RESET';
 DROP TABLE t3;
 ##

=== modified file 'mysql-test/suite/ddl_lock/t/concurrent_ddl.test'
--- a/mysql-test/suite/ddl_lock/t/concurrent_ddl.test	2008-12-17 10:11:14 +0000
+++ b/mysql-test/suite/ddl_lock/t/concurrent_ddl.test	2009-02-04 09:11:18 +0000
@@ -384,10 +384,12 @@ eval
 CREATE TABLE t1(a INT, b CHAR(100))
 ENGINE=$engine_type;
 
+INSERT INTO t1 (a, b) VALUES (1, 'one');
+
 --echo # Switch to connection locker
 connection locker;
-SET DEBUG_SYNC= 'after_lock_tables_takes_lock
-                 SIGNAL locked WAIT_FOR do_unlock';
+SET DEBUG_SYNC= 'locked_table_name
+                 SIGNAL locked WAIT_FOR do_unlock HIT_LIMIT 2';
 --echo # "send" next statement
 send
 ALTER TABLE t1 RENAME t2;
@@ -395,7 +397,7 @@ ALTER TABLE t1 RENAME t2;
 --echo # Switch to connection waiter
 connection waiter;
 SET DEBUG_SYNC= 'now WAIT_FOR locked';
-SET DEBUG_SYNC= 'after_start_ddl SIGNAL do_unlock';
+SET DEBUG_SYNC= 'mdl_enter_cond SIGNAL do_unlock';
 --echo # "send" next statement
 send
 ALTER TABLE t2 RENAME t3;

=== modified file 'mysql-test/suite/ndb/my.cnf'
--- a/mysql-test/suite/ndb/my.cnf	2008-05-09 15:28:34 +0000
+++ b/mysql-test/suite/ndb/my.cnf	2009-02-02 15:58:48 +0000
@@ -6,7 +6,22 @@ NoOfReplicas=                  2
 ndbd=,
 ndb_mgmd=
 mysqld=,
-ndbapi=,,
+ndbapi=,,,,,,,,,,,
+
+[cluster_config.ndbapi.8.1]
+NodeId=63
+
+[cluster_config.ndbapi.9.1]
+NodeId=127
+
+[cluster_config.ndbapi.10.1]
+NodeId=192
+
+[cluster_config.ndbapi.11.1]
+NodeId=228
+
+[cluster_config.ndbapi.12.1]
+NodeId=255
 
 [mysqld]
 # Make all mysqlds use cluster
@@ -14,11 +29,9 @@ ndbcluster
 
 # Time to wait for NDB connection before
 # accepting connections client connections
-ndb-wait-connected=            20
-
-ndb-extra-logging
+ndb-wait-connected=20
 
-#ndb-cluster-connection-pool=  3
+ndb-cluster-connection-pool=3
 
 [ENV]
 NDB_CONNECTSTRING=             @mysql_cluster.1.ndb_connectstring
@@ -26,16 +39,3 @@ MASTER_MYPORT=                 @mysqld.1
 MASTER_MYPORT1=                @mysqld.2.1.port
 
 NDB_BACKUP_DIR=                @cluster_config.ndbd.1.1.BackupDataDir
-
-
-# Give the second mysqld hardcoded NodeId
-[cluster_config.mysqld.2.1]
-NodeId=192
-
-# Set hardccoded NodeId's alos on ndbapi nodes
-[cluster_config.ndbapi.2.1]
-NodeId=228
-
-[cluster_config.ndbapi.3.1]
-NodeId=255
-

=== modified file 'mysql-test/suite/ndb/r/bug36547.result'
--- a/mysql-test/suite/ndb/r/bug36547.result	2008-05-07 14:43:32 +0000
+++ b/mysql-test/suite/ndb/r/bug36547.result	2009-02-02 06:45:57 +0000
@@ -2,11 +2,11 @@ SET NDB_EXTRA_LOGGING=1;
 ERROR HY000: Variable 'ndb_extra_logging' is a GLOBAL variable and should be set with SET GLOBAL
 SET @SAVE_NDB_EXTRA_LOGGING= @@NDB_EXTRA_LOGGING;
 SET GLOBAL NDB_EXTRA_LOGGING=1;
-SHOW VARIABLES LIKE 'ndb_extra%';
+SHOW VARIABLES LIKE 'ndb_extra_logging';
 Variable_name	Value
 ndb_extra_logging	1
 SET GLOBAL NDB_EXTRA_LOGGING=0;
-SHOW VARIABLES LIKE 'ndb_extra%';
+SHOW VARIABLES LIKE 'ndb_extra_logging';
 Variable_name	Value
 ndb_extra_logging	0
-SET @GLOBAL.NDB_EXTRA_LOGGGING= @SAVE_NDB_EXTRA_LOGGING;
+SET @@GLOBAL.NDB_EXTRA_LOGGING= @SAVE_NDB_EXTRA_LOGGING;

=== modified file 'mysql-test/suite/ndb/r/ndb_basic.result'
--- a/mysql-test/suite/ndb/r/ndb_basic.result	2008-10-15 12:14:27 +0000
+++ b/mysql-test/suite/ndb/r/ndb_basic.result	2009-01-23 11:03:00 +0000
@@ -24,6 +24,7 @@ ndb_force_send	#
 ndb_index_stat_cache_entries	#
 ndb_index_stat_enable	#
 ndb_index_stat_update_freq	#
+ndb_log_binlog_index	#
 ndb_report_thresh_binlog_epoch_slip	#
 ndb_report_thresh_binlog_mem_usage	#
 ndb_use_copying_alter_table	#

=== modified file 'mysql-test/suite/ndb/r/ndb_config.result'
--- a/mysql-test/suite/ndb/r/ndb_config.result	2008-04-25 10:34:28 +0000
+++ b/mysql-test/suite/ndb/r/ndb_config.result	2009-02-02 16:02:58 +0000
@@ -1,5 +1,5 @@
 == 1 ==
-ndbd,1,localhost ndbd,2,localhost ndb_mgmd,3,localhost mysqld,4,localhost mysqld,192,localhost mysqld,193,localhost mysqld,228,localhost mysqld,255,localhost
+ndbd,1,localhost ndbd,2,localhost ndb_mgmd,3,localhost mysqld,4,localhost mysqld,5,localhost mysqld,6,localhost mysqld,7,localhost mysqld,8,localhost mysqld,9,localhost mysqld,10,localhost mysqld,11,localhost mysqld,12,localhost mysqld,63,localhost mysqld,127,localhost mysqld,192,localhost mysqld,228,localhost mysqld,255,localhost
 == 2 ==
 1,localhost,20971520,1048576 2,localhost,20971520,1048576
 == 3 ==

=== modified file 'mysql-test/suite/ndb/r/ndb_dbug_lock.result'
--- a/mysql-test/suite/ndb/r/ndb_dbug_lock.result	2008-10-29 13:09:15 +0000
+++ b/mysql-test/suite/ndb/r/ndb_dbug_lock.result	2009-01-23 09:40:08 +0000
@@ -40,5 +40,5 @@ t1	CREATE TABLE `t1` (
   PRIMARY KEY (`a`)
 ) ENGINE=ndbcluster DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
 # Cleanup
-set session debug="-d,sleep_after_global_schema_lock";
+set session debug="-d,";
 drop table t1;

=== modified file 'mysql-test/suite/ndb/r/ndb_dd_ddl.result'
--- a/mysql-test/suite/ndb/r/ndb_dd_ddl.result	2008-10-17 08:37:23 +0000
+++ b/mysql-test/suite/ndb/r/ndb_dd_ddl.result	2009-01-28 15:06:33 +0000
@@ -236,80 +236,3 @@ engine ndb;
 ERROR HY000: Failed to drop TABLESPACE
 drop logfile group lg1
 engine ndb;
-
-# -----------------------------------------------------------------
-# End 5.1 test
-# -----------------------------------------------------------------
-
-# --
-# -- WL#4300: Define privileges for tablespaces.
-# --
-GRANT CREATE TABLESPACE ON *.* TO mysqltest_u1@localhost;
-
-DROP DATABASE IF EXISTS mysqltest2;
-CREATE DATABASE mysqltest2;
-GRANT ALL PRIVILEGES ON mysqltest2.* TO mysqltest_u2@localhost;
-
-# -- Connection: mysqltest_u1@localhost
-
-# -- Grants for mysqltest_u1@localhost:
-SHOW GRANTS;
-Grants for mysqltest_u1@localhost
-GRANT CREATE TABLESPACE ON *.* TO 'mysqltest_u1'@'localhost'
-
-# -- Check CREATE LOGFILE GROUP...
-CREATE LOGFILE GROUP lg1
-ADD UNDOFILE 'undofile.dat'
-INITIAL_SIZE 1M
-UNDO_BUFFER_SIZE = 1M
-ENGINE = NDB;
-
-# -- Check ALTER LOGFILE GROUP...
-ALTER LOGFILE GROUP lg1
-ADD UNDOFILE 'undofile02.dat'
-INITIAL_SIZE 1M
-ENGINE = NDB;
-
-# -- Check CREATE TABLESPACE...
-CREATE TABLESPACE ts1
-ADD DATAFILE 'datafile.dat'
-USE LOGFILE GROUP lg1
-INITIAL_SIZE 1M
-ENGINE = NDB;
-
-# -- Check ALTER TABLESPACE...
-ALTER TABLESPACE ts1
-DROP DATAFILE 'datafile.dat'
-INITIAL_SIZE 1M
-ENGINE = NDB;
-
-# -- Connection: mysqltest_u2@localhost
-
-# -- Grants for mysqltest_u2@localhost:
-SHOW GRANTS;
-Grants for mysqltest_u2@localhost
-GRANT USAGE ON *.* TO 'mysqltest_u2'@'localhost'
-GRANT ALL PRIVILEGES ON `mysqltest2`.* TO 'mysqltest_u2'@'localhost'
-CREATE TABLE t1(c INT) TABLESPACE ts1;
-DROP TABLE t1;
-
-# -- Connection: mysqltest_u1@localhost
-
-
-# -- Check DROP TABLESPACE...
-DROP TABLESPACE ts1 
-ENGINE = NDB;
-
-# -- Check DROP LOGFILE GROUP...
-DROP LOGFILE GROUP lg1 
-ENGINE = NDB;
-
-# -- Connection: root@localhost
-
-DROP USER mysqltest_u1@localhost;
-DROP USER mysqltest_u2@localhost;
-DROP DATABASE mysqltest2;
-
-# -----------------------------------------------------------------
-# End 6.0 test
-# -----------------------------------------------------------------

=== added file 'mysql-test/suite/ndb/r/ndb_dd_ddl_grant.result'
--- a/mysql-test/suite/ndb/r/ndb_dd_ddl_grant.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb/r/ndb_dd_ddl_grant.result	2009-01-28 15:06:33 +0000
@@ -0,0 +1,73 @@
+
+# --
+# -- WL#4300: Define privileges for tablespaces.
+# --
+GRANT CREATE TABLESPACE ON *.* TO mysqltest_u1@localhost;
+
+DROP DATABASE IF EXISTS mysqltest2;
+CREATE DATABASE mysqltest2;
+GRANT ALL PRIVILEGES ON mysqltest2.* TO mysqltest_u2@localhost;
+
+# -- Connection: mysqltest_u1@localhost
+
+# -- Grants for mysqltest_u1@localhost:
+SHOW GRANTS;
+Grants for mysqltest_u1@localhost
+GRANT CREATE TABLESPACE ON *.* TO 'mysqltest_u1'@'localhost'
+
+# -- Check CREATE LOGFILE GROUP...
+CREATE LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile.dat'
+INITIAL_SIZE 1M
+UNDO_BUFFER_SIZE = 1M
+ENGINE = NDB;
+
+# -- Check ALTER LOGFILE GROUP...
+ALTER LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile02.dat'
+INITIAL_SIZE 1M
+ENGINE = NDB;
+
+# -- Check CREATE TABLESPACE...
+CREATE TABLESPACE ts1
+ADD DATAFILE 'datafile.dat'
+USE LOGFILE GROUP lg1
+INITIAL_SIZE 1M
+ENGINE = NDB;
+
+# -- Check ALTER TABLESPACE...
+ALTER TABLESPACE ts1
+DROP DATAFILE 'datafile.dat'
+INITIAL_SIZE 1M
+ENGINE = NDB;
+
+# -- Connection: mysqltest_u2@localhost
+
+# -- Grants for mysqltest_u2@localhost:
+SHOW GRANTS;
+Grants for mysqltest_u2@localhost
+GRANT USAGE ON *.* TO 'mysqltest_u2'@'localhost'
+GRANT ALL PRIVILEGES ON `mysqltest2`.* TO 'mysqltest_u2'@'localhost'
+CREATE TABLE t1(c INT) TABLESPACE ts1;
+DROP TABLE t1;
+
+# -- Connection: mysqltest_u1@localhost
+
+
+# -- Check DROP TABLESPACE...
+DROP TABLESPACE ts1 
+ENGINE = NDB;
+
+# -- Check DROP LOGFILE GROUP...
+DROP LOGFILE GROUP lg1 
+ENGINE = NDB;
+
+# -- Connection: root@localhost
+
+DROP USER mysqltest_u1@localhost;
+DROP USER mysqltest_u2@localhost;
+DROP DATABASE mysqltest2;
+
+# -----------------------------------------------------------------
+# End 6.0 test
+# -----------------------------------------------------------------

=== modified file 'mysql-test/suite/ndb/r/ndb_discover_db.result'
--- a/mysql-test/suite/ndb/r/ndb_discover_db.result	2008-09-30 09:14:44 +0000
+++ b/mysql-test/suite/ndb/r/ndb_discover_db.result	2009-02-02 15:58:48 +0000
@@ -5,3 +5,31 @@ create table discover_db.t1 (a int key, 
 create database discover_db_2;
 alter database discover_db_2 character set binary;
 create table discover_db_2.t1 (a int key, b int) engine ndb;
+show create database discover_db;
+Database	Create Database
+discover_db	CREATE DATABASE `discover_db` /*!40100 DEFAULT CHARACTER SET latin1 */
+show create database discover_db_2;
+Database	Create Database
+discover_db_2	CREATE DATABASE `discover_db_2` /*!40100 DEFAULT CHARACTER SET binary */
+reset master;
+insert into discover_db.t1 values (1,1);
+show binlog events from <binlog_start>;
+Log_name	Pos	Event_type	Server_id	End_log_pos	Info
+mysqld-bin.000001	#	Query	1	#	BEGIN
+mysqld-bin.000001	#	Table_map	1	#	table_id: # (discover_db.t1)
+mysqld-bin.000001	#	Table_map	1	#	table_id: # (mysql.ndb_apply_status)
+mysqld-bin.000001	#	Write_rows	1	#	table_id: #
+mysqld-bin.000001	#	Write_rows	1	#	table_id: # flags: STMT_END_F
+mysqld-bin.000001	#	Query	1	#	COMMIT
+reset master;
+insert into discover_db_2.t1 values (1,1);
+show binlog events from <binlog_start>;
+Log_name	Pos	Event_type	Server_id	End_log_pos	Info
+mysqld-bin.000001	#	Query	1	#	BEGIN
+mysqld-bin.000001	#	Table_map	1	#	table_id: # (discover_db_2.t1)
+mysqld-bin.000001	#	Table_map	1	#	table_id: # (mysql.ndb_apply_status)
+mysqld-bin.000001	#	Write_rows	1	#	table_id: #
+mysqld-bin.000001	#	Write_rows	1	#	table_id: # flags: STMT_END_F
+mysqld-bin.000001	#	Query	1	#	COMMIT
+drop database discover_db;
+drop database discover_db_2;

=== removed file 'mysql-test/suite/ndb/r/ndb_discover_db2.result'
--- a/mysql-test/suite/ndb/r/ndb_discover_db2.result	2008-09-30 09:14:44 +0000
+++ b/mysql-test/suite/ndb/r/ndb_discover_db2.result	1970-01-01 00:00:00 +0000
@@ -1,28 +0,0 @@
-show create database discover_db;
-Database	Create Database
-discover_db	CREATE DATABASE `discover_db` /*!40100 DEFAULT CHARACTER SET latin1 */
-show create database discover_db_2;
-Database	Create Database
-discover_db_2	CREATE DATABASE `discover_db_2` /*!40100 DEFAULT CHARACTER SET binary */
-reset master;
-insert into discover_db.t1 values (1,1);
-show binlog events from <binlog_start>;
-Log_name	Pos	Event_type	Server_id	End_log_pos	Info
-master-bin1.000001	#	Query	102	#	BEGIN
-master-bin1.000001	#	Table_map	102	#	table_id: # (discover_db.t1)
-master-bin1.000001	#	Table_map	102	#	table_id: # (mysql.ndb_apply_status)
-master-bin1.000001	#	Write_rows	102	#	table_id: #
-master-bin1.000001	#	Write_rows	102	#	table_id: # flags: STMT_END_F
-master-bin1.000001	#	Query	102	#	COMMIT
-reset master;
-insert into discover_db_2.t1 values (1,1);
-show binlog events from <binlog_start>;
-Log_name	Pos	Event_type	Server_id	End_log_pos	Info
-master-bin1.000001	#	Query	102	#	BEGIN
-master-bin1.000001	#	Table_map	102	#	table_id: # (discover_db_2.t1)
-master-bin1.000001	#	Table_map	102	#	table_id: # (mysql.ndb_apply_status)
-master-bin1.000001	#	Write_rows	102	#	table_id: #
-master-bin1.000001	#	Write_rows	102	#	table_id: # flags: STMT_END_F
-master-bin1.000001	#	Query	102	#	COMMIT
-drop database discover_db;
-drop database discover_db_2;

=== modified file 'mysql-test/suite/ndb/r/ndb_read_multi_range.result'
--- a/mysql-test/suite/ndb/r/ndb_read_multi_range.result	2008-12-24 10:48:24 +0000
+++ b/mysql-test/suite/ndb/r/ndb_read_multi_range.result	2009-02-02 15:58:48 +0000
@@ -520,7 +520,7 @@ select * from t2 order by id;
 id
 3
 drop trigger kaboom;
-drop table t1, t2;
+drop table t1;
 create table t1 (
 a int not null primary key,
 b int
@@ -636,3 +636,59 @@ i	i	9
 m	m	13
 v	v	22
 drop table t1;
+create table t1 (
+a int not null primary key,
+b int
+) engine = ndb;
+insert into t1 values (7,2),(8,3),(10,4);
+update t1 set b = 5 where a in (7,8) or a >= 10;
+select * from t1 order by a;
+a	b
+7	5
+8	5
+10	5
+delete from t1 where a in (7,8) or a >= 10;
+select * from t1 order by a;
+a	b
+drop table t1;
+create table t1 (a int primary key, b int, key b_idx (b)) engine ndb;
+insert into t1 values(1,1), (2,2), (3,3), (4,4), (5,5);
+select one.a 
+from t1 one left join t1 two 
+on (two.b = one.b) 
+where one.a in (3, 4) 
+order by a;
+a
+3
+4
+drop table t1;
+create table t1 (a varchar(1536) not null,
+b varchar(1536) not null,
+c int, primary key (a,b)) engine=ndb;
+insert into t1 values ('a', 'a', 1), ('b', 'b', 2), ('c', 'c', 3),
+('d', 'd', 4), ('e', 'e', 5), ('f', 'f', 6),
+('g', 'g', 7), ('h', 'h', 8), ('i', 'i', 9),
+('j', 'j', 10), ('k', 'k', 11), ('l', 'l', 12),
+('m', 'm', 13), ('n', 'n', 14), ('o', 'o', 15),
+('p', 'p', 16), ('q', 'q', 17), ('r', 'r', 18),
+('s', 's', 19), ('t', 't', 20), ('u', 'u', 21),
+('v', 'v', 22), ('w', 'w', 23), ('x', 'x', 24);
+select * from t1
+where (a >= 'aa' and b >= 'x' and a <= 'c' and b <= 'c')
+or (a = 'd')
+or (a = 'e')
+or (a = 'f')
+or (a > 'g' and a < 'ii')
+or (a >= 'j' and b >= 'x' and a <= 'k' and b <= 'k')
+or (a = 'm' and b = 'm')
+or (a = 'v')
+order by a asc, b asc;
+a	b	c
+d	d	4
+e	e	5
+f	f	6
+h	h	8
+i	i	9
+m	m	13
+v	v	22
+drop table t1, t2;

=== modified file 'mysql-test/suite/ndb/t/bug36547.test'
--- a/mysql-test/suite/ndb/t/bug36547.test	2008-05-07 14:43:32 +0000
+++ b/mysql-test/suite/ndb/t/bug36547.test	2009-02-02 06:45:57 +0000
@@ -7,7 +7,8 @@ SET NDB_EXTRA_LOGGING=1;
 
 SET @SAVE_NDB_EXTRA_LOGGING= @@NDB_EXTRA_LOGGING;
 SET GLOBAL NDB_EXTRA_LOGGING=1;
-SHOW VARIABLES LIKE 'ndb_extra%';
+SHOW VARIABLES LIKE 'ndb_extra_logging';
 SET GLOBAL NDB_EXTRA_LOGGING=0;
-SHOW VARIABLES LIKE 'ndb_extra%';
-SET @GLOBAL.NDB_EXTRA_LOGGGING= @SAVE_NDB_EXTRA_LOGGING;
+SHOW VARIABLES LIKE 'ndb_extra_logging';
+SET @@GLOBAL.NDB_EXTRA_LOGGING= @SAVE_NDB_EXTRA_LOGGING;
+

=== modified file 'mysql-test/suite/ndb/t/ndb_dbug_lock.test'
--- a/mysql-test/suite/ndb/t/ndb_dbug_lock.test	2008-11-06 18:54:30 +0000
+++ b/mysql-test/suite/ndb/t/ndb_dbug_lock.test	2009-01-23 09:40:08 +0000
@@ -67,5 +67,5 @@ show create table t1;
 
 --echo # Cleanup
 --connection default
-set session debug="-d,sleep_after_global_schema_lock";
+set session debug="-d,";
 drop table t1;

=== modified file 'mysql-test/suite/ndb/t/ndb_dd_ddl.test'
--- a/mysql-test/suite/ndb/t/ndb_dd_ddl.test	2008-10-17 08:37:23 +0000
+++ b/mysql-test/suite/ndb/t/ndb_dd_ddl.test	2009-01-28 15:06:33 +0000
@@ -367,105 +367,4 @@ engine ndb;
 --exec rm $MYSQLTEST_VARDIR/tmp/t1.frm
 
 
---echo
---echo # -----------------------------------------------------------------
---echo # End 5.1 test
---echo # -----------------------------------------------------------------
-
---echo
---echo # --
---echo # -- WL#4300: Define privileges for tablespaces.
---echo # --
-
-GRANT CREATE TABLESPACE ON *.* TO mysqltest_u1@localhost;
-
---echo
-
---disable_warnings
-DROP DATABASE IF EXISTS mysqltest2;
---enable_warnings
-
-CREATE DATABASE mysqltest2;
-
-GRANT ALL PRIVILEGES ON mysqltest2.* TO mysqltest_u2@localhost;
-
---echo
---echo # -- Connection: mysqltest_u1@localhost
---echo
---connect(con1, localhost, mysqltest_u1,,)
-
---echo # -- Grants for mysqltest_u1@localhost:
-SHOW GRANTS;
-
---echo
---echo # -- Check CREATE LOGFILE GROUP...
-CREATE LOGFILE GROUP lg1
-ADD UNDOFILE 'undofile.dat'
-INITIAL_SIZE 1M
-UNDO_BUFFER_SIZE = 1M
-ENGINE = NDB;
-
---echo
---echo # -- Check ALTER LOGFILE GROUP...
-ALTER LOGFILE GROUP lg1
-ADD UNDOFILE 'undofile02.dat'
-INITIAL_SIZE 1M
-ENGINE = NDB;
-
---echo
---echo # -- Check CREATE TABLESPACE...
-CREATE TABLESPACE ts1
-ADD DATAFILE 'datafile.dat'
-USE LOGFILE GROUP lg1
-INITIAL_SIZE 1M
-ENGINE = NDB;
-
---echo
---echo # -- Check ALTER TABLESPACE...
-ALTER TABLESPACE ts1
-DROP DATAFILE 'datafile.dat'
-INITIAL_SIZE 1M
-ENGINE = NDB;
-
---echo
---echo # -- Connection: mysqltest_u2@localhost
---echo
---connect(con2, localhost, mysqltest_u2,,mysqltest2)
-
---echo # -- Grants for mysqltest_u2@localhost:
-SHOW GRANTS;
-
-CREATE TABLE t1(c INT) TABLESPACE ts1;
-
-DROP TABLE t1;
-
---echo
---echo # -- Connection: mysqltest_u1@localhost
---echo
---connection con1
-
---echo
---echo # -- Check DROP TABLESPACE...
-DROP TABLESPACE ts1 
-ENGINE = NDB;
-
---echo
---echo # -- Check DROP LOGFILE GROUP...
-DROP LOGFILE GROUP lg1 
-ENGINE = NDB;
-
---echo
---echo # -- Connection: root@localhost
---echo
---connection default
---disconnect con1
-
-DROP USER mysqltest_u1@localhost;
-DROP USER mysqltest_u2@localhost;
-
-DROP DATABASE mysqltest2;
-
---echo
---echo # -----------------------------------------------------------------
---echo # End 6.0 test
---echo # -----------------------------------------------------------------
+# End 5.1 test

=== added file 'mysql-test/suite/ndb/t/ndb_dd_ddl_grant.test'
--- a/mysql-test/suite/ndb/t/ndb_dd_ddl_grant.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb/t/ndb_dd_ddl_grant.test	2009-01-28 15:06:33 +0000
@@ -0,0 +1,101 @@
+-- source include/have_ndb.inc
+# grants are not in embedded
+-- source include/not_embedded.inc
+
+--echo
+--echo # --
+--echo # -- WL#4300: Define privileges for tablespaces.
+--echo # --
+
+GRANT CREATE TABLESPACE ON *.* TO mysqltest_u1@localhost;
+
+--echo
+
+--disable_warnings
+DROP DATABASE IF EXISTS mysqltest2;
+--enable_warnings
+
+CREATE DATABASE mysqltest2;
+
+GRANT ALL PRIVILEGES ON mysqltest2.* TO mysqltest_u2@localhost;
+
+--echo
+--echo # -- Connection: mysqltest_u1@localhost
+--echo
+--connect(con1, localhost, mysqltest_u1,,)
+
+--echo # -- Grants for mysqltest_u1@localhost:
+SHOW GRANTS;
+
+--echo
+--echo # -- Check CREATE LOGFILE GROUP...
+CREATE LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile.dat'
+INITIAL_SIZE 1M
+UNDO_BUFFER_SIZE = 1M
+ENGINE = NDB;
+
+--echo
+--echo # -- Check ALTER LOGFILE GROUP...
+ALTER LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile02.dat'
+INITIAL_SIZE 1M
+ENGINE = NDB;
+
+--echo
+--echo # -- Check CREATE TABLESPACE...
+CREATE TABLESPACE ts1
+ADD DATAFILE 'datafile.dat'
+USE LOGFILE GROUP lg1
+INITIAL_SIZE 1M
+ENGINE = NDB;
+
+--echo
+--echo # -- Check ALTER TABLESPACE...
+ALTER TABLESPACE ts1
+DROP DATAFILE 'datafile.dat'
+INITIAL_SIZE 1M
+ENGINE = NDB;
+
+--echo
+--echo # -- Connection: mysqltest_u2@localhost
+--echo
+--connect(con2, localhost, mysqltest_u2,,mysqltest2)
+
+--echo # -- Grants for mysqltest_u2@localhost:
+SHOW GRANTS;
+
+CREATE TABLE t1(c INT) TABLESPACE ts1;
+
+DROP TABLE t1;
+
+--echo
+--echo # -- Connection: mysqltest_u1@localhost
+--echo
+--connection con1
+
+--echo
+--echo # -- Check DROP TABLESPACE...
+DROP TABLESPACE ts1 
+ENGINE = NDB;
+
+--echo
+--echo # -- Check DROP LOGFILE GROUP...
+DROP LOGFILE GROUP lg1 
+ENGINE = NDB;
+
+--echo
+--echo # -- Connection: root@localhost
+--echo
+--connection default
+--disconnect con1
+
+DROP USER mysqltest_u1@localhost;
+DROP USER mysqltest_u2@localhost;
+
+DROP DATABASE mysqltest2;
+
+--echo
+--echo # -----------------------------------------------------------------
+--echo # End 6.0 test
+--echo # -----------------------------------------------------------------

=== modified file 'mysql-test/suite/ndb/t/ndb_dd_dump.test'
--- a/mysql-test/suite/ndb/t/ndb_dd_dump.test	2008-12-24 10:48:24 +0000
+++ b/mysql-test/suite/ndb/t/ndb_dd_dump.test	2009-02-02 15:58:48 +0000
@@ -257,10 +257,10 @@ CREATE TABLE test.t (
 #'TRUNCATE test.t' failed: 1205: Lock wait timeout exceeded; try restarting 
 #transaction. TABLESPACE ts STORAGE DISK ENGINE=NDB;
 
-let $MYSQLD_DATADIR= `select @@datadir`;
  SELECT count(*) FROM test.t;
  LOAD DATA INFILE 't_backup' INTO TABLE test.t;
- --remove_file $MYSQLD_DATADIR/test/t_backup
+--let $MYSQLD_DATADIR= `SELECT @@datadir`
+--remove_file  $MYSQLD_DATADIR/test/t_backup
 
  SELECT * FROM test.t order by a;
 

=== modified file 'mysql-test/suite/ndb/t/ndb_discover_db.test'
--- a/mysql-test/suite/ndb/t/ndb_discover_db.test	2008-12-24 10:48:24 +0000
+++ b/mysql-test/suite/ndb/t/ndb_discover_db.test	2009-02-02 15:58:48 +0000
@@ -12,6 +12,27 @@ drop database if exists discover_db_2;
 # The discovery happens in ndb_discover_db2.test
 #
 
+#
+# Shutdown server 1
+#
+
+-- connection server1
+# Write file to make mysql-test-run.pl expect the "crash", but don't start
+# it until it's told to.
+--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.1.expect
+wait
+EOF
+# Send shutdown to the connected server and give
+# it 30 seconds to die before zapping it.
+shutdown_server 30;
+# Check server is gone.
+--source include/wait_until_disconnected.inc
+
+#
+# Create databases while server1 is down
+#
+-- connection server2
+
 # check that created database is discovered
 create database discover_db;
 create table discover_db.t1 (a int key, b int) engine ndb;
@@ -21,15 +42,41 @@ create database discover_db_2;
 alter database discover_db_2 character set binary;
 create table discover_db_2.t1 (a int key, b int) engine ndb;
 
-let $MYSQLD_DATADIR= `select @@datadir`;
+#
+# Startup server1
+#
+
+-- connection server1
+# Write file to make mysql-test-run.pl start up the server again.
+--append_file $MYSQLTEST_VARDIR/tmp/mysqld.1.1.expect
+restart
+EOF
+# Turn on reconnect.
+--enable_reconnect
+# Call script that will poll the server waiting for it to be back online again.
+--source include/wait_until_connected_again.inc
+# Turn off reconnect again.
+--disable_reconnect
+#
+--disable_query_log
+--source include/ndb_not_readonly.inc
+--enable_query_log
+
+#
+# Now check that databases have been discovered
+#
 
--- remove_file $MYSQLD_DATADIR/discover_db/t1.frm
--- remove_file $MYSQLD_DATADIR/discover_db/t1.ndb
--- remove_file $MYSQLD_DATADIR/discover_db/db.opt
--- rmdir $MYSQLD_DATADIR/discover_db
-
--- remove_file $MYSQLD_DATADIR/discover_db_2/t1.frm
--- remove_file $MYSQLD_DATADIR/discover_db_2/t1.ndb
--- remove_file $MYSQLD_DATADIR/discover_db_2/db.opt
--- rmdir $MYSQLD_DATADIR/discover_db_2
+show create database discover_db;
+show create database discover_db_2;
+reset master;
+insert into discover_db.t1 values (1,1);
+--source include/show_binlog_events2.inc
+reset master;
+insert into discover_db_2.t1 values (1,1);
+--source include/show_binlog_events2.inc
 
+#
+# Cleanup
+#
+drop database discover_db;
+drop database discover_db_2;

=== removed file 'mysql-test/suite/ndb/t/ndb_discover_db2-master.opt'
--- a/mysql-test/suite/ndb/t/ndb_discover_db2-master.opt	2008-09-30 09:14:44 +0000
+++ b/mysql-test/suite/ndb/t/ndb_discover_db2-master.opt	1970-01-01 00:00:00 +0000
@@ -1 +0,0 @@
---skip-external-locking

=== removed file 'mysql-test/suite/ndb/t/ndb_discover_db2.test'
--- a/mysql-test/suite/ndb/t/ndb_discover_db2.test	2008-09-30 09:14:44 +0000
+++ b/mysql-test/suite/ndb/t/ndb_discover_db2.test	1970-01-01 00:00:00 +0000
@@ -1,21 +0,0 @@
--- source include/have_multi_ndb.inc
--- source include/have_binlog_format_mixed_or_row.inc
-
-#
-# When this test started there was no database on disk for server2
-# Check that table has been discovered correctly, and that the
-# binlog is updated correctly
-#
-
--- connection server2
-show create database discover_db;
-show create database discover_db_2;
-reset master;
-insert into discover_db.t1 values (1,1);
---source include/show_binlog_events2.inc
-reset master;
-insert into discover_db_2.t1 values (1,1);
---source include/show_binlog_events2.inc
-
-drop database discover_db;
-drop database discover_db_2;

=== removed file 'mysql-test/suite/ndb/t/ndb_partition_error2-master.opt'
--- a/mysql-test/suite/ndb/t/ndb_partition_error2-master.opt	2007-06-27 12:28:02 +0000
+++ b/mysql-test/suite/ndb/t/ndb_partition_error2-master.opt	1970-01-01 00:00:00 +0000
@@ -1 +0,0 @@
---ndbcluster

=== modified file 'mysql-test/suite/ndb/t/ndb_read_multi_range.test'
--- a/mysql-test/suite/ndb/t/ndb_read_multi_range.test	2008-12-24 10:48:24 +0000
+++ b/mysql-test/suite/ndb/t/ndb_read_multi_range.test	2009-02-02 15:58:48 +0000
@@ -386,7 +386,7 @@ delete from t1 where id in (1,2);
 select * from t2 order by id;
 
 drop trigger kaboom;
-drop table t1, t2;
+drop table t1;
 
 #bug#31874
 
@@ -484,3 +484,54 @@ select * from t1
     or (a = 'v')
     order by a asc, b asc;
 drop table t1;
+
+#bug#31874
+
+create table t1 (
+  a int not null primary key,
+  b int
+) engine = ndb;
+insert into t1 values (7,2),(8,3),(10,4);
+
+update t1 set b = 5 where a in (7,8) or a >= 10;
+select * from t1 order by a;
+delete from t1 where a in (7,8) or a >= 10;
+select * from t1 order by a;
+
+drop table t1;
+
+#bug#35137 - self join + mrr
+
+create table t1 (a int primary key, b int, key b_idx (b)) engine ndb;
+insert into t1 values(1,1), (2,2), (3,3), (4,4), (5,5);
+
+select one.a 
+from t1 one left join t1 two 
+on (two.b = one.b) 
+where one.a in (3, 4) 
+order by a;
+
+drop table t1;
+
+create table t1 (a varchar(1536) not null,
+                 b varchar(1536) not null,
+                 c int, primary key (a,b)) engine=ndb;
+insert into t1 values ('a', 'a', 1), ('b', 'b', 2), ('c', 'c', 3),
+                      ('d', 'd', 4), ('e', 'e', 5), ('f', 'f', 6),
+                      ('g', 'g', 7), ('h', 'h', 8), ('i', 'i', 9),
+                      ('j', 'j', 10), ('k', 'k', 11), ('l', 'l', 12),
+                      ('m', 'm', 13), ('n', 'n', 14), ('o', 'o', 15),
+                      ('p', 'p', 16), ('q', 'q', 17), ('r', 'r', 18),
+                      ('s', 's', 19), ('t', 't', 20), ('u', 'u', 21),
+                      ('v', 'v', 22), ('w', 'w', 23), ('x', 'x', 24);
+select * from t1
+ where (a >= 'aa' and b >= 'x' and a <= 'c' and b <= 'c')
+    or (a = 'd')
+    or (a = 'e')
+    or (a = 'f')
+    or (a > 'g' and a < 'ii')
+    or (a >= 'j' and b >= 'x' and a <= 'k' and b <= 'k')
+    or (a = 'm' and b = 'm')
+    or (a = 'v')
+    order by a asc, b asc;
+drop table t1, t2;

=== removed file 'mysql-test/suite/ndb/t/ndb_restore_partition-master.opt'
--- a/mysql-test/suite/ndb/t/ndb_restore_partition-master.opt	2007-06-27 12:28:02 +0000
+++ b/mysql-test/suite/ndb/t/ndb_restore_partition-master.opt	1970-01-01 00:00:00 +0000
@@ -1 +0,0 @@
---new

=== modified file 'mysql-test/suite/ndb/t/ndb_restore_partition.test'
--- a/mysql-test/suite/ndb/t/ndb_restore_partition.test	2007-07-04 20:38:53 +0000
+++ b/mysql-test/suite/ndb/t/ndb_restore_partition.test	2009-02-03 13:28:13 +0000
@@ -2,6 +2,10 @@
 -- source include/ndb_default_cluster.inc
 -- source include/not_embedded.inc
 
+--disable_query_log
+set new=on;
+--enable_query_log
+
 --disable_warnings
 use test;
 drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;

=== added file 'mysql-test/suite/ndb_binlog/my.cnf'
--- a/mysql-test/suite/ndb_binlog/my.cnf	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_binlog/my.cnf	2009-02-02 10:35:33 +0000
@@ -0,0 +1,23 @@
+!include include/default_mysqld.cnf
+!include include/default_ndbd.cnf
+
+[cluster_config.1]
+NoOfReplicas=                  2
+ndbd=,
+ndb_mgmd=
+mysqld=,
+ndbapi=,,,,,,,,,,,
+
+[mysqld]
+# Make all mysqlds use cluster
+ndbcluster
+ndb-wait-connected=20
+ndb-cluster-connection-pool=3
+
+[ENV]
+NDB_CONNECTSTRING=             @mysql_cluster.1.ndb_connectstring
+MASTER_MYPORT=                 @mysqld.1.1.port
+MASTER_MYPORT1=                @mysqld.2.1.port
+
+NDB_BACKUP_DIR=                @cluster_config.ndbd.1.1.BackupDataDir
+

=== modified file 'mysql-test/suite/ndb_binlog/r/ndb_binlog_basic.result'
--- a/mysql-test/suite/ndb_binlog/r/ndb_binlog_basic.result	2008-03-12 13:13:49 +0000
+++ b/mysql-test/suite/ndb_binlog/r/ndb_binlog_basic.result	2009-01-29 12:44:41 +0000
@@ -4,6 +4,21 @@ create database mysqltest;
 use mysqltest;
 drop database mysqltest;
 use test;
+show create table mysql.ndb_binlog_index;
+Table	Create Table
+ndb_binlog_index	CREATE TABLE `ndb_binlog_index` (
+  `Position` bigint(20) unsigned NOT NULL,
+  `File` varchar(255) NOT NULL,
+  `epoch` bigint(20) unsigned NOT NULL,
+  `inserts` int(10) unsigned NOT NULL,
+  `updates` int(10) unsigned NOT NULL,
+  `deletes` int(10) unsigned NOT NULL,
+  `schemaops` int(10) unsigned NOT NULL,
+  `orig_server_id` int(10) unsigned NOT NULL,
+  `orig_epoch` bigint(20) unsigned NOT NULL,
+  `gci` int(10) unsigned NOT NULL,
+  PRIMARY KEY (`epoch`,`orig_server_id`,`orig_epoch`)
+) ENGINE=MARIA DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
 reset master;
 create table t1 (a int primary key) engine=ndb;
 insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);

=== modified file 'mysql-test/suite/ndb_binlog/r/ndb_binlog_restore.result'
--- a/mysql-test/suite/ndb_binlog/r/ndb_binlog_restore.result	2008-06-18 15:03:43 +0000
+++ b/mysql-test/suite/ndb_binlog/r/ndb_binlog_restore.result	2009-02-01 21:05:19 +0000
@@ -4,8 +4,6 @@ drop table if exists t1;
 #
 create table t1 (a int key, b int) engine ndb;
 insert into t1 values (1,1);
-@the_backup_id:=backup_id
-<the_backup_id>
 #
 # extra table to be used to ensure data has arrived in the binlog
 create table t2 (a int key, b int) engine ndb;
@@ -43,12 +41,12 @@ set SQL_LOG_BIN=0;
 insert into t2 values (2,2);
 show binlog events from <binlog_start>;
 Log_name	Pos	Event_type	Server_id	End_log_pos	Info
-master-bin.000001	#	Query	1	#	BEGIN
-master-bin.000001	#	Table_map	1	#	table_id: # (test.t1)
-master-bin.000001	#	Table_map	1	#	table_id: # (mysql.ndb_apply_status)
-master-bin.000001	#	Write_rows	1	#	table_id: #
-master-bin.000001	#	Write_rows	1	#	table_id: # flags: STMT_END_F
-master-bin.000001	#	Query	1	#	COMMIT
+mysqld-bin.000001	#	Query	1	#	BEGIN
+mysqld-bin.000001	#	Table_map	1	#	table_id: # (test.t1)
+mysqld-bin.000001	#	Table_map	1	#	table_id: # (mysql.ndb_apply_status)
+mysqld-bin.000001	#	Write_rows	1	#	table_id: #
+mysqld-bin.000001	#	Write_rows	1	#	table_id: # flags: STMT_END_F
+mysqld-bin.000001	#	Query	1	#	COMMIT
 drop table t1, t2;
 #
 # Now more complex using "BANK schema" including restore of log

=== modified file 'mysql-test/suite/ndb_binlog/t/ndb_binlog_basic.test'
--- a/mysql-test/suite/ndb_binlog/t/ndb_binlog_basic.test	2008-02-25 13:50:20 +0000
+++ b/mysql-test/suite/ndb_binlog/t/ndb_binlog_basic.test	2009-01-29 12:44:41 +0000
@@ -10,6 +10,9 @@ drop database mysqltest;
 use test;
 --enable_warnings
 
+# check type and schema for ndb_binlog_index
+show create table mysql.ndb_binlog_index;
+
 #
 # basic insert, update, delete test, alter, rename, drop
 # check that ndb_binlog_index gets the right info

=== added file 'mysql-test/suite/ndb_team/my.cnf'
--- a/mysql-test/suite/ndb_team/my.cnf	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_team/my.cnf	2009-02-02 15:58:48 +0000
@@ -0,0 +1,89 @@
+!include include/default_mysqld.cnf
+!include include/default_ndbd.cnf
+
+[cluster_config.1]
+NoOfReplicas=                  2
+ndbd=,
+ndb_mgmd=
+mysqld=,
+ndbapi=,,,,,,,,,
+
+[cluster_config.slave]
+NoOfReplicas=                  1
+MaxNoOfConcurrentTransactions= 64
+TimeBetweenGlobalCheckpoints= 500
+TimeBetweenEpochs= 0
+NoOfFragmentLogFiles= 8
+FragmentLogFileSize= 6M
+ndbd=
+ndb_mgmd=
+mysqld=
+ndbapi=,,,,
+
+[mysqld]
+# Make all mysqlds use cluster
+ndbcluster
+ndb-wait-connected=20
+ndb-cluster-connection-pool=3
+slave-allow-batching
+ndb-log-orig
+# Turn on bin logging
+log-bin=                       master-bin
+
+# Time to wait for NDB connection before
+# accepting client connections
+ndb-wait-connected=            20
+
+ndb-extra-logging
+
+[mysqld.1.1]
+
+[mysqld.1.1]
+
+[mysqld.1.slave]
+
+# Append <testname>-slave.opt file to the list of arguments used when
+# starting the mysqld
+#!use-slave-opt
+
+log-bin=                      slave-bin
+relay-log=                    slave-relay-bin
+
+init-rpl-role=                slave
+log-slave-updates
+master-retry-count=           10
+
+# Values reported by slave when it connects to master
+# and shows up in SHOW SLAVE STATUS;
+report-host=                  127.0.0.1
+report-port=                  @mysqld.1.slave.port
+report-user=                  root
+
+loose-skip-innodb
+skip-slave-start
+
+# Directory where slaves find the dumps generated by "load data"
+# on the server. The path needs to have a constant length, otherwise
+# test results will vary, thus a relative path is used.
+slave-load-tmpdir=            ../../../tmp
+
+rpl-recovery-rank=            @mysqld.1.slave.server-id
+
+# Use batching when applying the binlog on slave
+slave-allow-batching
+
+# Write additional info in mysql.ndb_binlog_index
+# to allow multi way replication
+ndb-log-orig
+
+
+[ENV]
+NDB_CONNECTSTRING=            @mysql_cluster.1.ndb_connectstring
+MASTER_MYPORT=                @mysqld.1.1.port
+MASTER_MYPORT1=               @mysqld.2.1.port
+
+NDB_CONNECTSTRING_SLAVE=      @mysql_cluster.slave.ndb_connectstring
+SLAVE_MYPORT=                 @mysqld.1.slave.port
+SLAVE_MYSOCK=                 @mysqld.1.slave.socket
+
+NDB_BACKUP_DIR=               @cluster_config.ndbd.1.1.BackupDataDir

=== modified file 'mysql-test/suite/ndb_team/r/rpl_ndb_extraColMaster.result'
--- a/mysql-test/suite/ndb_team/r/rpl_ndb_extraColMaster.result	2008-02-13 19:52:52 +0000
+++ b/mysql-test/suite/ndb_team/r/rpl_ndb_extraColMaster.result	2009-02-02 15:58:48 +0000
@@ -133,6 +133,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	0
 Last_SQL_Error	
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 
 
 ***** Testing Altering table def scenario *****
@@ -507,6 +509,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	0
 Last_SQL_Error	
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 
 ****************************************
 * columns in master at middle of table *
@@ -581,6 +585,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1535
 Last_SQL_Error	Table definition on master and slave does not match: Column 2 type mismatch - received type 5, test.t10 has type 254
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2;
 START SLAVE;
 
@@ -656,6 +662,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1535
 Last_SQL_Error	Table definition on master and slave does not match: Column 2 type mismatch - received type 252, test.t11 has type 15
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2;
 START SLAVE;
 
@@ -807,6 +815,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1091
 Last_SQL_Error	Error 'Can't DROP 'c7'; check that column/key exists' on query. Default database: 'test'. Query: 'ALTER TABLE t14 DROP COLUMN c7'
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 STOP SLAVE;
 RESET SLAVE;
 
@@ -893,6 +903,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1054
 Last_SQL_Error	Error 'Unknown column 'c7' in 't15'' on query. Default database: 'test'. Query: 'ALTER TABLE t15 ADD COLUMN c2 DECIMAL(8,2) AFTER c7'
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 STOP SLAVE;
 RESET SLAVE;
 
@@ -979,6 +991,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1072
 Last_SQL_Error	Error 'Key column 'c6' doesn't exist in table' on query. Default database: 'test'. Query: 'CREATE INDEX part_of_c6 ON t16 (c6)'
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 STOP SLAVE;
 RESET SLAVE;
 
@@ -1272,6 +1286,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	0
 Last_SQL_Error	
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 
 
 ***** Testing Altering table def scenario *****
@@ -1646,6 +1662,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	0
 Last_SQL_Error	
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 
 ****************************************
 * columns in master at middle of table *
@@ -1720,6 +1738,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1535
 Last_SQL_Error	Table definition on master and slave does not match: Column 2 type mismatch - received type 5, test.t10 has type 254
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2;
 START SLAVE;
 
@@ -1795,6 +1815,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1535
 Last_SQL_Error	Table definition on master and slave does not match: Column 2 type mismatch - received type 252, test.t11 has type 15
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2;
 START SLAVE;
 
@@ -1946,6 +1968,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1091
 Last_SQL_Error	Error 'Can't DROP 'c7'; check that column/key exists' on query. Default database: 'test'. Query: 'ALTER TABLE t14 DROP COLUMN c7'
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 STOP SLAVE;
 RESET SLAVE;
 
@@ -2032,6 +2056,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1054
 Last_SQL_Error	Error 'Unknown column 'c7' in 't15'' on query. Default database: 'test'. Query: 'ALTER TABLE t15 ADD COLUMN c2 DECIMAL(8,2) AFTER c7'
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 STOP SLAVE;
 RESET SLAVE;
 
@@ -2118,6 +2144,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1072
 Last_SQL_Error	Error 'Key column 'c6' doesn't exist in table' on query. Default database: 'test'. Query: 'CREATE INDEX part_of_c6 ON t16 (c6)'
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 STOP SLAVE;
 RESET SLAVE;
 

=== modified file 'mysql-test/suite/ndb_team/t/rpl_ndb_dd_advance.test'
--- a/mysql-test/suite/ndb_team/t/rpl_ndb_dd_advance.test	2008-12-24 10:48:24 +0000
+++ b/mysql-test/suite/ndb_team/t/rpl_ndb_dd_advance.test	2009-02-02 16:02:58 +0000
@@ -287,25 +287,7 @@ while ($j)
 
 SELECT COUNT(*) FROM history;
 
-#RESET MASTER;
---exec $NDB_MGM --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -e "start backup" >> $NDB_TOOLS_OUTPUT
-
---exec $NDB_TOOLS_DIR/ndb_select_all --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -d sys --delimiter=',' SYSTAB_0 | grep 520093696 > $MYSQLTEST_VARDIR/tmp.dat
-
-CREATE TEMPORARY TABLE IF NOT EXISTS mysql.backup_info (id INT, backup_id INT) ENGINE = HEAP;
-
-DELETE FROM mysql.backup_info;
-
-LOAD DATA INFILE '../tmp.dat' INTO TABLE mysql.backup_info FIELDS TERMINATED BY ',';
---remove_file $MYSQLTEST_VARDIR/tmp.dat
---replace_column 1 <the_backup_id>
-
-SELECT @the_backup_id:=backup_id FROM mysql.backup_info;
-
-let the_backup_id=`select @the_backup_id`;
-
-DROP TABLE IF EXISTS mysql.backup_info;
-#RESET MASTER;
+--source include/ndb_backup.inc
 
 --echo ************ Restore the slave ************************
 connection slave;
@@ -375,9 +357,9 @@ SELECT COUNT(*) FROM history;
 
 --echo *** DUMP MASTER & SLAVE FOR COMPARE ********
 
---exec $MYSQL_DUMP  --no-tablespaces --compact --order-by-primary --skip-extended-insert tpcb account teller branch history > $MYSQLTEST_VARDIR/tmp/RPL_DD_ADV_M.sql
+--exec $MYSQL_DUMP --no-tablespaces --compact --order-by-primary --skip-extended-insert tpcb account teller branch history > $MYSQLTEST_VARDIR/tmp/RPL_DD_ADV_M.sql
 
---exec $MYSQL_DUMP_SLAVE  --no-tablespaces --compact --order-by-primary --skip-extended-insert tpcb account teller branch history > $MYSQLTEST_VARDIR/tmp/RPL_DD_ADV_S.sql
+--exec $MYSQL_DUMP_SLAVE --no-tablespaces --compact --order-by-primary --skip-extended-insert tpcb account teller branch history > $MYSQLTEST_VARDIR/tmp/RPL_DD_ADV_S.sql
 
 --echo *************** TEST 2 CLEANUP SECTION ********************
 connection master;

=== modified file 'mysql-test/suite/parts/r/partition_auto_increment_ndb.result'
--- a/mysql-test/suite/parts/r/partition_auto_increment_ndb.result	2009-02-05 17:47:24 +0000
+++ b/mysql-test/suite/parts/r/partition_auto_increment_ndb.result	2009-02-11 12:11:20 +0000
@@ -124,7 +124,7 @@ INSERT INTO t1 VALUES (NULL);
 DELETE FROM t1 WHERE c1 >= 100;
 OPTIMIZE TABLE t1;
 Table	Op	Msg_type	Msg_text
-test.t1	optimize	note	The storage engine for the table doesn't support optimize
+test.t1	optimize	status	OK
 SHOW CREATE TABLE t1;
 Table	Create Table
 t1	CREATE TABLE `t1` (
@@ -393,7 +393,7 @@ INSERT INTO t1 VALUES (NULL);
 DELETE FROM t1 WHERE c1 >= 100;
 OPTIMIZE TABLE t1;
 Table	Op	Msg_type	Msg_text
-test.t1	optimize	note	The storage engine for the table doesn't support optimize
+test.t1	optimize	status	OK
 SHOW CREATE TABLE t1;
 Table	Create Table
 t1	CREATE TABLE `t1` (

=== modified file 'mysql-test/suite/rpl_ndb/my.cnf'
--- a/mysql-test/suite/rpl_ndb/my.cnf	2008-05-09 15:28:34 +0000
+++ b/mysql-test/suite/rpl_ndb/my.cnf	2009-02-02 15:58:48 +0000
@@ -6,6 +6,7 @@ NoOfReplicas=                  2
 ndbd=,
 ndb_mgmd=
 mysqld=,
+ndbapi=,,,,,,,,,
 
 [cluster_config.slave]
 NoOfReplicas=                  1
@@ -17,10 +18,15 @@ FragmentLogFileSize= 6M
 ndbd=
 ndb_mgmd=
 mysqld=
+ndbapi=,,,,
 
 [mysqld]
 # Make all mysqlds use cluster
 ndbcluster
+ndb-wait-connected=20
+ndb-cluster-connection-pool=3
+slave-allow-batching
+ndb-log-orig
 # Turn on bin logging
 log-bin=                       master-bin
 

=== added file 'mysql-test/suite/rpl_ndb_big/my.cnf'
--- a/mysql-test/suite/rpl_ndb_big/my.cnf	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb_big/my.cnf	2009-02-02 22:37:44 +0000
@@ -0,0 +1,89 @@
+!include include/default_mysqld.cnf
+!include include/default_ndbd.cnf
+
+[cluster_config.1]
+NoOfReplicas=                  2
+ndbd=,
+ndb_mgmd=
+mysqld=,
+ndbapi=,,,,,,,,,
+
+[cluster_config.slave]
+NoOfReplicas=                  1
+MaxNoOfConcurrentTransactions= 64
+TimeBetweenGlobalCheckpoints= 500
+TimeBetweenEpochs= 0
+NoOfFragmentLogFiles= 8
+FragmentLogFileSize= 6M
+ndbd=
+ndb_mgmd=
+mysqld=
+ndbapi=,,,,
+
+[mysqld]
+# Make all mysqlds use cluster
+ndbcluster
+ndb-wait-connected=20
+ndb-cluster-connection-pool=3
+slave-allow-batching
+ndb-log-orig
+# Turn on bin logging
+log-bin=                       master-bin
+
+# Time to wait for NDB connection before
+# accepting client connections
+ndb-wait-connected=            20
+
+ndb-extra-logging
+
+[mysqld.1.1]
+
+[mysqld.1.1]
+
+[mysqld.1.slave]
+
+# Append <testname>-slave.opt file to the list of arguments used when
+# starting the mysqld
+#!use-slave-opt
+
+log-bin=                      slave-bin
+relay-log=                    slave-relay-bin
+
+init-rpl-role=                slave
+log-slave-updates
+master-retry-count=           10
+
+# Values reported by slave when it connects to master
+# and shows up in SHOW SLAVE STATUS;
+report-host=                  127.0.0.1
+report-port=                  @mysqld.1.slave.port
+report-user=                  root
+
+loose-skip-innodb
+skip-slave-start
+
+# Directory where slaves find the dumps generated by "load data"
+# on the server. The path needs to have a constant length, otherwise
+# test results will vary, thus a relative path is used.
+slave-load-tmpdir=            ../../../tmp
+
+rpl-recovery-rank=            @mysqld.1.slave.server-id
+
+# Use batching when applying the binlog on slave
+slave-allow-batching
+
+# Write additional info in mysql.ndb_binlog_index
+# to allow multi way replication
+ndb-log-orig
+
+
+[ENV]
+NDB_CONNECTSTRING=            @mysql_cluster.1.ndb_connectstring
+MASTER_MYPORT=                @mysqld.1.1.port
+MASTER_MYPORT1=               @mysqld.2.1.port
+
+NDB_CONNECTSTRING_SLAVE=      @mysql_cluster.slave.ndb_connectstring
+SLAVE_MYPORT=                 @mysqld.1.slave.port
+SLAVE_MYSOCK=                 @mysqld.1.slave.socket
+
+NDB_BACKUP_DIR=               @cluster_config.ndbd.1.1.BackupDataDir

=== modified file 'mysql-test/suite/rpl_ndb_big/r/rpl_ndb_2innodb.result'
--- a/mysql-test/suite/rpl_ndb_big/r/rpl_ndb_2innodb.result	2008-11-04 11:30:00 +0000
+++ b/mysql-test/suite/rpl_ndb_big/r/rpl_ndb_2innodb.result	2009-02-04 12:35:46 +0000
@@ -924,4 +924,5 @@ DELETE FROM t1;
 --- End test 5 key partition testing ---
 --- Do Cleanup ---
 DROP TABLE IF EXISTS t1;
+set @@global.slave_exec_mode= 'STRICT';
 drop table mysql.ndb_apply_status;

=== modified file 'mysql-test/suite/rpl_ndb_big/r/rpl_ndb_2myisam.result'
--- a/mysql-test/suite/rpl_ndb_big/r/rpl_ndb_2myisam.result	2008-11-04 11:30:00 +0000
+++ b/mysql-test/suite/rpl_ndb_big/r/rpl_ndb_2myisam.result	2009-02-04 12:35:46 +0000
@@ -924,4 +924,5 @@ DELETE FROM t1;
 --- End test 5 key partition testing ---
 --- Do Cleanup ---
 DROP TABLE IF EXISTS t1;
+set @@global.slave_exec_mode= 'STRICT';
 drop table mysql.ndb_apply_status;

=== modified file 'mysql-test/suite/rpl_ndb_big/r/rpl_ndb_sync.result'
--- a/mysql-test/suite/rpl_ndb_big/r/rpl_ndb_sync.result	2008-10-29 08:45:14 +0000
+++ b/mysql-test/suite/rpl_ndb_big/r/rpl_ndb_sync.result	2009-02-03 06:41:56 +0000
@@ -106,6 +106,8 @@ Last_IO_Errno	<Last_IO_Errno>
 Last_IO_Error	<Last_IO_Error>
 Last_SQL_Errno	0
 Last_SQL_Error	
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;
 hex(c1)	hex(c2)	c3
 1	1	row1

=== modified file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2innodb-master.opt'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2innodb-master.opt	2008-09-11 08:01:28 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2innodb-master.opt	2009-02-04 12:35:46 +0000
@@ -1 +1 @@
---new --default-storage-engine=ndbcluster --ndb_log_updated_only=0 
+--new --ndb-log-updated-only=0 

=== modified file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2innodb.test'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2innodb.test	2008-10-29 08:45:14 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2innodb.test	2009-02-04 12:35:46 +0000
@@ -29,7 +29,14 @@ CREATE TABLE mysql.ndb_apply_status
                    end_pos BIGINT UNSIGNED NOT NULL,
                    PRIMARY KEY USING HASH (server_id)) ENGINE=INNODB;
 
+-- connection master
+--disable_query_log
+set new=on;
+set storage_engine=ndbcluster;
+--enable_query_log
+
 --source extra/rpl_tests/rpl_ndb_2multi_eng.test
 
 --connection slave
+set @@global.slave_exec_mode= 'STRICT';
 drop table mysql.ndb_apply_status;

=== modified file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2myisam-master.opt'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2myisam-master.opt	2008-09-11 08:01:28 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2myisam-master.opt	2009-02-04 12:35:46 +0000
@@ -1 +1 @@
---new --default-storage-engine=ndbcluster --ndb_log_updated_only=0
+--ndb_log_updated_only=0

=== modified file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2myisam.test'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2myisam.test	2008-10-29 08:45:14 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2myisam.test	2009-02-04 12:35:46 +0000
@@ -28,7 +28,14 @@ CREATE TABLE mysql.ndb_apply_status
                    end_pos BIGINT UNSIGNED NOT NULL,
                    PRIMARY KEY USING HASH (server_id)) ENGINE=MYISAM;
 
+-- connection master
+--disable_query_log
+set new=on;
+set storage_engine=ndbcluster;
+--enable_query_log
+
 --source extra/rpl_tests/rpl_ndb_2multi_eng.test
 
 --connection slave
+set @@global.slave_exec_mode= 'STRICT';
 drop table mysql.ndb_apply_status;

=== modified file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_apply_status.test'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_apply_status.test	2008-12-13 11:02:16 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_apply_status.test	2009-02-03 14:01:50 +0000
@@ -16,7 +16,7 @@ select * from mysql.ndb_apply_status;
 
 
 -- source include/have_ndb.inc
--- source include/have_binlog_format_row.inc
+-- source include/have_binlog_format_mixed_or_row.inc
 -- source include/ndb_master-slave.inc
 
 #

=== added file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-master.opt'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-master.opt	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-master.opt	2009-02-04 14:48:13 +0000
@@ -0,0 +1 @@
+--new=true

=== removed file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-master.opt'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-master.opt	2008-09-11 08:01:28 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-master.opt	1970-01-01 00:00:00 +0000
@@ -1 +0,0 @@
---new=true

=== added file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-slave.opt'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-slave.opt	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-slave.opt	2009-02-04 14:48:13 +0000
@@ -0,0 +1 @@
+--new=true

=== removed file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-slave.opt'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-slave.opt	2008-09-11 08:01:28 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-slave.opt	1970-01-01 00:00:00 +0000
@@ -1 +0,0 @@
---new=true

=== removed file 'mysql-test/suite/rpl_ndb_big/t/rpl_truncate_7ndb_2-master.opt'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_truncate_7ndb_2-master.opt	2008-11-22 15:24:06 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_truncate_7ndb_2-master.opt	1970-01-01 00:00:00 +0000
@@ -1 +0,0 @@
---binlog-format=mixed

=== modified file 'mysql-test/suite/rpl_ndb_big/t/rpl_truncate_7ndb_2.test'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_truncate_7ndb_2.test	2008-12-24 10:48:24 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_truncate_7ndb_2.test	2009-02-03 14:01:50 +0000
@@ -6,6 +6,5 @@
 # Change Author:  pcrews
 # Change:  Moved test to rpl_ndb suite, updated location of --source .test file
 
---source include/have_binlog_format_mixed.inc
---source include/big_test.inc
+--source include/have_binlog_format_mixed_or_row.inc
 --source suite/rpl_ndb_big/t/rpl_truncate_7ndb.test

=== modified file 'mysql-test/t/archive_aio_posix.test'
--- a/mysql-test/t/archive_aio_posix.test	2007-11-27 15:55:24 +0000
+++ b/mysql-test/t/archive_aio_posix.test	2009-02-04 22:22:32 +0000
@@ -1562,3 +1562,6 @@ insert into t1 set a='';
 insert into t1 set a='a';
 check table t1 extended;
 drop table t1;
+
+--echo # Test file cleanup
+SET GLOBAL archive_aio=off;

=== modified file 'mysql-test/t/comment_index.test'
--- a/mysql-test/t/comment_index.test	2007-06-30 21:25:11 +0000
+++ b/mysql-test/t/comment_index.test	2009-02-04 22:22:32 +0000
@@ -96,6 +96,4 @@ SELECT comment,index_comment,char_length
 let $ENGINE=`select variable_value from information_schema.global_variables where variable_name='STORAGE_ENGINE'`;
 --replace_result $ENGINE ENGINE
 SHOW CREATE TABLE t1;
-
-
-
+DROP TABLE t1;

=== modified file 'mysql-test/t/disabled.def'
--- a/mysql-test/t/disabled.def	2009-02-03 09:16:53 +0000
+++ b/mysql-test/t/disabled.def	2009-02-04 12:34:03 +0000
@@ -40,3 +40,4 @@ user_limits                       : Bug#
 query_cache_28249                 : Bug#41098 Query Cache returns wrong result with concurrent insert
 innodb_bug39438          : BUG#42383 2009-01-28 lsoares "This fails in embedded and on windows.  Note that this test is not run on windows and on embedded in PB for main trees currently"
 subselect3_jcl6          : BUG#42534 subselect3_jcl6 produces valgrind warnings with MTR2 (2008-02-02 spetrunia)
+join_cache               : Bug#42585 joro main.join_cache fails on powermacg5  

=== modified file 'mysql-test/t/grant4.test'
--- a/mysql-test/t/grant4.test	2008-04-11 10:09:58 +0000
+++ b/mysql-test/t/grant4.test	2009-02-04 22:22:32 +0000
@@ -143,4 +143,4 @@ SHOW CREATE VIEW v3;
 connection default;
 disconnect con1;
 drop database mysqltest_db1;
-
+drop user mysqltest_u1@localhost;

=== modified file 'mysql-test/t/implicit_commit.test'
--- a/mysql-test/t/implicit_commit.test	2008-12-13 19:55:44 +0000
+++ b/mysql-test/t/implicit_commit.test	2009-02-04 22:22:32 +0000
@@ -461,6 +461,8 @@ let $statement=
   revoke all on test.t1 from mysqltest_2@localhost;
 source include/implicit_commit_helper.inc;
 
+drop user mysqltest_2@localhost;
+
 --echo #
 --echo # SQLCOM_SHOW_GRANTS
 --echo #

=== modified file 'mysql-test/t/partition_mgm.test'
--- a/mysql-test/t/partition_mgm.test	2009-01-26 16:32:29 +0000
+++ b/mysql-test/t/partition_mgm.test	2009-02-04 13:08:05 +0000
@@ -11,7 +11,12 @@ ENGINE MYISAM
 PARTITION BY HASH (a)
 PARTITIONS 1;
 INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
+--error ER_REORG_NO_PARAM_ERROR
 ALTER TABLE t1 REORGANIZE PARTITION;
+--error ER_REORG_NO_PARAM_ERROR
+ALTER ONLINE TABLE t1 REORGANIZE PARTITION;
+--error ER_REORG_NO_PARAM_ERROR
+ALTER OFFLINE TABLE t1 REORGANIZE PARTITION;
 DROP TABLE t1;
 
 #

=== modified file 'mysql-test/t/subselect3.test'
--- a/mysql-test/t/subselect3.test	2009-01-30 14:13:39 +0000
+++ b/mysql-test/t/subselect3.test	2009-02-04 11:27:06 +0000
@@ -989,6 +989,21 @@ explain select * from t1 where (a,b,c) i
 
 drop table t0,t1,t2;
 
+--echo
+--echo BUG#37842: Assertion in DsMrr_impl::dsmrr_init, at handler.cc:4307
+--echo
+CREATE TABLE t1 (
+  `pk` int(11) NOT NULL AUTO_INCREMENT,
+  `int_key` int(11) DEFAULT NULL,
+  PRIMARY KEY (`pk`),
+  KEY `int_key` (`int_key`)
+) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1,9),(2,3),(3,8),(4,6),(5,9),(6,5),(7,5),(8,9),(9,1),(10,10);
+SELECT `pk` FROM t1 AS OUTR WHERE `int_key` = ALL (
+ SELECT `int_key` FROM t1 AS INNR WHERE INNR . `pk` >= 9
+);
+DROP TABLE t1;
+
 --echo 
 --echo BUG#40118 Crash when running Batched Key Access and requiring one match for each key
 --echo

=== modified file 'mysys/lf_hash.c'
--- a/mysys/lf_hash.c	2008-12-17 18:40:14 +0000
+++ b/mysys/lf_hash.c	2009-01-27 02:08:48 +0000
@@ -312,7 +312,7 @@ static int initialize_bucket(LF_HASH *, 
   See wt_init() for example.
 */
 void lf_hash_init(LF_HASH *hash, uint element_size, uint flags,
-                  uint key_offset, uint key_length, hash_get_key get_key,
+                  uint key_offset, uint key_length, my_hash_get_key get_key,
                   CHARSET_INFO *charset)
 {
   compile_time_assert(sizeof(LF_SLIST) == LF_HASH_OVERHEAD);

=== modified file 'mysys/my_error.c'
--- a/mysys/my_error.c	2009-02-05 06:27:55 +0000
+++ b/mysys/my_error.c	2009-02-11 12:11:20 +0000
@@ -22,7 +22,6 @@
 /* Max length of a error message. Should be kept in sync with MYSQL_ERRMSG_SIZE. */
 #define ERRMSGSIZE      (512)
 
-
 /* Define some external variables for error handling */
 
 /*
@@ -137,7 +136,7 @@ void my_printf_error(uint error, const c
 
 void my_printv_error(uint error, const char *format, myf MyFlags, va_list ap)
 {
-  char ebuff[ERRMSGSIZE+20];
+  char ebuff[ERRMSGSIZE];
   DBUG_ENTER("my_printv_error");
   DBUG_PRINT("my", ("nr: %d  MyFlags: %d  errno: %d  format: %s",
 		    error, MyFlags, errno, format));

=== modified file 'mysys/my_init.c'
--- a/mysys/my_init.c	2009-02-05 06:27:55 +0000
+++ b/mysys/my_init.c	2009-02-11 12:11:20 +0000
@@ -159,7 +159,7 @@ void my_end(int infoflag)
   {					/* Test if some file is left open */
     if (my_file_opened | my_stream_opened)
     {
-      char ebuff[512];
+      char ebuff[MYSYS_ERRMSG_SIZE];
       my_snprintf(ebuff, sizeof(ebuff), EE(EE_OPEN_WARNING),
                   my_file_opened, my_stream_opened);
       my_message_no_curses(EE_OPEN_WARNING, ebuff, ME_BELL);

=== modified file 'mysys/my_safehash.c'
--- a/mysys/my_safehash.c	2008-05-29 15:44:11 +0000
+++ b/mysys/my_safehash.c	2009-01-27 02:08:48 +0000
@@ -100,9 +100,9 @@ my_bool safe_hash_init(SAFE_HASH *hash, 
                        uchar *default_value)
 {
   DBUG_ENTER("safe_hash_init");
-  if (hash_init(&hash->hash, &my_charset_bin, elements,
-                0, 0, (hash_get_key) safe_hash_entry_get,
-                (void (*)(void*)) safe_hash_entry_free, 0))
+  if (my_hash_init(&hash->hash, &my_charset_bin, elements,
+                   0, 0, (my_hash_get_key) safe_hash_entry_get,
+                   (void (*)(void*)) safe_hash_entry_free, 0))
   {
     hash->default_value= 0;
     DBUG_RETURN(1);
@@ -133,7 +133,7 @@ void safe_hash_free(SAFE_HASH *hash)
   */
   if (hash->default_value)
   {
-    hash_free(&hash->hash);
+    my_hash_free(&hash->hash);
     rwlock_destroy(&hash->mutex);
     hash->default_value=0;
   }
@@ -160,7 +160,7 @@ uchar *safe_hash_search(SAFE_HASH *hash,
   uchar *result;
   DBUG_ENTER("safe_hash_search");
   rw_rdlock(&hash->mutex);
-  result= hash_search(&hash->hash, key, length);
+  result= my_hash_search(&hash->hash, key, length);
   rw_unlock(&hash->mutex);
   if (!result)
     result= def;
@@ -200,7 +200,7 @@ my_bool safe_hash_set(SAFE_HASH *hash, c
   DBUG_PRINT("enter",("key: %.*s  data: 0x%lx", length, key, (long) data));
 
   rw_wrlock(&hash->mutex);
-  entry= (SAFE_HASH_ENTRY*) hash_search(&hash->hash, key, length);
+  entry= (SAFE_HASH_ENTRY*) my_hash_search(&hash->hash, key, length);
 
   if (data == hash->default_value)
   {
@@ -214,7 +214,7 @@ my_bool safe_hash_set(SAFE_HASH *hash, c
     /* unlink entry from list */
     if ((*entry->prev= entry->next))
       entry->next->prev= entry->prev;
-    hash_delete(&hash->hash, (uchar*) entry);
+    my_hash_delete(&hash->hash, (uchar*) entry);
     goto end;
   }
   if (entry)
@@ -285,7 +285,7 @@ void safe_hash_change(SAFE_HASH *hash, u
       {
         if ((*entry->prev= entry->next))
           entry->next->prev= entry->prev;
-        hash_delete(&hash->hash, (uchar*) entry);
+        my_hash_delete(&hash->hash, (uchar*) entry);
       }
       else
         entry->data= new_data;

=== modified file 'mysys/safemalloc.c'
--- a/mysys/safemalloc.c	2009-02-05 06:27:55 +0000
+++ b/mysys/safemalloc.c	2009-02-11 12:11:20 +0000
@@ -147,14 +147,16 @@ void *_mymalloc(size_t size, const char 
       error_handler_hook=fatal_error_handler_hook;
     if (MyFlags & (MY_FAE+MY_WME))
     {
-      char buff[256];
+      char buff[MYSYS_ERRMSG_SIZE];
       my_errno=errno;
-      sprintf(buff,"Out of memory at line %d, '%s'", lineno, filename);
+      my_snprintf(buff, sizeof(buff), "Out of memory at line %d, '%s'",
+                  lineno, filename);
       my_message(EE_OUTOFMEMORY, buff, MYF(ME_BELL+ME_WAITTANG+ME_NOREFRESH));
-      sprintf(buff,"needed %lu byte (%luk), memory in use: %lu bytes (%luk)",
-	      (ulong) size, (ulong) (size + 1023L) / 1024L,
-	      (ulong) sf_malloc_max_memory,
-	      (ulong) (sf_malloc_max_memory + 1023L) / 1024L);
+      my_snprintf(buff, sizeof(buff),
+                  "needed %lu byte (%luk), memory in use: %lu bytes (%luk)",
+	          (ulong) size, (ulong) (size + 1023L) / 1024L,
+	          (ulong) sf_malloc_max_memory,
+	          (ulong) (sf_malloc_max_memory + 1023L) / 1024L);
       my_message(EE_OUTOFMEMORY, buff, MYF(ME_BELL+ME_WAITTANG+ME_NOREFRESH));
     }
     DBUG_PRINT("error",("Out of memory, in use: %ld at line %d, '%s'",

=== modified file 'mysys/thr_mutex.c'
--- a/mysys/thr_mutex.c	2008-12-04 21:02:09 +0000
+++ b/mysys/thr_mutex.c	2009-01-27 02:08:48 +0000
@@ -120,16 +120,16 @@ int safe_mutex_init(safe_mutex_t *mp,
       pthread_mutex_lock(&THR_LOCK_mutex);
       mp->id= ++safe_mutex_id;
       pthread_mutex_unlock(&THR_LOCK_mutex);
-      hash_init(mp->locked_mutex, &my_charset_bin,
-                1000,
-                offsetof(safe_mutex_deadlock_t, id),
-                sizeof(mp->id),
-                0, 0, HASH_UNIQUE);
-      hash_init(mp->used_mutex, &my_charset_bin,
-                1000,
-                offsetof(safe_mutex_t, id),
-                sizeof(mp->id),
-                0, 0, HASH_UNIQUE);
+      my_hash_init(mp->locked_mutex, &my_charset_bin,
+                   1000,
+                   offsetof(safe_mutex_deadlock_t, id),
+                   sizeof(mp->id),
+                   0, 0, HASH_UNIQUE);
+      my_hash_init(mp->used_mutex, &my_charset_bin,
+                   1000,
+                   offsetof(safe_mutex_t, id),
+                   sizeof(mp->id),
+                   0, 0, HASH_UNIQUE);
     }
   }
   else
@@ -267,7 +267,7 @@ int safe_mutex_lock(safe_mutex_t *mp, my
         */
         pthread_mutex_lock(&THR_LOCK_mutex);
 
-        if (!hash_search(mutex_root->locked_mutex, (uchar*) &mp->id, 0))
+        if (! my_hash_search(mutex_root->locked_mutex, (uchar*) &mp->id, 0))
         {
           safe_mutex_deadlock_t *deadlock;
           safe_mutex_t *mutex;
@@ -287,7 +287,7 @@ int safe_mutex_lock(safe_mutex_t *mp, my
           mutex= mutex_root;
           do
           {
-            if (hash_search(mp->locked_mutex, (uchar*) &mutex->id, 0))
+            if (my_hash_search(mp->locked_mutex, (uchar*) &mutex->id, 0))
             {
               print_deadlock_warning(mp, mutex);
               /* Mark wrong usage to avoid future warnings for same error */
@@ -592,8 +592,8 @@ void safe_mutex_free_deadlock_data(safe_
                     mp);
     pthread_mutex_unlock(&THR_LOCK_mutex);
 
-    hash_free(mp->used_mutex);
-    hash_free(mp->locked_mutex);
+    my_hash_free(mp->used_mutex);
+    my_hash_free(mp->locked_mutex);
     my_free(mp->locked_mutex, 0);
     mp->create_flags|= MYF_NO_DEADLOCK_DETECTION;
   }
@@ -702,12 +702,12 @@ static my_bool remove_from_locked_mutex(
                        (ulong) delete_mutex, (ulong) mp, 
                        delete_mutex->id, mp->id));
 
-  found= (safe_mutex_deadlock_t *) hash_search(mp->locked_mutex,
-                                               (uchar*) &delete_mutex->id, 0);
+  found= (safe_mutex_deadlock_t*) my_hash_search(mp->locked_mutex,
+                                                 (uchar*) &delete_mutex->id, 0);
   DBUG_ASSERT(found);
   if (found)
   {
-    if (hash_delete(mp->locked_mutex, (uchar*) found))
+    if (my_hash_delete(mp->locked_mutex, (uchar*) found))
     {
       DBUG_ASSERT(0);
     }
@@ -724,7 +724,7 @@ static my_bool remove_from_used_mutex(sa
   DBUG_PRINT("enter", ("delete_mutex: 0x%lx  mutex: 0x%lx  (id: %lu <- %lu)",
                        (ulong) mutex, (ulong) locked_mutex, 
                        mutex->id, locked_mutex->id));
-  if (hash_delete(locked_mutex->mutex->used_mutex, (uchar*) mutex))
+  if (my_hash_delete(locked_mutex->mutex->used_mutex, (uchar*) mutex))
   {
     DBUG_ASSERT(0);
   }

=== modified file 'scripts/mysql_system_tables.sql'
--- a/scripts/mysql_system_tables.sql	2008-12-24 10:48:24 +0000
+++ b/scripts/mysql_system_tables.sql	2009-02-02 12:28:30 +0000
@@ -86,5 +86,5 @@ CREATE TABLE IF NOT EXISTS backup_histor
 
 CREATE TABLE IF NOT EXISTS backup_progress ( backup_id BIGINT UNSIGNED NOT NULL COMMENT 'Key for backup_history table entries', object CHAR (30) NOT NULL DEFAULT '' COMMENT 'The object being operated on', start_time datetime NOT NULL DEFAULT 0 COMMENT 'The date/time of start of operation', stop_time datetime NOT NULL DEFAULT 0 COMMENT 'The date/time of end of operation', total_bytes BIGINT NOT NULL DEFAULT 0 COMMENT 'The size of the object in bytes', progress BIGINT UNSIGNED NOT NULL DEFAULT 0 COMMENT 'The number of bytes processed', error_num INT NOT NULL DEFAULT 0 COMMENT 'The error from this run 0 == none', notes CHAR(100) NOT NULL DEFAULT '' COMMENT 'Commentary from the backup engine') ENGINE=CSV DEFAULT CHARACTER SET utf8;
 
-CREATE TABLE IF NOT EXISTS ndb_binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts INT UNSIGNED NOT NULL, updates INT UNSIGNED NOT NULL, deletes INT UNSIGNED NOT NULL, schemaops INT UNSIGNED NOT NULL, orig_server_id INT UNSIGNED NOT NULL, orig_epoch BIGINT UNSIGNED NOT NULL, gci INT UNSIGNED NOT NULL, PRIMARY KEY(epoch, orig_server_id, orig_epoch)) ENGINE=MYISAM;
+CREATE TABLE IF NOT EXISTS ndb_binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts INT UNSIGNED NOT NULL, updates INT UNSIGNED NOT NULL, deletes INT UNSIGNED NOT NULL, schemaops INT UNSIGNED NOT NULL, orig_server_id INT UNSIGNED NOT NULL, orig_epoch BIGINT UNSIGNED NOT NULL, gci INT UNSIGNED NOT NULL, PRIMARY KEY(epoch, orig_server_id, orig_epoch)) ENGINE=MARIA;
 

=== modified file 'sql/backup/backup_aux.h'
--- a/sql/backup/backup_aux.h	2008-12-18 21:46:36 +0000
+++ b/sql/backup/backup_aux.h	2009-02-04 10:49:16 +0000
@@ -356,15 +356,15 @@ template<class A, class B>
 inline
 Map<A,B>::Map(size_t init_size)
 {
-  hash_init(&m_hash, &::my_charset_bin, init_size, 
-            0, sizeof(A), NULL, Node::del_key, MYF(0));
+  my_hash_init(&m_hash, &::my_charset_bin, init_size,
+               0, sizeof(A), NULL, Node::del_key, MYF(0));
 }
 
 template<class A, class B>
 inline
 Map<A,B>::~Map()
 {
-  hash_free(&m_hash);
+  my_hash_free(&m_hash);
 }
 
 /** 
@@ -386,7 +386,7 @@ template<class A, class B>
 inline
 B* Map<A,B>::operator[](const A &a) const
 {
-  Node *n= (Node*) hash_search(&m_hash, (uchar*) &a, sizeof(A));
+  Node *n= (Node*) my_hash_search(&m_hash, (uchar*) &a, sizeof(A));
   
   return n ? n->ptr : NULL;
 }

=== modified file 'sql/backup/backup_info.cc'
--- a/sql/backup/backup_info.cc	2008-12-18 21:46:36 +0000
+++ b/sql/backup/backup_info.cc	2009-02-04 10:49:16 +0000
@@ -389,10 +389,10 @@ Backup_info::Backup_info(backup::Logger 
 
   bzero(m_snap, sizeof(m_snap));
 
-  if (hash_init(&ts_hash, &::my_charset_bin, 16, 0, 0,
-                Ts_hash_node::get_key, Ts_hash_node::free, MYF(0))
+  if (my_hash_init(&ts_hash, &::my_charset_bin, 16, 0, 0,
+                   Ts_hash_node::get_key, Ts_hash_node::free, MYF(0))
       ||
-      hash_init(&dep_hash, &::my_charset_bin, 16, 0, 0,
+      my_hash_init(&dep_hash, &::my_charset_bin, 16, 0, 0,
                 Dep_node::get_key, Dep_node::free, MYF(0)))
   {
     // Allocation failed. Error has been reported, but not logged to backup logs
@@ -474,8 +474,8 @@ Backup_info::~Backup_info()
   while ((snap= it++))
     delete snap;
 
-  hash_free(&ts_hash);  
-  hash_free(&dep_hash);
+  my_hash_free(&ts_hash);  
+  my_hash_free(&dep_hash);
 }
 
 /**
@@ -529,7 +529,7 @@ backup::Image_info::Ts* Backup_info::add
   size_t klen= 0;
   uchar  *key= Ts_hash_node::get_key((const uchar*)&n0, &klen, TRUE);
 
-  Ts_hash_node *n1= (Ts_hash_node*) hash_search(&ts_hash, key, klen);
+  Ts_hash_node *n1= (Ts_hash_node*) my_hash_search(&ts_hash, key, klen);
 
   // if tablespace was found, return the catalogue entry stored in the hash
   if (n1)
@@ -1236,7 +1236,7 @@ int Backup_info::get_dep_node(const ::St
   size_t klen;
   uchar  *key= Dep_node::get_key((const uchar*)&n, &klen, TRUE);
 
-  node= (Dep_node*) hash_search(&dep_hash, key, klen);
+  node= (Dep_node*) my_hash_search(&dep_hash, key, klen);
 
   // if we have found node in the hash there is nothing more to do
   if (node)

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2009-01-18 23:21:43 +0000
+++ b/sql/ha_ndbcluster.cc	2009-02-09 13:34:12 +0000
@@ -213,7 +213,10 @@ static int update_status_variables(st_nd
                             ns->connected_port);
   }
   ns->number_of_replicas= 0;
-  ns->number_of_ready_data_nodes= c->get_no_ready();
+  {
+    int n= c->get_no_ready();
+    ns->number_of_ready_data_nodes= n > 0 ?  n : 0;
+  }
   ns->number_of_data_nodes= c->no_db_nodes();
   ns->connect_count= c->get_connect_count();
   return 0;
@@ -370,8 +373,8 @@ Thd_ndb::Thd_ndb()
   m_error= FALSE;
   m_error_code= 0;
   options= 0;
-  (void) hash_init(&open_tables, &my_charset_bin, 5, 0, 0,
-                   (hash_get_key)thd_ndb_share_get_key, 0, 0);
+  (void) my_hash_init(&open_tables, &my_charset_bin, 5, 0, 0,
+                   (my_hash_get_key)thd_ndb_share_get_key, 0, 0);
   m_unsent_bytes= 0;
   global_schema_lock_trans= NULL;
   global_schema_lock_count= 0;
@@ -387,7 +390,7 @@ Thd_ndb::~Thd_ndb()
     ndb= NULL;
   }
   changed_tables.empty();
-  hash_free(&open_tables);
+  my_hash_free(&open_tables);
   free_root(&m_batch_mem_root, MYF(0));
 }
 
@@ -2599,7 +2602,7 @@ int ha_ndbcluster::ordered_index_scan(co
   if (lm == NdbOperation::LM_Read)
     options.scan_flags|= NdbScanOperation::SF_KeyInfo;
   if (sorted)
-    options.scan_flags|= NdbScanOperation::SF_OrderBy;
+    options.scan_flags|= NdbScanOperation::SF_OrderByFull;
   if (descending)
     options.scan_flags|= NdbScanOperation::SF_Descending;
   const NdbRecord *key_rec= m_index[active_index].ndb_record_key;
@@ -4539,7 +4542,7 @@ int ha_ndbcluster::start_statement(THD *
   if (!thd_ndb->trans)
   {
     DBUG_ASSERT(thd_ndb->changed_tables.is_empty() == TRUE);
-    if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
+    if (thd->in_multi_stmt_transaction())
       trans_register_ha(thd, TRUE, ndbcluster_hton);
     DBUG_PRINT("trans",("Starting transaction"));      
     thd_ndb->trans= ndb->startTransaction();
@@ -4612,14 +4615,14 @@ int ha_ndbcluster::init_handler_for_stat
   }
 #endif
 
-  if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
+  if (thd->in_multi_stmt_transaction())
   {
     const void *key= m_table;
     HASH_SEARCH_STATE state;
     THD_NDB_SHARE *thd_ndb_share=
-      (THD_NDB_SHARE*)hash_first(&thd_ndb->open_tables, (uchar *)&key, sizeof(key), &state);
+      (THD_NDB_SHARE*)my_hash_first(&thd_ndb->open_tables, (uchar *)&key, sizeof(key), &state);
     while (thd_ndb_share && thd_ndb_share->key != key)
-      thd_ndb_share= (THD_NDB_SHARE*)hash_next(&thd_ndb->open_tables, (uchar *)&key, sizeof(key), &state);
+      thd_ndb_share= (THD_NDB_SHARE*)my_hash_next(&thd_ndb->open_tables, (uchar *)&key, sizeof(key), &state);
     if (thd_ndb_share == 0)
     {
       thd_ndb_share= (THD_NDB_SHARE *) alloc_root(&thd->transaction.mem_root,
@@ -4697,8 +4700,7 @@ int ha_ndbcluster::external_lock(THD *th
     {
       DBUG_PRINT("info", ("Rows has changed"));
 
-      if (thd_ndb->trans &&
-          thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
+      if (thd_ndb->trans && thd->in_multi_stmt_transaction())
       {
         DBUG_PRINT("info", ("Add share to list of changed tables, %p",
                             m_share));
@@ -4722,7 +4724,7 @@ int ha_ndbcluster::external_lock(THD *th
       DBUG_PRINT("trans", ("Last external_lock"));
       PRINT_OPTION_FLAGS(thd);
 
-      if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
+      if (!thd->in_multi_stmt_transaction())
       {
         if (thd_ndb->trans)
         {
@@ -4834,7 +4836,7 @@ static int ndbcluster_commit(handlerton 
     DBUG_PRINT("info", ("trans == NULL"));
     DBUG_RETURN(0);
   }
-  if (!all && (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
+  if (!all && thd->in_multi_stmt_transaction())
   {
     /*
       An odditity in the handler interface is that commit on handlerton
@@ -4937,7 +4939,7 @@ static int ndbcluster_rollback(handlerto
     DBUG_PRINT("info", ("trans == NULL"));
     DBUG_RETURN(0);
   }
-  if (!all && (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) &&
+  if (!all && thd->in_multi_stmt_transaction() &&
       (thd_ndb->save_point_count > 0))
   {
     /*
@@ -5754,8 +5756,8 @@ int ha_ndbcluster::create(const char *na
     */
     {
       uint length= (uint) strlen(name);
-      if ((share= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
-                                           (uchar*) name, length)))
+      if ((share= (NDB_SHARE*) my_hash_search(&ndbcluster_open_tables,
+                                              (uchar*) name, length)))
         handle_trailing_share(thd, share);
     }
     /*
@@ -7418,18 +7420,18 @@ int ndbcluster_find_files(handlerton *ht
                         NdbDictionary::Object::UserTable) != 0)
     ERR_RETURN(dict->getNdbError());
 
-  if (hash_init(&ndb_tables, system_charset_info,list.count,0,0,
-                (hash_get_key)tables_get_key,0,0))
+  if (my_hash_init(&ndb_tables, system_charset_info,list.count,0,0,
+                   (my_hash_get_key)tables_get_key,0,0))
   {
     DBUG_PRINT("error", ("Failed to init HASH ndb_tables"));
     DBUG_RETURN(-1);
   }
 
-  if (hash_init(&ok_tables, system_charset_info,32,0,0,
-                (hash_get_key)tables_get_key,0,0))
+  if (my_hash_init(&ok_tables, system_charset_info,32,0,0,
+                (my_hash_get_key)tables_get_key,0,0))
   {
     DBUG_PRINT("error", ("Failed to init HASH ok_tables"));
-    hash_free(&ndb_tables);
+    my_hash_free(&ndb_tables);
     DBUG_RETURN(-1);
   }  
 
@@ -7470,7 +7472,8 @@ int ndbcluster_find_files(handlerton *ht
   {
     bool file_on_disk= FALSE;
     DBUG_PRINT("info", ("%s", file_name->str));
-    if (hash_search(&ndb_tables, (uchar*) file_name->str, file_name->length))
+    if (my_hash_search(&ndb_tables, (uchar*) file_name->str,
+                       file_name->length))
     {
       build_table_filename(name, sizeof(name), db, file_name->str, reg_ext, 0);
       if (my_access(name, F_OK))
@@ -7503,10 +7506,10 @@ int ndbcluster_find_files(handlerton *ht
       if (file_on_disk)
       {
 	// Ignore this ndb table 
- 	uchar *record= hash_search(&ndb_tables, (uchar*) file_name->str,
-                                   file_name->length);
+        uchar *record= my_hash_search(&ndb_tables, (uchar*) file_name->str,
+                                      file_name->length);
 	DBUG_ASSERT(record);
-	hash_delete(&ndb_tables, record);
+	my_hash_delete(&ndb_tables, record);
 	push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
 			    ER_TABLE_EXISTS_ERROR,
 			    "Local table %s.%s shadows ndb table",
@@ -7540,7 +7543,7 @@ int ndbcluster_find_files(handlerton *ht
       build_table_filename(name, sizeof(name), db, "", "", 0);
     for (i= 0; i < ok_tables.records; i++)
     {
-      file_name_str= (char*)hash_element(&ok_tables, i);
+      file_name_str= (char*)my_hash_element(&ok_tables, i);
       end= end1 +
         tablename_to_filename(file_name_str, end1, sizeof(name) - (end1 - name));
       pthread_mutex_lock(&LOCK_open);
@@ -7556,8 +7559,9 @@ int ndbcluster_find_files(handlerton *ht
   List<char> create_list;
   for (i= 0 ; i < ndb_tables.records ; i++)
   {
-    file_name_str= (char*) hash_element(&ndb_tables, i);
-    if (!hash_search(&ok_tables, (uchar*) file_name_str, strlen(file_name_str)))
+    file_name_str= (char*) my_hash_element(&ndb_tables, i);
+    if (!my_hash_search(&ok_tables, (uchar*) file_name_str,
+                        strlen(file_name_str)))
     {
       build_table_filename(name, sizeof(name), db, file_name_str, reg_ext, 0);
       if (my_access(name, F_OK))
@@ -7632,8 +7636,8 @@ int ndbcluster_find_files(handlerton *ht
 
   pthread_mutex_unlock(&LOCK_open);
 
-  hash_free(&ok_tables);
-  hash_free(&ndb_tables);
+  my_hash_free(&ok_tables);
+  my_hash_free(&ndb_tables);
 
   // Delete schema file from files
   if (!strcmp(db, NDB_REP_DB))
@@ -7738,8 +7742,8 @@ static int ndbcluster_init(void *p)
   if (ndbcluster_connect(connect_callback))
     goto ndbcluster_init_error;
 
-  (void) hash_init(&ndbcluster_open_tables,system_charset_info,32,0,0,
-                   (hash_get_key) ndbcluster_get_key,0,0);
+  (void) my_hash_init(&ndbcluster_open_tables,system_charset_info,32,0,0,
+                      (my_hash_get_key) ndbcluster_get_key,0,0);
 #ifdef HAVE_NDB_BINLOG
   /* start the ndb injector thread */
   if (ndbcluster_binlog_start())
@@ -7752,7 +7756,7 @@ static int ndbcluster_init(void *p)
   if (pthread_create(&tmp, &connection_attrib, ndb_util_thread_func, 0))
   {
     DBUG_PRINT("error", ("Could not create ndb utility thread"));
-    hash_free(&ndbcluster_open_tables);
+    my_hash_free(&ndbcluster_open_tables);
     pthread_mutex_destroy(&ndbcluster_mutex);
     pthread_mutex_destroy(&LOCK_ndb_util_thread);
     pthread_cond_destroy(&COND_ndb_util_thread);
@@ -7770,7 +7774,7 @@ static int ndbcluster_init(void *p)
   if (!ndb_util_thread_running)
   {
     DBUG_PRINT("error", ("ndb utility thread exited prematurely"));
-    hash_free(&ndbcluster_open_tables);
+    my_hash_free(&ndbcluster_open_tables);
     pthread_mutex_destroy(&ndbcluster_mutex);
     pthread_mutex_destroy(&LOCK_ndb_util_thread);
     pthread_cond_destroy(&COND_ndb_util_thread);
@@ -7818,7 +7822,7 @@ static int ndbcluster_end(handlerton *ht
     while (ndbcluster_open_tables.records)
     {
       NDB_SHARE *share=
-        (NDB_SHARE*) hash_element(&ndbcluster_open_tables, 0);
+        (NDB_SHARE*) my_hash_element(&ndbcluster_open_tables, 0);
 #ifndef DBUG_OFF
       fprintf(stderr, "NDB: table share %s with use_count %d not freed\n",
               share->key, share->use_count);
@@ -7828,7 +7832,7 @@ static int ndbcluster_end(handlerton *ht
     pthread_mutex_unlock(&ndbcluster_mutex);
   }
 #endif
-  hash_free(&ndbcluster_open_tables);
+  my_hash_free(&ndbcluster_open_tables);
 
   ndbcluster_disconnect();
 
@@ -8151,9 +8155,9 @@ uint ndb_get_commitcount(THD *thd, char 
   build_table_filename(name, sizeof(name), dbname, tabname, "", 0);
   DBUG_PRINT("enter", ("name: %s", name));
   pthread_mutex_lock(&ndbcluster_mutex);
-  if (!(share=(NDB_SHARE*) hash_search(&ndbcluster_open_tables,
-                                       (uchar*) name,
-                                       strlen(name))))
+  if (!(share=(NDB_SHARE*) my_hash_search(&ndbcluster_open_tables,
+                                          (uchar*) name,
+                                          strlen(name))))
   {
     pthread_mutex_unlock(&ndbcluster_mutex);
     DBUG_PRINT("info", ("Table %s not found in ndbcluster_open_tables", name));
@@ -8278,7 +8282,7 @@ ndbcluster_cache_retrieval_allowed(THD *
   DBUG_PRINT("enter", ("dbname: %s, tabname: %s",
                        dbname, tabname));
 
-  if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
+  if (thd->in_multi_stmt_transaction())
   {
     /* Don't allow qc to be used if table has been previously
        modified in transaction */
@@ -8362,7 +8366,7 @@ ha_ndbcluster::register_query_cache_tabl
   DBUG_PRINT("enter",("dbname: %s, tabname: %s",
 		      m_dbname, m_tabname));
 
-  if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
+  if (thd->in_multi_stmt_transaction())
   {
     /* Don't allow qc to be used if table has been previously
        modified in transaction */
@@ -8448,7 +8452,7 @@ static void print_ndbcluster_open_tables
   fprintf(DBUG_FILE, ">ndbcluster_open_tables\n");
   for (uint i= 0; i < ndbcluster_open_tables.records; i++)
     print_share("",
-                (NDB_SHARE*)hash_element(&ndbcluster_open_tables, i));
+                (NDB_SHARE*)my_hash_element(&ndbcluster_open_tables, i));
   fprintf(DBUG_FILE, "<ndbcluster_open_tables\n");
   DBUG_UNLOCK_FILE;
 }
@@ -8579,7 +8583,7 @@ int handle_trailing_share(THD *thd, NDB_
     at the cost of a possible mem leak, by "renaming" the share
     - First remove from hash
   */
-  hash_delete(&ndbcluster_open_tables, (uchar*) share);
+  my_hash_delete(&ndbcluster_open_tables, (uchar*) share);
 
   /*
     now give it a new name, just a running number
@@ -8631,12 +8635,12 @@ int ndbcluster_rename_share(THD *thd, ND
   uint new_length= (uint) strlen(share->new_key);
   DBUG_PRINT("ndbcluster_rename_share", ("old_key: %s  old__length: %d",
                               share->key, share->key_length));
-  if ((tmp= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
-                                     (uchar*) share->new_key, new_length)))
+  if ((tmp= (NDB_SHARE*) my_hash_search(&ndbcluster_open_tables,
+                                        (uchar*) share->new_key, new_length)))
     handle_trailing_share(thd, tmp, have_lock_open);
 
   /* remove the share from hash */
-  hash_delete(&ndbcluster_open_tables, (uchar*) share);
+  my_hash_delete(&ndbcluster_open_tables, (uchar*) share);
   dbug_print_open_tables();
 
   /* save old stuff if insert should fail */
@@ -8739,9 +8743,9 @@ NDB_SHARE *ndbcluster_get_share(const ch
 
   if (!have_lock)
     pthread_mutex_lock(&ndbcluster_mutex);
-  if (!(share= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
-                                        (uchar*) key,
-                                        length)))
+  if (!(share= (NDB_SHARE*) my_hash_search(&ndbcluster_open_tables,
+                                           (uchar*) key,
+                                           length)))
   {
     if (!create_if_not_exists)
     {
@@ -8822,7 +8826,7 @@ void ndbcluster_real_free_share(NDB_SHAR
   if (ndb_extra_logging > 9)
     sql_print_information ("ndbcluster_real_free_share: %s use_count: %u", (*share)->key, (*share)->use_count);
 
-  hash_delete(&ndbcluster_open_tables, (uchar*) *share);
+  my_hash_delete(&ndbcluster_open_tables, (uchar*) *share);
   thr_lock_delete(&(*share)->lock);
   pthread_mutex_destroy(&(*share)->mutex);
 
@@ -9275,18 +9279,18 @@ multi_range_row(uchar *p)
 }
 
 /* Get and put upper layer custom char *, use memcpy() for unaligned access. */
-static char *
-multi_range_get_custom(HANDLER_BUFFER *buffer, int range_no)
+static void
+multi_range_get_custom(HANDLER_BUFFER *buffer, int range_no, char **pcustom)
 {
   DBUG_ASSERT(range_no < MRR_MAX_RANGES);
-  return ((char **)(buffer->buffer))[range_no];
+  memcpy(pcustom, (char **)(buffer->buffer) + range_no, sizeof(*pcustom));
 }
 
 static void
 multi_range_put_custom(HANDLER_BUFFER *buffer, int range_no, char *custom)
 {
   DBUG_ASSERT(range_no < MRR_MAX_RANGES);
-  ((char **)(buffer->buffer))[range_no]= custom;
+  memcpy((char **)(buffer->buffer) + range_no, &custom, sizeof(custom));
 }
 
 /*
@@ -9346,12 +9350,10 @@ ha_ndbcluster::multi_range_read_info_con
   KEY* key_info= table->key_info + keyno;
   ulong reclength= table_share->reclength;
   uint entry_size= multi_range_max_entry(key_type, reclength);
-  ulong total_bufsize;
+  ulong total_bufsize= 0;
   uint save_bufsize= *bufsz;
   DBUG_ENTER("ha_ndbcluster::multi_range_read_info_const");
 
-  total_bufsize= multi_range_fixed_size(n_ranges_arg);
-
   seq_it= seq->init(seq_init_param, n_ranges, *flags);
   while (!seq->next(seq_it, &range))
   {
@@ -9382,6 +9384,9 @@ ha_ndbcluster::multi_range_read_info_con
                             reclength);
   }
 
+  /* n_ranges_arg may not be calculated, so we use actual calculated instead */
+  total_bufsize+= multi_range_fixed_size(n_ranges);
+
   if (total_rows != HA_POS_ERROR)
   {
     if (uses_blob_value(table->read_set) ||
@@ -9693,7 +9698,7 @@ int ha_ndbcluster::multi_range_start_ret
         if (lm == NdbOperation::LM_Read)
           options.scan_flags|= NdbScanOperation::SF_KeyInfo;
         if (mrr_is_output_sorted)
-          options.scan_flags|= NdbScanOperation::SF_OrderBy;
+          options.scan_flags|= NdbScanOperation::SF_OrderByFull;
 
         options.parallel=parallelism;
 
@@ -9883,8 +9888,8 @@ int ha_ndbcluster::multi_range_read_next
           m_active_cursor= NULL;
 
           /* Return the record. */
-          *range_info= multi_range_get_custom(multi_range_buffer,
-                                              expected_range_no);
+          multi_range_get_custom(multi_range_buffer,
+                                 expected_range_no, range_info);
           memcpy(table->record[0], multi_range_row(row_buf),
                  table_share->reclength);
           DBUG_RETURN(0);
@@ -9895,8 +9900,8 @@ int ha_ndbcluster::multi_range_read_next
             int res;
             if ((res= read_multi_range_fetch_next()) != 0)
             {
-              *range_info= multi_range_get_custom(multi_range_buffer,
-                                                  expected_range_no);
+              multi_range_get_custom(multi_range_buffer,
+                                     expected_range_no, range_info);
               first_running_range++;
               m_multi_range_result_ptr=
                 multi_range_next_entry(m_multi_range_result_ptr,
@@ -9927,8 +9932,8 @@ int ha_ndbcluster::multi_range_read_next
             */
             if (!mrr_is_output_sorted || expected_range_no == current_range_no)
             {
-              *range_info= multi_range_get_custom(multi_range_buffer,
-                                                  current_range_no);
+              multi_range_get_custom(multi_range_buffer,
+                                     current_range_no, range_info);
               /* Copy out data from the new row. */
               unpack_record(table->record[0], m_next_row);
               /*
@@ -10229,7 +10234,7 @@ pthread_handler_t ndb_util_thread_func(v
     }
     for (i= 0, open_count= 0; i < record_count; i++)
     {
-      share= (NDB_SHARE *)hash_element(&ndbcluster_open_tables, i);
+      share= (NDB_SHARE *)my_hash_element(&ndbcluster_open_tables, i);
 #ifdef HAVE_NDB_BINLOG
       if ((share->use_count - (int) (share->op != 0) - (int) (share->op != 0))
           <= 0)
@@ -11138,20 +11143,20 @@ int ha_ndbcluster::alter_table_phase1(TH
          goto err;
        }
        /*
-	 If the user has not specified the field format
-	 make it dynamic to enable on-line add attribute
+         If the user has not specified the field format
+         make it dynamic to enable on-line add attribute
        */
        if (field->column_format() == COLUMN_FORMAT_TYPE_DEFAULT &&
            create_info->row_type == ROW_TYPE_DEFAULT &&
            col.getDynamic())
        {
-	 push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+         push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                              ER_ILLEGAL_HA_CREATE_OPTION,
-		             "Converted FIXED field to DYNAMIC "
-			     "to enable on-line ADD COLUMN",
+                             "Converted FIXED field to DYNAMIC "
+                             "to enable on-line ADD COLUMN",
                              field->field_name);
-	}
-        new_tab->addColumn(col);
+       }
+       new_tab->addColumn(col);
      }
   }
 
@@ -11231,8 +11236,11 @@ int ha_ndbcluster::alter_table_phase2(TH
 
 #ifdef HAVE_NDB_BINLOG
   if (!ndbcluster_has_global_schema_lock(get_thd_ndb(thd)))
-    DBUG_RETURN(ndbcluster_no_global_schema_lock_abort
-                (thd, "ha_ndbcluster::alter_table_phase2"));
+  {
+    error= ndbcluster_no_global_schema_lock_abort
+      (thd, "ha_ndbcluster::alter_table_phase2");
+    goto err;
+  }
 #endif
 
   if ((*alter_flags & dropping).is_set())
@@ -11252,11 +11260,11 @@ int ha_ndbcluster::alter_table_phase2(TH
  err:
   if (error)
   {
-    set_ndb_share_state(m_share, NSS_INITIAL);
     /* ndb_share reference schema free */
     DBUG_PRINT("NDB_SHARE", ("%s binlog schema free  use_count: %u",
                              m_share->key, m_share->use_count));
   }
+  set_ndb_share_state(m_share, NSS_INITIAL);
   free_share(&m_share); // Decrease ref_count
   delete alter_data;
   DBUG_RETURN(error);

=== modified file 'sql/ha_ndbcluster_binlog.cc'
--- a/sql/ha_ndbcluster_binlog.cc	2008-12-24 10:48:24 +0000
+++ b/sql/ha_ndbcluster_binlog.cc	2009-02-05 12:49:39 +0000
@@ -37,12 +37,15 @@
 #endif
 
 extern my_bool opt_ndb_log_orig;
+extern my_bool opt_ndb_log_bin;
 
 extern my_bool opt_ndb_log_update_as_write;
 extern my_bool opt_ndb_log_updated_only;
 
 extern my_bool ndbcluster_silent;
 
+extern my_bool ndb_log_binlog_index;
+
 /*
   defines for cluster replication table names
 */
@@ -989,9 +992,9 @@ static NDB_SHARE *ndbcluster_check_ndb_a
 {
   pthread_mutex_lock(&ndbcluster_mutex);
 
-  void *share= hash_search(&ndbcluster_open_tables, 
-                           (uchar*) NDB_APPLY_TABLE_FILE,
-                           sizeof(NDB_APPLY_TABLE_FILE) - 1);
+  void *share= my_hash_search(&ndbcluster_open_tables,
+                              (uchar*) NDB_APPLY_TABLE_FILE,
+                              sizeof(NDB_APPLY_TABLE_FILE) - 1);
   DBUG_PRINT("info",("ndbcluster_check_ndb_apply_status_share %s %p",
                      NDB_APPLY_TABLE_FILE, share));
   pthread_mutex_unlock(&ndbcluster_mutex);
@@ -1007,9 +1010,9 @@ static NDB_SHARE *ndbcluster_check_ndb_s
 {
   pthread_mutex_lock(&ndbcluster_mutex);
 
-  void *share= hash_search(&ndbcluster_open_tables, 
-                           (uchar*) NDB_SCHEMA_TABLE_FILE,
-                           sizeof(NDB_SCHEMA_TABLE_FILE) - 1);
+  void *share= my_hash_search(&ndbcluster_open_tables,
+                              (uchar*) NDB_SCHEMA_TABLE_FILE,
+                              sizeof(NDB_SCHEMA_TABLE_FILE) - 1);
   DBUG_PRINT("info",("ndbcluster_check_ndb_schema_share %s %p",
                      NDB_SCHEMA_TABLE_FILE, share));
   pthread_mutex_unlock(&ndbcluster_mutex);
@@ -2633,8 +2636,8 @@ ndb_binlog_thread_handle_schema_event_po
       {
         pthread_mutex_lock(&ndbcluster_mutex);
         NDB_SCHEMA_OBJECT *ndb_schema_object=
-          (NDB_SCHEMA_OBJECT*) hash_search(&ndb_schema_objects,
-                                           (uchar*) key, strlen(key));
+          (NDB_SCHEMA_OBJECT*) my_hash_search(&ndb_schema_objects,
+                                              (uchar*) key, strlen(key));
         if (ndb_schema_object)
         {
           pthread_mutex_lock(&ndb_schema_object->mutex);
@@ -3068,21 +3071,23 @@ ndb_add_ndb_binlog_index(THD *thd, ndb_b
   */
   do
   {
+    ulonglong epoch= 0, orig_epoch= 0;
+    uint orig_server_id= 0;
     empty_record(ndb_binlog_index);
 
     ndb_binlog_index->field[0]->store(first->master_log_pos);
     ndb_binlog_index->field[1]->store(first->master_log_file,
                                       strlen(first->master_log_file),
                                       &my_charset_bin);
-    ndb_binlog_index->field[2]->store(first->epoch);
+    ndb_binlog_index->field[2]->store(epoch= first->epoch);
     if (ndb_binlog_index->s->fields > 7)
     {
       ndb_binlog_index->field[3]->store(row->n_inserts);
       ndb_binlog_index->field[4]->store(row->n_updates);
       ndb_binlog_index->field[5]->store(row->n_deletes);
       ndb_binlog_index->field[6]->store(row->n_schemaops);
-      ndb_binlog_index->field[7]->store(row->orig_server_id);
-      ndb_binlog_index->field[8]->store(row->orig_epoch);
+      ndb_binlog_index->field[7]->store(orig_server_id= row->orig_server_id);
+      ndb_binlog_index->field[8]->store(orig_epoch= row->orig_epoch);
       ndb_binlog_index->field[9]->store(first->gci);
       row= row->next;
     }
@@ -3103,7 +3108,17 @@ ndb_add_ndb_binlog_index(THD *thd, ndb_b
 
     if ((error= ndb_binlog_index->file->ha_write_row(ndb_binlog_index->record[0])))
     {
-      sql_print_error("NDB Binlog: Writing row to ndb_binlog_index: %d", error);
+      char tmp[128];
+      if (ndb_binlog_index->s->fields > 7)
+        my_snprintf(tmp, sizeof(tmp), "%u/%u,%u,%u/%u",
+                    uint(epoch >> 32), uint(epoch),
+                    orig_server_id,
+                    uint(orig_epoch >> 32), uint(orig_epoch));
+
+      else
+        my_snprintf(tmp, sizeof(tmp), "%u/%u", uint(epoch >> 32), uint(epoch));
+      sql_print_error("NDB Binlog: Writing row (%s) to ndb_binlog_index: %d",
+                      tmp, error);
       error= -1;
       goto add_ndb_binlog_index_err;
     }
@@ -3270,8 +3285,8 @@ int ndbcluster_create_binlog_setup(THD *
   pthread_mutex_lock(&ndbcluster_mutex);
 
   /* Handle any trailing share */
-  NDB_SHARE *share= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
-                                             (uchar*) key, key_len);
+  NDB_SHARE *share= (NDB_SHARE*) my_hash_search(&ndbcluster_open_tables,
+                                                (uchar*) key, key_len);
 
   if (share && share_may_exist)
   {
@@ -4595,9 +4610,9 @@ static NDB_SCHEMA_OBJECT *ndb_get_schema
   if (!have_lock)
     pthread_mutex_lock(&ndbcluster_mutex);
   while (!(ndb_schema_object=
-           (NDB_SCHEMA_OBJECT*) hash_search(&ndb_schema_objects,
-                                            (uchar*) key,
-                                            length)))
+           (NDB_SCHEMA_OBJECT*) my_hash_search(&ndb_schema_objects,
+                                               (uchar*) key,
+                                               length)))
   {
     if (!create_if_not_exists)
     {
@@ -4646,7 +4661,7 @@ static void ndb_free_schema_object(NDB_S
   if (!--(*ndb_schema_object)->use_count)
   {
     DBUG_PRINT("info", ("use_count: %d", (*ndb_schema_object)->use_count));
-    hash_delete(&ndb_schema_objects, (uchar*) *ndb_schema_object);
+    my_hash_delete(&ndb_schema_objects, (uchar*) *ndb_schema_object);
     pthread_mutex_destroy(&(*ndb_schema_object)->mutex);
     my_free((uchar*) *ndb_schema_object, MYF(0));
     *ndb_schema_object= 0;
@@ -4667,7 +4682,6 @@ pthread_handler_t ndb_binlog_thread_func
   Ndb *i_ndb= 0;
   Ndb *s_ndb= 0;
   Thd_ndb *thd_ndb=0;
-  int ndb_update_ndb_binlog_index= 1;
   injector *inj= injector::instance();
   uint incident_id= 0;
 
@@ -4750,8 +4764,8 @@ pthread_handler_t ndb_binlog_thread_func
   }
 
   /* init hash for schema object distribution */
-  (void) hash_init(&ndb_schema_objects, system_charset_info, 32, 0, 0,
-                   (hash_get_key)ndb_schema_objects_get_key, 0, 0);
+  (void) my_hash_init(&ndb_schema_objects, system_charset_info, 32, 0, 0,
+                   (my_hash_get_key)ndb_schema_objects_get_key, 0, 0);
 
   /*
     Expose global reference to our ndb object.
@@ -4764,7 +4778,7 @@ pthread_handler_t ndb_binlog_thread_func
   injector_ndb= i_ndb;
   schema_ndb= s_ndb;
 
-  if (opt_bin_log)
+  if (opt_bin_log && opt_ndb_log_bin)
   {
     ndb_binlog_running= TRUE;
   }
@@ -5093,6 +5107,44 @@ restart:
     {
       DBUG_PRINT("info", ("pollEvents res: %d", res));
       THD_SET_PROC_INFO(thd, "Processing events");
+      uchar apply_status_buf[512];
+      TABLE *apply_status_table= NULL;
+      if (ndb_apply_status_share)
+      {
+        /*
+          We construct the buffer to write the apply status binlog
+          event here, as the table->record[0] buffer is referenced
+          by the apply status event operation, and will be filled
+          with data at the nextEvent call if the first event should
+          happen to be from the apply status table
+        */
+        Ndb_event_data *event_data= ndb_apply_status_share->event_data;
+        if (!event_data)
+        {
+          DBUG_ASSERT(ndb_apply_status_share->op);
+          event_data= 
+            (Ndb_event_data *) ndb_apply_status_share->op->getCustomData();
+          DBUG_ASSERT(event_data);
+        }
+        apply_status_table= event_data->table;
+
+        /* 
+           Initialize apply_status_table->record[0] 
+        */
+        empty_record(apply_status_table);
+
+        apply_status_table->field[0]->store((longlong)::server_id);
+        /*
+          gci is added later, just before writing to binlog as gci
+          is unknown here
+        */
+        apply_status_table->field[2]->store("", 0, &my_charset_bin);
+        apply_status_table->field[3]->store((longlong)0);
+        apply_status_table->field[4]->store((longlong)0);
+        DBUG_ASSERT(sizeof(apply_status_buf) >= apply_status_table->s->reclength);
+        memcpy(apply_status_buf, apply_status_table->record[0],
+               apply_status_table->s->reclength);
+      }
       NdbEventOperation *pOp= i_ndb->nextEvent();
       ndb_binlog_index_row _row;
       while (pOp != NULL)
@@ -5197,44 +5249,30 @@ restart:
         }
         if (trans.good())
         {
-          if (ndb_apply_status_share)
+          if (apply_status_table)
           {
-            Ndb_event_data *event_data= 0;
-            if (ndb_apply_status_share->event_data)
-            {
-              event_data= ndb_apply_status_share->event_data;
-            }
-            else if (ndb_apply_status_share->op)
-            {
-              event_data= 
-                (Ndb_event_data *) ndb_apply_status_share->op->getCustomData();
-            }
-            DBUG_ASSERT(event_data);
-            TABLE *table= event_data->table;
-
 #ifndef DBUG_OFF
-            const LEX_STRING& name= table->s->table_name;
+            const LEX_STRING& name= apply_status_table->s->table_name;
             DBUG_PRINT("info", ("use_table: %.*s",
                                 (int) name.length, name.str));
 #endif
-            injector::transaction::table tbl(table, TRUE);
+            injector::transaction::table tbl(apply_status_table, TRUE);
             IF_DBUG(int ret=) trans.use_table(::server_id, tbl);
             DBUG_ASSERT(ret == 0);
 
-	    /* 
-	       Intialize table->record[0] 
-	    */
-	    empty_record(table);
-
-            table->field[0]->store((longlong)::server_id);
-            table->field[1]->store((longlong)gci);
-            table->field[2]->store("", 0, &my_charset_bin);
-            table->field[3]->store((longlong)0);
-            table->field[4]->store((longlong)0);
+            /* add the gci to the record */
+            Field *field= apply_status_table->field[1];
+            my_ptrdiff_t row_offset=
+              (my_ptrdiff_t) (apply_status_buf - apply_status_table->record[0]);
+            field->move_field_offset(row_offset);
+            field->store((longlong)gci);
+            field->move_field_offset(-row_offset);
+
             trans.write_row(::server_id,
-                            injector::transaction::table(table, TRUE),
-                            &table->s->all_set, table->s->fields,
-                            table->record[0]);
+                            injector::transaction::table(apply_status_table, TRUE),
+                            &apply_status_table->s->all_set,
+                            apply_status_table->s->fields,
+                            apply_status_buf);
           }
           else
           {
@@ -5360,7 +5398,7 @@ restart:
           rows->master_log_pos= start.file_pos();
 
           DBUG_PRINT("info", ("COMMIT gci: %lu", (ulong) gci));
-          if (ndb_update_ndb_binlog_index)
+          if (ndb_log_binlog_index)
           {
             ndb_add_ndb_binlog_index(thd, rows);
           }
@@ -5508,7 +5546,7 @@ err:
     i_ndb= 0;
   }
 
-  hash_free(&ndb_schema_objects);
+  my_hash_free(&ndb_schema_objects);
 
   net_end(&thd->net);
   thd->cleanup();

=== modified file 'sql/handler.cc'
--- a/sql/handler.cc	2009-02-03 09:16:53 +0000
+++ b/sql/handler.cc	2009-02-09 13:36:40 +0000
@@ -1489,7 +1489,7 @@ static my_bool xarecover_handlerton(THD 
         }
         // recovery mode
         if (info->commit_list ?
-            hash_search(info->commit_list, (uchar *)&x, sizeof(x)) != 0 :
+            my_hash_search(info->commit_list, (uchar *)&x, sizeof(x)) != 0 :
             tc_heuristic_recover == TC_HEURISTIC_RECOVER_COMMIT)
         {
 #ifndef DBUG_OFF
@@ -1609,7 +1609,7 @@ bool mysql_xa_recover(THD *thd)
     DBUG_RETURN(1);
 
   pthread_mutex_lock(&LOCK_xid_cache);
-  while ((xs= (XID_STATE*)hash_element(&xid_cache, i++)))
+  while ((xs= (XID_STATE*) my_hash_element(&xid_cache, i++)))
   {
     if (xs->xa_state==XA_PREPARED)
     {
@@ -4362,17 +4362,20 @@ scan_it_again:
   @retval other Error
 */
 
-int DsMrr_impl::dsmrr_init(handler *h, KEY *key,
-                           RANGE_SEQ_IF *seq_funcs, void *seq_init_param,
-                           uint n_ranges, uint mode, HANDLER_BUFFER *buf)
+int DsMrr_impl::dsmrr_init(handler *h_arg, RANGE_SEQ_IF *seq_funcs, 
+                           void *seq_init_param, uint n_ranges, uint mode,
+                           HANDLER_BUFFER *buf)
 {
   uint elem_size;
-  uint keyno;
   Item *pushed_cond= NULL;
   handler *new_h2= 0;
   DBUG_ENTER("DsMrr_impl::dsmrr_init");
-  keyno= h->active_index;
 
+  /*
+    index_merge may invoke a scan on an object for which dsmrr_info[_const]
+    has not been called, so set the owner handler here as well.
+  */
+  h= h_arg;
   if (mode & HA_MRR_USE_DEFAULT_IMPL || mode & HA_MRR_SORTED)
   {
     use_default_impl= TRUE;
@@ -4393,17 +4396,28 @@ int DsMrr_impl::dsmrr_init(handler *h, K
                       elem_size;
   rowids_buf_end= rowids_buf_last;
 
+    /*
+    There can be two cases:
+    - This is the first call since index_init(), h2==NULL
+       Need to setup h2 then.
+    - This is not the first call, h2 is initialized and set up appropriately.
+       The caller might have called h->index_init(), need to switch h to
+       rnd_pos calls.
+  */
   if (!h2)
   {
     /* Create a separate handler object to do rndpos() calls. */
     THD *thd= current_thd;
+    /*
+      ::clone() takes up a lot of stack, especially on 64 bit platforms.
+      The constant 5 is an empiric result.
+    */
+    if (check_stack_overrun(thd, 5*STACK_MIN_SIZE, (uchar*) &new_h2))
+      DBUG_RETURN(1);
+    DBUG_ASSERT(h->active_index != MAX_KEY);
+    uint mrr_keyno= h->active_index;
 
-  /*
-    ::clone() takes up a lot of stack, especially on 64 bit platforms.
-    The constant 5 is an empiric result.
-  */
-  if (check_stack_overrun(thd, 5*STACK_MIN_SIZE, (uchar*) &new_h2))
-    DBUG_RETURN(1);
+    /* Create a separate handler object to do rndpos() calls. */
     if (!(new_h2= h->clone(thd->mem_root)) || 
         new_h2->ha_external_lock(thd, F_RDLCK))
     {
@@ -4411,31 +4425,55 @@ int DsMrr_impl::dsmrr_init(handler *h, K
       DBUG_RETURN(1);
     }
 
-    if (keyno == h->pushed_idx_cond_keyno)
+    if (mrr_keyno == h->pushed_idx_cond_keyno)
       pushed_cond= h->pushed_idx_cond;
+
+    /*
+      Caution: this call will invoke this->dsmrr_close(). Do not put the
+      created secondary table handler into this->h2 or it will delete it.
+    */
     if (h->ha_index_end())
     {
-      new_h2= h2;
+      h2=new_h2;
       goto error;
     }
 
-    h2= new_h2;
+    h2= new_h2; /* Ok, now can put it into h2 */
     table->prepare_for_position();
-    new_h2->extra(HA_EXTRA_KEYREAD);
+    h2->extra(HA_EXTRA_KEYREAD);
   
-    if (h2->ha_index_init(keyno, FALSE))
+    if (h2->ha_index_init(mrr_keyno, FALSE))
+      goto error;
+
+    use_default_impl= FALSE;
+    if (pushed_cond)
+      h2->idx_cond_push(mrr_keyno, pushed_cond);
+  }
+  else
+  {
+    /* 
+      We get here when the access alternates between MRR scan(s) and non-MRR
+      scans.
+
+      Calling h->index_end() will invoke dsmrr_close() for this object,
+      which will delete h2. We need to keep it, so put it away and don't
+      let it be deleted:
+    */
+    handler *save_h2= h2;
+    h2= NULL;
+    int res= (h->inited == handler::INDEX && h->ha_index_end());
+    h2= save_h2;
+    use_default_impl= FALSE;
+    if (res)
       goto error;
   }
 
   if (h2->handler::multi_range_read_init(seq_funcs, seq_init_param, n_ranges,
-                                         mode, buf))
-    goto error;
-  
-  if (pushed_cond)
-    h2->idx_cond_push(keyno, pushed_cond);
-  if (dsmrr_fill_buffer(new_h2))
+                                          mode, buf) || 
+      dsmrr_fill_buffer())
+  {
     goto error;
-
+  }
   /*
     If the above call has scanned through all intervals in *seq, then
     adjust *buf to indicate that the remaining buffer space will not be used.
@@ -4504,7 +4542,7 @@ static int rowid_cmp(void *h, uchar *a, 
   @retval other  Error
 */
 
-int DsMrr_impl::dsmrr_fill_buffer(handler *unused)
+int DsMrr_impl::dsmrr_fill_buffer()
 {
   char *range_info;
   int res;
@@ -4551,7 +4589,7 @@ int DsMrr_impl::dsmrr_fill_buffer(handle
   DS-MRR implementation: multi_range_read_next() function
 */
 
-int DsMrr_impl::dsmrr_next(handler *h, char **range_info)
+int DsMrr_impl::dsmrr_next(char **range_info)
 {
   int res;
   uchar *cur_range_info= 0;
@@ -4569,8 +4607,7 @@ int DsMrr_impl::dsmrr_next(handler *h, c
         res= HA_ERR_END_OF_FILE;
         goto end;
       }
-
-      res= dsmrr_fill_buffer(h);
+    res= dsmrr_fill_buffer();
       if (res)
         goto end;
     }

=== modified file 'sql/handler.h'
--- a/sql/handler.h	2009-01-29 21:17:59 +0000
+++ b/sql/handler.h	2009-02-09 13:36:40 +0000
@@ -2461,14 +2461,15 @@ public:
 
   DsMrr_impl()
     : h2(NULL) {};
-
-  handler *h; /* The "owner" handler object. It is used for scanning the index */
-  TABLE *table; /* Always equal to h->table */
-private:
+  
   /*
-    Secondary handler object. It is used to retrieve full table rows by
-    calling rnd_pos().
+    The "owner" handler object (the one that calls dsmrr_XXX functions).
+    It is used to retrieve full table rows by calling rnd_pos().
   */
+  handler *h;
+  TABLE *table; /* Always equal to h->table */
+private:
+  /* Secondary handler object.  It is used for scanning the index */
   handler *h2;
 
   /* Buffer to store rowids, or (rowid, range_id) pairs */
@@ -2489,12 +2490,11 @@ public:
     h= h_arg; 
     table= table_arg;
   }
-  int dsmrr_init(handler *h, KEY *key, RANGE_SEQ_IF *seq_funcs, 
-                 void *seq_init_param, uint n_ranges, uint mode, 
-                 HANDLER_BUFFER *buf);
+  int dsmrr_init(handler *h, RANGE_SEQ_IF *seq_funcs, void *seq_init_param, 
+                 uint n_ranges, uint mode, HANDLER_BUFFER *buf);
   void dsmrr_close();
-  int dsmrr_fill_buffer(handler *h);
-  int dsmrr_next(handler *h, char **range_info);
+  int dsmrr_fill_buffer();
+  int dsmrr_next(char **range_info);
 
   ha_rows dsmrr_info(uint keyno, uint n_ranges, uint keys, uint *bufsz,
                      uint *flags, COST_VECT *cost);

=== modified file 'sql/hash_filo.h'
--- a/sql/hash_filo.h	2007-05-10 09:59:39 +0000
+++ b/sql/hash_filo.h	2009-01-27 02:08:48 +0000
@@ -38,8 +38,8 @@ class hash_filo_element
 class hash_filo
 {
   const uint size, key_offset, key_length;
-  const hash_get_key get_key;
-  hash_free_key free_element;
+  const my_hash_get_key get_key;
+  my_hash_free_key free_element;
   bool init;
   CHARSET_INFO *hash_charset;
 
@@ -49,7 +49,7 @@ public:
   HASH cache;
 
   hash_filo(uint size_arg, uint key_offset_arg , uint key_length_arg,
-	    hash_get_key get_key_arg, hash_free_key free_element_arg,
+	    my_hash_get_key get_key_arg, my_hash_free_key free_element_arg,
 	    CHARSET_INFO *hash_charset_arg)
     :size(size_arg), key_offset(key_offset_arg), key_length(key_length_arg),
     get_key(get_key_arg), free_element(free_element_arg),init(0),
@@ -63,7 +63,7 @@ public:
     if (init)
     {
       if (cache.array.buffer)	/* Avoid problems with thread library */
-	(void) hash_free(&cache);
+	(void) my_hash_free(&cache);
       pthread_mutex_destroy(&lock);
     }
   }
@@ -76,8 +76,8 @@ public:
     }
     if (!locked)
       (void) pthread_mutex_lock(&lock);
-    (void) hash_free(&cache);
-    (void) hash_init(&cache,hash_charset,size,key_offset, 
+    (void) my_hash_free(&cache);
+    (void) my_hash_init(&cache,hash_charset,size,key_offset, 
     		     key_length, get_key, free_element,0);
     if (!locked)
       (void) pthread_mutex_unlock(&lock);
@@ -87,7 +87,7 @@ public:
   hash_filo_element *search(uchar* key, size_t length)
   {
     hash_filo_element *entry=(hash_filo_element*)
-      hash_search(&cache,(uchar*) key,length);
+      my_hash_search(&cache,(uchar*) key,length);
     if (entry)
     {						// Found; link it first
       if (entry != first_link)
@@ -113,7 +113,7 @@ public:
     {
       hash_filo_element *tmp=last_link;
       last_link=last_link->prev_used;
-      hash_delete(&cache,(uchar*) tmp);
+      my_hash_delete(&cache,(uchar*) tmp);
     }
     if (my_hash_insert(&cache,(uchar*) entry))
     {

=== modified file 'sql/hostname.cc'
--- a/sql/hostname.cc	2008-12-13 20:01:27 +0000
+++ b/sql/hostname.cc	2009-01-27 02:08:48 +0000
@@ -61,7 +61,7 @@ bool hostname_cache_init()
   uint offset= (uint) ((char*) (&tmp.ip) - (char*) &tmp);
   if (!(hostname_cache=new hash_filo(HOST_CACHE_SIZE, offset,
 				     sizeof(struct sockaddr_storage),NULL,
-				     (hash_free_key) free,
+				     (my_hash_free_key) free,
 				     &my_charset_bin)))
     return 1;
   hostname_cache->clear();

=== modified file 'sql/item_create.cc'
--- a/sql/item_create.cc	2008-12-14 11:36:15 +0000
+++ b/sql/item_create.cc	2009-01-27 02:08:48 +0000
@@ -4959,14 +4959,14 @@ int item_create_init()
 
   DBUG_ENTER("item_create_init");
 
-  if (hash_init(& native_functions_hash,
-                system_charset_info,
-                array_elements(func_array),
-                0,
-                0,
-                (hash_get_key) get_native_fct_hash_key,
-                NULL,                          /* Nothing to free */
-                MYF(0)))
+  if (my_hash_init(& native_functions_hash,
+                   system_charset_info,
+                   array_elements(func_array),
+                   0,
+                   0,
+                   (my_hash_get_key) get_native_fct_hash_key,
+                   NULL,                          /* Nothing to free */
+                   MYF(0)))
     DBUG_RETURN(1);
 
   for (func= func_array; func->builder != NULL; func++)
@@ -4978,7 +4978,7 @@ int item_create_init()
 #ifndef DBUG_OFF
   for (uint i=0 ; i < native_functions_hash.records ; i++)
   {
-    func= (Native_func_registry*) hash_element(& native_functions_hash, i);
+    func= (Native_func_registry*) my_hash_element(& native_functions_hash, i);
     DBUG_PRINT("info", ("native function: %s  length: %u",
                         func->name.str, (uint) func->name.length));
   }
@@ -4996,7 +4996,7 @@ int item_create_init()
 void item_create_cleanup()
 {
   DBUG_ENTER("item_create_cleanup");
-  hash_free(& native_functions_hash);
+  my_hash_free(& native_functions_hash);
   DBUG_VOID_RETURN;
 }
 
@@ -5007,9 +5007,9 @@ find_native_function_builder(THD *thd, L
   Create_func *builder= NULL;
 
   /* Thread safe */
-  func= (Native_func_registry*) hash_search(& native_functions_hash,
-                                            (uchar*) name.str,
-                                             name.length);
+  func= (Native_func_registry*) my_hash_search(& native_functions_hash,
+                                               (uchar*) name.str,
+                                               name.length);
 
   if (func)
   {

=== modified file 'sql/item_func.cc'
--- a/sql/item_func.cc	2009-02-03 09:16:53 +0000
+++ b/sql/item_func.cc	2009-02-05 12:49:39 +0000
@@ -3329,7 +3329,7 @@ public:
   {
     if (key)
     {
-      hash_delete(&hash_user_locks,(uchar*) this);
+      my_hash_delete(&hash_user_locks,(uchar*) this);
       my_free(key, MYF(0));
     }
     pthread_cond_destroy(&cond);
@@ -3353,8 +3353,8 @@ static bool item_user_lock_inited= 0;
 void item_user_lock_init(void)
 {
   pthread_mutex_init(&LOCK_user_locks,MY_MUTEX_INIT_SLOW);
-  hash_init(&hash_user_locks,system_charset_info,
-	    16,0,0,(hash_get_key) ull_get_key,NULL,0);
+  my_hash_init(&hash_user_locks,system_charset_info,
+	    16,0,0,(my_hash_get_key) ull_get_key,NULL,0);
   item_user_lock_inited= 1;
 }
 
@@ -3363,7 +3363,7 @@ void item_user_lock_free(void)
   if (item_user_lock_inited)
   {
     item_user_lock_inited= 0;
-    hash_free(&hash_user_locks);
+    my_hash_free(&hash_user_locks);
     pthread_mutex_destroy(&LOCK_user_locks);
   }
 }
@@ -3444,9 +3444,9 @@ void debug_sync_point(const char* lock_n
     this case, we will not be waiting, but rather, just waste CPU and
     memory on the whole deal
   */
-  if (!(ull= ((User_level_lock*) hash_search(&hash_user_locks,
-                                             (uchar*) lock_name,
-                                             lock_name_len))))
+  if (!(ull= ((User_level_lock*) my_hash_search(&hash_user_locks,
+                                                (uchar*) lock_name,
+                                                lock_name_len))))
   {
     pthread_mutex_unlock(&LOCK_user_locks);
     DBUG_VOID_RETURN;
@@ -3597,9 +3597,9 @@ longlong Item_func_get_lock::val_int()
     thd->ull=0;
   }
 
-  if (!(ull= ((User_level_lock *) hash_search(&hash_user_locks,
-                                              (uchar*) res->ptr(),
-                                              (size_t) res->length()))))
+  if (!(ull= ((User_level_lock *) my_hash_search(&hash_user_locks,
+                                                 (uchar*) res->ptr(),
+                                                 (size_t) res->length()))))
   {
     ull= new User_level_lock((uchar*) res->ptr(), (size_t) res->length(),
                              thd->thread_id);
@@ -3700,9 +3700,9 @@ longlong Item_func_release_lock::val_int
 
   result=0;
   pthread_mutex_lock(&LOCK_user_locks);
-  if (!(ull= ((User_level_lock*) hash_search(&hash_user_locks,
-                                             (const uchar*) res->ptr(),
-                                             (size_t) res->length()))))
+  if (!(ull= ((User_level_lock*) my_hash_search(&hash_user_locks,
+                                                (const uchar*) res->ptr(),
+                                                (size_t) res->length()))))
   {
     null_value=1;
   }
@@ -3880,12 +3880,12 @@ static user_var_entry *get_variable(HASH
 {
   user_var_entry *entry;
 
-  if (!(entry = (user_var_entry*) hash_search(hash, (uchar*) name.str,
-					      name.length)) &&
+  if (!(entry = (user_var_entry*) my_hash_search(hash, (uchar*) name.str,
+                                                 name.length)) &&
       create_if_not_exists)
   {
     uint size=ALIGN_SIZE(sizeof(user_var_entry))+name.length+1+extra_size;
-    if (!hash_inited(hash))
+    if (!my_hash_inited(hash))
       return 0;
     if (!(entry = (user_var_entry*) my_malloc(size,MYF(MY_WME | ME_FATALERROR))))
       return 0;
@@ -5755,8 +5755,8 @@ longlong Item_func_is_free_lock::val_int
   }
   
   pthread_mutex_lock(&LOCK_user_locks);
-  ull= (User_level_lock *) hash_search(&hash_user_locks, (uchar*) res->ptr(),
-                                       (size_t) res->length());
+  ull= (User_level_lock *) my_hash_search(&hash_user_locks, (uchar*) res->ptr(),
+                                          (size_t) res->length());
   pthread_mutex_unlock(&LOCK_user_locks);
   if (!ull || !ull->locked)
     return 1;
@@ -5774,8 +5774,8 @@ longlong Item_func_is_used_lock::val_int
     return 0;
   
   pthread_mutex_lock(&LOCK_user_locks);
-  ull= (User_level_lock *) hash_search(&hash_user_locks, (uchar*) res->ptr(),
-                                       (size_t) res->length());
+  ull= (User_level_lock *) my_hash_search(&hash_user_locks, (uchar*) res->ptr(),
+                                          (size_t) res->length());
   pthread_mutex_unlock(&LOCK_user_locks);
   if (!ull || !ull->locked)
     return 0;

=== modified file 'sql/log.cc'
--- a/sql/log.cc	2009-01-26 16:32:29 +0000
+++ b/sql/log.cc	2009-02-05 12:49:39 +0000
@@ -2647,7 +2647,7 @@ binlog_end_trans(THD *thd, binlog_trx_da
       transaction cache to remove the statement.
      */
     thd->binlog_remove_pending_rows_event(TRUE);
-    if (all || !(thd->options & (OPTION_BEGIN | OPTION_NOT_AUTOCOMMIT)))
+    if (all || !thd->in_multi_stmt_transaction())
     {
       trx_data->reset();
 
@@ -2716,8 +2716,7 @@ static int binlog_commit(handlerton *hto
 
     Otherwise, we accumulate the statement
   */
-  ulonglong const in_transaction=
-    thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN);
+  bool const in_transaction= thd->in_multi_stmt_transaction();
   DBUG_PRINT("debug",
              ("all: %d, empty: %s, in_transaction: %s, all.modified_non_trans_table: %s, stmt.modified_non_trans_table: %s",
               all,
@@ -5527,7 +5526,7 @@ THD::binlog_start_trans_and_stmt()
       trx_data->before_stmt_pos == MY_OFF_T_UNDEF)
   {
     this->binlog_set_stmt_begin();
-    if (options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
+    if (in_multi_stmt_transaction())
       trans_register_ha(this, TRUE, binlog_hton);
     trans_register_ha(this, FALSE, binlog_hton);
     /*
@@ -7150,8 +7149,8 @@ int TC_LOG_MMAP::recover()
     goto err1;
   }
 
-  if (hash_init(&xids, &my_charset_bin, tc_log_page_size/3, 0,
-                sizeof(my_xid), 0, 0, MYF(0)))
+  if (my_hash_init(&xids, &my_charset_bin, tc_log_page_size/3, 0,
+                   sizeof(my_xid), 0, 0, MYF(0)))
     goto err1;
 
   for ( ; p < end_p ; p++)
@@ -7164,12 +7163,12 @@ int TC_LOG_MMAP::recover()
   if (ha_recover(&xids))
     goto err2;
 
-  hash_free(&xids);
+  my_hash_free(&xids);
   bzero(data, (size_t)file_length);
   return 0;
 
 err2:
-  hash_free(&xids);
+  my_hash_free(&xids);
 err1:
   sql_print_error("Crash recovery failed. Either correct the problem "
                   "(if it's, for example, out of memory error) and restart, "
@@ -7353,8 +7352,8 @@ int TC_LOG_BINLOG::recover(IO_CACHE *log
   MEM_ROOT mem_root;
 
   if (! fdle->is_valid() ||
-      hash_init(&xids, &my_charset_bin, TC_LOG_PAGE_SIZE/3, 0,
-                sizeof(my_xid), 0, 0, MYF(0)))
+      my_hash_init(&xids, &my_charset_bin, TC_LOG_PAGE_SIZE/3, 0,
+                   sizeof(my_xid), 0, 0, MYF(0)))
     goto err1;
 
   init_alloc_root(&mem_root, TC_LOG_PAGE_SIZE, TC_LOG_PAGE_SIZE);
@@ -7379,12 +7378,12 @@ int TC_LOG_BINLOG::recover(IO_CACHE *log
     goto err2;
 
   free_root(&mem_root, MYF(0));
-  hash_free(&xids);
+  my_hash_free(&xids);
   return 0;
 
 err2:
   free_root(&mem_root, MYF(0));
-  hash_free(&xids);
+  my_hash_free(&xids);
 err1:
   sql_print_error("Crash recovery failed. Either correct the problem "
                   "(if it's, for example, out of memory error) and restart, "

=== modified file 'sql/mdl.cc'
--- a/sql/mdl.cc	2008-12-17 10:11:14 +0000
+++ b/sql/mdl.cc	2009-01-27 13:41:58 +0000
@@ -119,8 +119,8 @@ void mdl_init()
   mdl_initialized= 1;
   pthread_mutex_init(&LOCK_mdl, NULL);
   pthread_cond_init(&COND_mdl, NULL);
-  hash_init(&mdl_locks, &my_charset_bin, 16 /* FIXME */, 0, 0,
-            mdl_locks_key, 0, 0);
+  my_hash_init(&mdl_locks, &my_charset_bin, 16 /* FIXME */, 0, 0,
+               mdl_locks_key, 0, 0);
   global_lock.waiting_shared= global_lock.active_shared= 0;
   global_lock.active_intention_exclusive= 0;
 }
@@ -141,7 +141,7 @@ void mdl_destroy()
     DBUG_ASSERT(!mdl_locks.records);
     pthread_mutex_destroy(&LOCK_mdl);
     pthread_cond_destroy(&COND_mdl);
-    hash_free(&mdl_locks);
+    my_hash_free(&mdl_locks);
   }
 }
 
@@ -740,8 +740,8 @@ bool mdl_acquire_shared_lock(MDL_CONTEXT
     return TRUE;
   }
 
-  if (!(lock= (MDL_LOCK *)hash_search(&mdl_locks, (uchar*)lock_data->key,
-                                      lock_data->key_length)))
+  if (!(lock= (MDL_LOCK*) my_hash_search(&mdl_locks, (uchar*)lock_data->key,
+                                         lock_data->key_length)))
   {
     if (!(lock= get_lock_object()))
     {
@@ -829,8 +829,8 @@ bool mdl_acquire_exclusive_locks(MDL_CON
   {
     DBUG_ASSERT(lock_data->type == MDL_EXCLUSIVE &&
                 lock_data->state == MDL_INITIALIZED);
-    if (!(lock= (MDL_LOCK *)hash_search(&mdl_locks, (uchar*)lock_data->key,
-                                        lock_data->key_length)))
+    if (!(lock= (MDL_LOCK*) my_hash_search(&mdl_locks, (uchar*)lock_data->key,
+                                           lock_data->key_length)))
     {
       if (!(lock= get_lock_object()))
         goto err;
@@ -1105,8 +1105,8 @@ bool mdl_try_acquire_exclusive_lock(MDL_
 
   pthread_mutex_lock(&LOCK_mdl);
 
-  if (!(lock= (MDL_LOCK *)hash_search(&mdl_locks, (uchar*)lock_data->key,
-                                      lock_data->key_length)))
+  if (!(lock= (MDL_LOCK*) my_hash_search(&mdl_locks, (uchar*)lock_data->key,
+                                         lock_data->key_length)))
   {
     if (!(lock= get_lock_object()))
       goto err;
@@ -1227,8 +1227,8 @@ bool mdl_wait_for_locks(MDL_CONTEXT *con
         request for MDL_EXCLUSIVE lock.
       */
       if (is_shared(lock_data) &&
-          (lock= (MDL_LOCK *)hash_search(&mdl_locks, (uchar*)lock_data->key,
-                                         lock_data->key_length)) &&
+          (lock= (MDL_LOCK*) my_hash_search(&mdl_locks, (uchar*)lock_data->key,
+                                            lock_data->key_length)) &&
           !can_grant_lock(lock, lock_data))
         break;
     }
@@ -1264,7 +1264,7 @@ static void release_lock(MDL_LOCK_DATA *
   lock= lock_data->lock;
   if (lock->has_one_lock_data())
   {
-    hash_delete(&mdl_locks, (uchar *)lock);
+    my_hash_delete(&mdl_locks, (uchar *)lock);
     DBUG_PRINT("info", ("releasing cached_object cached_object=%p",
                         lock->cached_object));
     if (lock->cached_object)
@@ -1330,10 +1330,11 @@ void mdl_release_locks(MDL_CONTEXT *cont
   {
     DBUG_PRINT("info", ("found lock to release lock_data=%p", lock_data));
     /*
-      We should not release locks which pending shared locks as these
-      are not associated with lock object and don't present in its
-      lists. Allows us to avoid problems in open_tables() in case of
-      back-off
+      Don't call release_lock() for a shared lock if it has not been
+      granted. Lock state in this case is MDL_INITIALIZED.
+      We have pending and granted shared locks in the same context
+      when this function is called from the "back-off" path of
+      open_tables().
     */
     if (lock_data->state != MDL_INITIALIZED)
     {

=== modified file 'sql/mysqld.cc'
--- a/sql/mysqld.cc	2009-02-09 16:06:35 +0000
+++ b/sql/mysqld.cc	2009-02-11 12:11:20 +0000
@@ -488,9 +488,11 @@ ulong ndb_extra_logging;
 #ifdef HAVE_NDB_BINLOG
 ulong ndb_report_thresh_binlog_epoch_slip;
 ulong ndb_report_thresh_binlog_mem_usage;
+my_bool ndb_log_binlog_index;
 my_bool opt_ndb_log_update_as_write;
 my_bool opt_ndb_log_updated_only;
 my_bool opt_ndb_log_orig;
+my_bool opt_ndb_log_bin;
 #endif
 
 extern const char *ndb_distribution_names[];
@@ -5793,7 +5795,7 @@ enum options_mysqld
   OPT_NDB_REPORT_THRESH_BINLOG_MEM_USAGE,
   OPT_NDB_USE_COPYING_ALTER_TABLE,
   OPT_NDB_LOG_UPDATE_AS_WRITE, OPT_NDB_LOG_UPDATED_ONLY,
-  OPT_NDB_LOG_ORIG,
+  OPT_NDB_LOG_ORIG, OPT_NDB_LOG_BIN, OPT_NDB_LOG_BINLOG_INDEX,
   OPT_SKIP_SAFEMALLOC, OPT_MUTEX_DEADLOCK_DETECTOR,
   OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_COMPLETION_TYPE,
   OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS,
@@ -6422,6 +6424,16 @@ thread is in the master's binlogs.",
    (uchar**) &opt_ndb_log_orig,
    (uchar**) &opt_ndb_log_orig,
    0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0},
+  {"ndb-log-bin", OPT_NDB_LOG_BIN,
+   "Log ndb tables in the binary log. Option only has meaning if "
+   "the binary log has been turned on for the server.",
+   (uchar**) &opt_ndb_log_bin, (uchar**) &opt_ndb_log_bin,
+   0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
+  {"ndb-log-binlog-index", OPT_NDB_LOG_BINLOG_INDEX,
+   "Insert mapping between epochs and binlog positions into the "
+   "ndb_binlog_index table.",
+   (uchar**) &ndb_log_binlog_index, (uchar**) &ndb_log_binlog_index,
+   0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
 #endif
   {"ndb-use-exact-count", OPT_NDB_USE_EXACT_COUNT,
    "Use exact records count during query planning and for fast "

=== modified file 'sql/repl_failsafe.cc'
--- a/sql/repl_failsafe.cc	2009-01-09 12:50:24 +0000
+++ b/sql/repl_failsafe.cc	2009-01-27 02:08:48 +0000
@@ -146,10 +146,10 @@ void unregister_slave(THD* thd, bool onl
       pthread_mutex_lock(&LOCK_slave_list);
 
     SLAVE_INFO* old_si;
-    if ((old_si = (SLAVE_INFO*)hash_search(&slave_list,
-					   (uchar*)&thd->server_id, 4)) &&
+    if ((old_si = (SLAVE_INFO*)my_hash_search(&slave_list,
+                                              (uchar*)&thd->server_id, 4)) &&
 	(!only_mine || old_si->thd == thd))
-    hash_delete(&slave_list, (uchar*)old_si);
+    my_hash_delete(&slave_list, (uchar*)old_si);
 
     if (need_mutex)
       pthread_mutex_unlock(&LOCK_slave_list);
@@ -221,17 +221,18 @@ extern "C" void slave_info_free(void *s)
 
 void init_slave_list()
 {
-  hash_init(&slave_list, system_charset_info, SLAVE_LIST_CHUNK, 0, 0,
-	    (hash_get_key) slave_list_key, (hash_free_key) slave_info_free, 0);
+  my_hash_init(&slave_list, system_charset_info, SLAVE_LIST_CHUNK, 0, 0,
+               (my_hash_get_key) slave_list_key,
+               (my_hash_free_key) slave_info_free, 0);
   pthread_mutex_init(&LOCK_slave_list, MY_MUTEX_INIT_FAST);
 }
 
 void end_slave_list()
 {
   /* No protection by a mutex needed as we are only called at shutdown */
-  if (hash_inited(&slave_list))
+  if (my_hash_inited(&slave_list))
   {
-    hash_free(&slave_list);
+    my_hash_free(&slave_list);
     pthread_mutex_destroy(&LOCK_slave_list);
   }
 }
@@ -547,8 +548,8 @@ HOSTS";
     uint32 log_server_id;
     SLAVE_INFO* si, *old_si;
     log_server_id = atoi(row[0]);
-    if ((old_si= (SLAVE_INFO*)hash_search(&slave_list,
-					  (uchar*)&log_server_id,4)))
+    if ((old_si= (SLAVE_INFO*)my_hash_search(&slave_list,
+                                             (uchar*)&log_server_id,4)))
       si = old_si;
     else
     {
@@ -682,7 +683,7 @@ bool show_slave_hosts(THD* thd)
 
   for (uint i = 0; i < slave_list.records; ++i)
   {
-    SLAVE_INFO* si = (SLAVE_INFO*) hash_element(&slave_list, i);
+    SLAVE_INFO* si = (SLAVE_INFO*) my_hash_element(&slave_list, i);
     protocol->prepare_for_resend();
     protocol->store((uint32) si->server_id);
     protocol->store(si->host, &my_charset_bin);

=== modified file 'sql/rpl_filter.cc'
--- a/sql/rpl_filter.cc	2008-02-07 18:51:50 +0000
+++ b/sql/rpl_filter.cc	2009-01-27 02:08:48 +0000
@@ -32,9 +32,9 @@ Rpl_filter::Rpl_filter() : 
 Rpl_filter::~Rpl_filter() 
 {
   if (do_table_inited) 
-    hash_free(&do_table);
+    my_hash_free(&do_table);
   if (ignore_table_inited)
-    hash_free(&ignore_table);
+    my_hash_free(&ignore_table);
   if (wild_do_table_inited)
     free_string_array(&wild_do_table);
   if (wild_ignore_table_inited)
@@ -103,12 +103,12 @@ Rpl_filter::tables_ok(const char* db, TA
     len= (uint) (strmov(end, tables->table_name) - hash_key);
     if (do_table_inited) // if there are any do's
     {
-      if (hash_search(&do_table, (uchar*) hash_key, len))
+      if (my_hash_search(&do_table, (uchar*) hash_key, len))
 	DBUG_RETURN(1);
     }
     if (ignore_table_inited) // if there are any ignores
     {
-      if (hash_search(&ignore_table, (uchar*) hash_key, len))
+      if (my_hash_search(&ignore_table, (uchar*) hash_key, len))
 	DBUG_RETURN(0); 
     }
     if (wild_do_table_inited && 
@@ -388,7 +388,7 @@ void free_table_ent(void* a)
 void 
 Rpl_filter::init_table_rule_hash(HASH* h, bool* h_inited)
 {
-  hash_init(h, system_charset_info,TABLE_RULE_HASH_SIZE,0,0,
+  my_hash_init(h, system_charset_info,TABLE_RULE_HASH_SIZE,0,0,
 	    get_table_key, free_table_ent, 0);
   *h_inited = 1;
 }
@@ -459,7 +459,7 @@ Rpl_filter::table_rule_ent_hash_to_str(S
   {
     for (uint i= 0; i < h->records; i++)
     {
-      TABLE_RULE_ENT* e= (TABLE_RULE_ENT*) hash_element(h, i);
+      TABLE_RULE_ENT* e= (TABLE_RULE_ENT*) my_hash_element(h, i);
       if (s->length())
         s->append(',');
       s->append(e->db,e->key_len);

=== modified file 'sql/rpl_handler.cc'
--- a/sql/rpl_handler.cc	2008-10-24 07:03:30 +0000
+++ b/sql/rpl_handler.cc	2009-01-27 02:08:48 +0000
@@ -43,9 +43,9 @@ int get_user_var_int(const char *name,
                      long long int *value, int *null_value)
 {
   my_bool null_val;
-  user_var_entry *entry= 
-    (user_var_entry*) hash_search(&current_thd->user_vars,
-                                  (uchar*) name, strlen(name));
+  user_var_entry *entry=
+    (user_var_entry*) my_hash_search(&current_thd->user_vars,
+                                     (uchar*) name, strlen(name));
   if (!entry)
     return 1;
   *value= entry->val_int(&null_val);
@@ -58,9 +58,9 @@ int get_user_var_real(const char *name,
                       double *value, int *null_value)
 {
   my_bool null_val;
-  user_var_entry *entry= 
-    (user_var_entry*) hash_search(&current_thd->user_vars,
-                                  (uchar*) name, strlen(name));
+  user_var_entry *entry=
+    (user_var_entry*) my_hash_search(&current_thd->user_vars,
+                                     (uchar*) name, strlen(name));
   if (!entry)
     return 1;
   *value= entry->val_real(&null_val);
@@ -74,9 +74,9 @@ int get_user_var_str(const char *name, c
 {
   String str;
   my_bool null_val;
-  user_var_entry *entry= 
-    (user_var_entry*) hash_search(&current_thd->user_vars,
-                                  (uchar*) name, strlen(name));
+  user_var_entry *entry=
+    (user_var_entry*) my_hash_search(&current_thd->user_vars,
+                                     (uchar*) name, strlen(name));
   if (!entry)
     return 1;
   entry->val_str(&null_val, &str, precision);

=== modified file 'sql/rpl_tblmap.cc'
--- a/sql/rpl_tblmap.cc	2008-09-04 18:30:34 +0000
+++ b/sql/rpl_tblmap.cc	2009-01-27 02:08:48 +0000
@@ -34,10 +34,10 @@ table_mapping::table_mapping()
     No "free_element" function for entries passed here, as the entries are
     allocated in a MEM_ROOT (freed as a whole in the destructor), they cannot
     be freed one by one.
-    Note that below we don't test if hash_init() succeeded. This constructor
-    is called at startup only.
+    Note that below we don't test if my_hash_init() succeeded. This
+    constructor is called at startup only.
   */
-  (void) hash_init(&m_table_ids,&my_charset_bin,TABLE_ID_HASH_SIZE,
+  (void) my_hash_init(&m_table_ids,&my_charset_bin,TABLE_ID_HASH_SIZE,
 		   offsetof(entry,table_id),sizeof(ulong),
 		   0,0,0);
   /* We don't preallocate any block, this is consistent with m_free=0 above */
@@ -49,7 +49,7 @@ table_mapping::~table_mapping()
 #ifdef MYSQL_CLIENT
   clear_tables();
 #endif
-  hash_free(&m_table_ids);
+  my_hash_free(&m_table_ids);
   free_root(&m_mem_root, MYF(0));
 }
 
@@ -115,7 +115,7 @@ int table_mapping::set_table(ulong table
 #ifdef MYSQL_CLIENT
     free_table_map_log_event(e->table);
 #endif
-    hash_delete(&m_table_ids,(uchar *)e);
+    my_hash_delete(&m_table_ids,(uchar *)e);
   }
   e->table_id= table_id;
   e->table= table;
@@ -132,7 +132,7 @@ int table_mapping::remove_table(ulong ta
   entry *e= find_entry(table_id);
   if (e)
   {
-    hash_delete(&m_table_ids,(uchar *)e);
+    my_hash_delete(&m_table_ids,(uchar *)e);
     /* we add this entry to the chain of free (free for use) entries */
     e->next= m_free;
     m_free= e;
@@ -150,7 +150,7 @@ void table_mapping::clear_tables()
   DBUG_ENTER("table_mapping::clear_tables()");
   for (uint i= 0; i < m_table_ids.records; i++)
   {
-    entry *e= (entry *)hash_element(&m_table_ids, i);
+    entry *e= (entry *)my_hash_element(&m_table_ids, i);
 #ifdef MYSQL_CLIENT
     free_table_map_log_event(e->table);
 #endif

=== modified file 'sql/rpl_tblmap.h'
--- a/sql/rpl_tblmap.h	2008-09-04 18:30:34 +0000
+++ b/sql/rpl_tblmap.h	2009-01-27 02:08:48 +0000
@@ -90,9 +90,9 @@ private:
 
   entry *find_entry(ulong table_id)
   {
-    return (entry *)hash_search(&m_table_ids,
-				(uchar*)&table_id,
-				sizeof(table_id));
+    return (entry *) my_hash_search(&m_table_ids,
+                                    (uchar*)&table_id,
+                                    sizeof(table_id));
   }
   int expand();
 

=== modified file 'sql/set_var.cc'
--- a/sql/set_var.cc	2009-02-09 16:06:35 +0000
+++ b/sql/set_var.cc	2009-02-11 12:11:20 +0000
@@ -73,6 +73,7 @@ extern ulong ndb_extra_logging;
 #ifdef HAVE_NDB_BINLOG
 extern ulong ndb_report_thresh_binlog_epoch_slip;
 extern ulong ndb_report_thresh_binlog_mem_usage;
+extern my_bool ndb_log_binlog_index;
 #endif
 
 extern CHARSET_INFO *character_set_filesystem;
@@ -715,6 +716,8 @@ sys_ndb_report_thresh_binlog_epoch_slip(
 static sys_var_long_ptr
 sys_ndb_report_thresh_binlog_mem_usage(&vars, "ndb_report_thresh_binlog_mem_usage",
                                        &ndb_report_thresh_binlog_mem_usage);
+static sys_var_bool_ptr
+sys_ndb_log_binlog_index(&vars, "ndb_log_binlog_index", &ndb_log_binlog_index);
 #endif
 static sys_var_thd_bool
 sys_ndb_use_exact_count(&vars, "ndb_use_exact_count", &SV::ndb_use_exact_count);
@@ -3691,7 +3694,7 @@ int mysql_add_sys_var_chain(sys_var *fir
 
 error:
   for (; first != var; first= first->next)
-    hash_delete(&system_variable_hash, (uchar*) first);
+    my_hash_delete(&system_variable_hash, (uchar*) first);
   return 1;
 }
  
@@ -3715,7 +3718,7 @@ int mysql_del_sys_var_chain(sys_var *fir
   /* A write lock should be held on LOCK_system_variables_hash */
    
   for (sys_var *var= first; var; var= var->next)
-    result|= hash_delete(&system_variable_hash, (uchar*) var);
+    result|= my_hash_delete(&system_variable_hash, (uchar*) var);
 
   return result;
 }
@@ -3752,7 +3755,7 @@ SHOW_VAR* enumerate_sys_vars(THD *thd, b
 
     for (i= 0; i < count; i++)
     {
-      sys_var *var= (sys_var*) hash_element(&system_variable_hash, i);
+      sys_var *var= (sys_var*) my_hash_element(&system_variable_hash, i);
       show->name= var->name;
       show->value= (char*) var;
       show->type= SHOW_SYS;
@@ -3789,8 +3792,8 @@ int set_var_init()
   
   for (sys_var *var=vars.first; var; var= var->next, count++) {}
 
-  if (hash_init(&system_variable_hash, system_charset_info, count, 0,
-                0, (hash_get_key) get_sys_var_length, 0, HASH_UNIQUE))
+  if (my_hash_init(&system_variable_hash, system_charset_info, count, 0,
+                   0, (my_hash_get_key) get_sys_var_length, 0, HASH_UNIQUE))
     goto error;
 
   vars.last->next= NULL;
@@ -3815,7 +3818,7 @@ error:
 
 void set_var_free()
 {
-  hash_free(&system_variable_hash);
+  my_hash_free(&system_variable_hash);
 }
 
 
@@ -3841,7 +3844,7 @@ sys_var *intern_find_sys_var(const char 
     This function is only called from the sql_plugin.cc.
     A lock on LOCK_system_variable_hash should be held
   */
-  var= (sys_var*) hash_search(&system_variable_hash,
+  var= (sys_var*) my_hash_search(&system_variable_hash,
 			      (uchar*) str, length ? length : strlen(str));
   if (!(var || no_error))
     my_error(ER_UNKNOWN_SYSTEM_VARIABLE, MYF(0), (char*) str);

=== modified file 'sql/si_objects.cc'
--- a/sql/si_objects.cc	2009-01-29 21:17:59 +0000
+++ b/sql/si_objects.cc	2009-02-04 10:49:16 +0000
@@ -1492,16 +1492,16 @@ Iterator *View_base_obj_iterator::create
 inline View_base_obj_iterator::View_base_obj_iterator()
   :m_cur_idx(0)
 {
-  hash_init(&m_table_names, system_charset_info, 16, 0, 0,
-            get_table_name_key, free_table_name_key,
-            MYF(0));
+  my_hash_init(&m_table_names, system_charset_info, 16, 0, 0,
+               get_table_name_key, free_table_name_key,
+               MYF(0));
 }
 
 ///////////////////////////////////////////////////////////////////////////
 
 inline View_base_obj_iterator::~View_base_obj_iterator()
 {
-  hash_free(&m_table_names);
+  my_hash_free(&m_table_names);
 }
 
 
@@ -1631,7 +1631,7 @@ Obj *View_base_obj_iterator::next()
     return NULL;
 
   Table_name_key *table_name_key=
-    (Table_name_key *) hash_element(&m_table_names, m_cur_idx);
+    (Table_name_key *) my_hash_element(&m_table_names, m_cur_idx);
 
   ++m_cur_idx;
 

=== modified file 'sql/slave.cc'
--- a/sql/slave.cc	2009-01-26 16:03:39 +0000
+++ b/sql/slave.cc	2009-02-04 12:35:46 +0000
@@ -2782,11 +2782,15 @@ Slave SQL thread aborted. Can't execute 
 
   /* Read queries from the IO/THREAD until this thread is killed */
 
+  thd->variables.new_mode= global_system_variables.new_mode;
+
   while (!sql_slave_killed(thd,rli))
   {
     thd_proc_info(thd, "Reading event from the relay log");
     DBUG_ASSERT(rli->sql_thd == thd);
     THD_CHECK_SENTRY(thd);
+
+    sql_print_information("new_mode %u", thd->variables.new_mode);
     if (exec_relay_log_event(thd,rli))
     {
       DBUG_PRINT("info", ("exec_relay_log_event() failed"));

=== modified file 'sql/sp.cc'
--- a/sql/sp.cc	2009-01-06 10:38:47 +0000
+++ b/sql/sp.cc	2009-01-27 02:08:48 +0000
@@ -1490,11 +1490,11 @@ static bool add_used_routine(LEX *lex, Q
                              const LEX_STRING *key,
                              TABLE_LIST *belong_to_view)
 {
-  hash_init_opt(&lex->sroutines, system_charset_info,
-                Query_tables_list::START_SROUTINES_HASH_SIZE,
-                0, 0, sp_sroutine_key, 0, 0);
+  my_hash_init_opt(&lex->sroutines, system_charset_info,
+                   Query_tables_list::START_SROUTINES_HASH_SIZE,
+                   0, 0, sp_sroutine_key, 0, 0);
 
-  if (!hash_search(&lex->sroutines, (uchar *)key->str, key->length))
+  if (!my_hash_search(&lex->sroutines, (uchar *)key->str, key->length))
   {
     Sroutine_hash_entry *rn=
       (Sroutine_hash_entry *)arena->alloc(sizeof(Sroutine_hash_entry) +
@@ -1559,7 +1559,7 @@ void sp_remove_not_own_routines(LEX *lex
       but we want to be more future-proof.
     */
     next_rt= not_own_rt->next;
-    hash_delete(&lex->sroutines, (uchar *)not_own_rt);
+    my_hash_delete(&lex->sroutines, (uchar *)not_own_rt);
   }
 
   *(Sroutine_hash_entry **)lex->sroutines_list_own_last= NULL;
@@ -1588,8 +1588,8 @@ void sp_update_sp_used_routines(HASH *ds
 {
   for (uint i=0 ; i < src->records ; i++)
   {
-    Sroutine_hash_entry *rt= (Sroutine_hash_entry *)hash_element(src, i);
-    if (!hash_search(dst, (uchar *)rt->key.str, rt->key.length))
+    Sroutine_hash_entry *rt= (Sroutine_hash_entry *)my_hash_element(src, i);
+    if (!my_hash_search(dst, (uchar *)rt->key.str, rt->key.length))
       my_hash_insert(dst, (uchar *)rt);
   }
 }
@@ -1615,7 +1615,7 @@ sp_update_stmt_used_routines(THD *thd, L
 {
   for (uint i=0 ; i < src->records ; i++)
   {
-    Sroutine_hash_entry *rt= (Sroutine_hash_entry *)hash_element(src, i);
+    Sroutine_hash_entry *rt= (Sroutine_hash_entry *)my_hash_element(src, i);
     (void)add_used_routine(lex, thd->stmt_arena, &rt->key, belong_to_view);
   }
 }

=== modified file 'sql/sp_cache.cc'
--- a/sql/sp_cache.cc	2008-12-02 22:02:52 +0000
+++ b/sql/sp_cache.cc	2009-01-27 02:08:48 +0000
@@ -44,7 +44,8 @@ public:
 
   inline sp_head *lookup(char *name, uint namelen)
   {
-    return (sp_head *)hash_search(&m_hashtable, (const uchar *)name, namelen);
+    return (sp_head *) my_hash_search(&m_hashtable, (const uchar *)name,
+                                      namelen);
   }
 
 #ifdef NOT_USED
@@ -261,15 +262,15 @@ sp_cache::sp_cache()
 
 sp_cache::~sp_cache()
 {
-  hash_free(&m_hashtable);
+  my_hash_free(&m_hashtable);
 }
 
 
 void
 sp_cache::init()
 {
-  hash_init(&m_hashtable, system_charset_info, 0, 0, 0,
-	    hash_get_key_for_sp_head, hash_free_sp_head, 0);
+  my_hash_init(&m_hashtable, system_charset_info, 0, 0, 0,
+               hash_get_key_for_sp_head, hash_free_sp_head, 0);
   version= 0;
 }
 
@@ -277,5 +278,5 @@ sp_cache::init()
 void
 sp_cache::cleanup()
 {
-  hash_free(&m_hashtable);
+  my_hash_free(&m_hashtable);
 }

=== modified file 'sql/sp_head.cc'
--- a/sql/sp_head.cc	2009-01-26 16:03:39 +0000
+++ b/sql/sp_head.cc	2009-02-05 12:49:39 +0000
@@ -523,8 +523,9 @@ sp_head::sp_head()
   m_backpatch.empty();
   m_cont_backpatch.empty();
   m_lex.empty();
-  hash_init(&m_sptabs, system_charset_info, 0, 0, 0, sp_table_key, 0, 0);
-  hash_init(&m_sroutines, system_charset_info, 0, 0, 0, sp_sroutine_key, 0, 0);
+  my_hash_init(&m_sptabs, system_charset_info, 0, 0, 0, sp_table_key, 0, 0);
+  my_hash_init(&m_sroutines, system_charset_info, 0, 0, 0, sp_sroutine_key,
+               0, 0);
 
   m_body_utf8.str= NULL;
   m_body_utf8.length= 0;
@@ -773,8 +774,8 @@ sp_head::destroy()
     m_thd->lex= lex;
   }
 
-  hash_free(&m_sptabs);
-  hash_free(&m_sroutines);
+  my_hash_free(&m_sptabs);
+  my_hash_free(&m_sroutines);
   DBUG_VOID_RETURN;
 }
 
@@ -3816,7 +3817,7 @@ sp_head::merge_table_list(THD *thd, TABL
 
   for (uint i= 0 ; i < m_sptabs.records ; i++)
   {
-    tab= (SP_TABLE *)hash_element(&m_sptabs, i);
+    tab= (SP_TABLE*) my_hash_element(&m_sptabs, i);
     tab->query_lock_count= 0;
   }
 
@@ -3850,8 +3851,8 @@ sp_head::merge_table_list(THD *thd, TABL
         (and therefore should not be prelocked). Otherwise we will erroneously
         treat table with same name but with different alias as non-temporary.
       */
-      if ((tab= (SP_TABLE *)hash_search(&m_sptabs, (uchar *)tname, tlen)) ||
-          ((tab= (SP_TABLE *)hash_search(&m_sptabs, (uchar *)tname,
+      if ((tab= (SP_TABLE*) my_hash_search(&m_sptabs, (uchar *)tname, tlen)) ||
+          ((tab= (SP_TABLE*) my_hash_search(&m_sptabs, (uchar *)tname,
                                         tlen - alen - 1)) &&
            tab->temp))
       {
@@ -3942,7 +3943,7 @@ sp_head::add_used_tables_to_table_list(T
   {
     char *tab_buff, *key_buff;
     TABLE_LIST *table;
-    SP_TABLE *stab= (SP_TABLE *)hash_element(&m_sptabs, i);
+    SP_TABLE *stab= (SP_TABLE*) my_hash_element(&m_sptabs, i);
     if (stab->temp)
       continue;
 

=== modified file 'sql/sql_acl.cc'
--- a/sql/sql_acl.cc	2009-01-26 16:03:39 +0000
+++ b/sql/sql_acl.cc	2009-02-05 12:49:39 +0000
@@ -261,8 +261,8 @@ my_bool acl_init(bool dont_read_acl_tabl
   DBUG_ENTER("acl_init");
 
   acl_cache= new hash_filo(ACL_CACHE_SIZE, 0, 0,
-                           (hash_get_key) acl_entry_get_key,
-                           (hash_free_key) free,
+                           (my_hash_get_key) acl_entry_get_key,
+                           (my_hash_free_key) free,
                            lower_case_file_system ?
                            system_charset_info : &my_charset_bin);
   if (dont_read_acl_tables)
@@ -637,7 +637,7 @@ void acl_free(bool end)
   delete_dynamic(&acl_users);
   delete_dynamic(&acl_dbs);
   delete_dynamic(&acl_wild_hosts);
-  hash_free(&acl_check_hosts);
+  my_hash_free(&acl_check_hosts);
   if (!end)
     acl_cache->clear(1); /* purecov: inspected */
   else
@@ -710,7 +710,7 @@ my_bool acl_reload(THD *thd)
   old_acl_dbs=acl_dbs;
   old_mem=mem;
   delete_dynamic(&acl_wild_hosts);
-  hash_free(&acl_check_hosts);
+  my_hash_free(&acl_check_hosts);
 
   if ((return_val= acl_load(thd, tables)))
   {					// Error. Revert to old list
@@ -1418,8 +1418,9 @@ static void init_check_host(void)
   DBUG_ENTER("init_check_host");
   (void) my_init_dynamic_array(&acl_wild_hosts,sizeof(struct acl_host_and_ip),
 			  acl_users.elements,1);
-  (void) hash_init(&acl_check_hosts,system_charset_info,acl_users.elements,0,0,
-		 (hash_get_key) check_get_key,0,0);
+  (void) my_hash_init(&acl_check_hosts,system_charset_info,
+                      acl_users.elements, 0, 0,
+                      (my_hash_get_key) check_get_key, 0, 0);
   if (!allow_all_hosts)
   {
     for (uint i=0 ; i < acl_users.elements ; i++)
@@ -1441,8 +1442,9 @@ static void init_check_host(void)
 	if (j == acl_wild_hosts.elements)	// If new
 	  (void) push_dynamic(&acl_wild_hosts,(uchar*) &acl_user->host);
       }
-      else if (!hash_search(&acl_check_hosts,(uchar*) acl_user->host.hostname,
-			    strlen(acl_user->host.hostname)))
+      else if (!my_hash_search(&acl_check_hosts,(uchar*)
+                               acl_user->host.hostname,
+                               strlen(acl_user->host.hostname)))
       {
 	if (my_hash_insert(&acl_check_hosts,(uchar*) acl_user))
 	{					// End of memory
@@ -1469,7 +1471,7 @@ static void init_check_host(void)
 void rebuild_check_host(void)
 {
   delete_dynamic(&acl_wild_hosts);
-  hash_free(&acl_check_hosts);
+  my_hash_free(&acl_check_hosts);
   init_check_host();
 }
 
@@ -1482,8 +1484,8 @@ bool acl_check_host(const char *host, co
     return 0;
   pthread_mutex_lock(&acl_cache->lock);
 
-  if (host && hash_search(&acl_check_hosts,(uchar*) host,strlen(host)) ||
-      ip && hash_search(&acl_check_hosts,(uchar*) ip, strlen(ip)))
+  if (host && my_hash_search(&acl_check_hosts,(uchar*) host,strlen(host)) ||
+      ip && my_hash_search(&acl_check_hosts,(uchar*) ip, strlen(ip)))
   {
     pthread_mutex_unlock(&acl_cache->lock);
     return 0;					// Found host
@@ -2287,8 +2289,8 @@ GRANT_TABLE::GRANT_TABLE(const char *h, 
                 	 const char *t, ulong p, ulong c)
   :GRANT_NAME(h,d,u,t,p), cols(c)
 {
-  (void) hash_init2(&hash_columns,4,system_charset_info,
-                   0,0,0, (hash_get_key) get_key_column,0,0);
+  (void) my_hash_init2(&hash_columns,4,system_charset_info,
+                   0,0,0, (my_hash_get_key) get_key_column,0,0);
 }
 
 
@@ -2328,15 +2330,15 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TA
   if (!db || !tname)
   {
     /* Wrong table row; Ignore it */
-    hash_clear(&hash_columns);                  /* allow for destruction */
+    my_hash_clear(&hash_columns);               /* allow for destruction */
     cols= 0;
     return;
   }
   cols= (ulong) form->field[7]->val_int();
   cols =  fix_rights_for_column(cols);
 
-  (void) hash_init2(&hash_columns,4,system_charset_info,
-                   0,0,0, (hash_get_key) get_key_column,0,0);
+  (void) my_hash_init2(&hash_columns,4,system_charset_info,
+                   0,0,0, (my_hash_get_key) get_key_column,0,0);
   if (cols)
   {
     uint key_prefix_len;
@@ -2388,7 +2390,7 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TA
 
 GRANT_TABLE::~GRANT_TABLE()
 {
-  hash_free(&hash_columns);
+  my_hash_free(&hash_columns);
 }
 
 
@@ -2402,7 +2404,7 @@ static uchar* get_grant_table(GRANT_NAME
 
 void free_grant_table(GRANT_TABLE *grant_table)
 {
-  hash_free(&grant_table->hash_columns);
+  my_hash_free(&grant_table->hash_columns);
 }
 
 
@@ -2420,11 +2422,11 @@ static GRANT_NAME *name_hash_search(HASH
   HASH_SEARCH_STATE state;
 
   len  = (uint) (strmov(strmov(strmov(helping,user)+1,db)+1,tname)-helping)+ 1;
-  for (grant_name= (GRANT_NAME*) hash_first(name_hash, (uchar*) helping,
-                                            len, &state);
+  for (grant_name= (GRANT_NAME*) my_hash_first(name_hash, (uchar*) helping,
+                                               len, &state);
        grant_name ;
-       grant_name= (GRANT_NAME*) hash_next(name_hash,(uchar*) helping,
-                                           len, &state))
+       grant_name= (GRANT_NAME*) my_hash_next(name_hash,(uchar*) helping,
+                                              len, &state))
   {
     if (exact)
     {
@@ -2468,7 +2470,8 @@ table_hash_search(const char *host, cons
 inline GRANT_COLUMN *
 column_hash_search(GRANT_TABLE *t, const char *cname, uint length)
 {
-  return (GRANT_COLUMN*) hash_search(&t->hash_columns, (uchar*) cname,length);
+  return (GRANT_COLUMN*) my_hash_search(&t->hash_columns,
+                                        (uchar*) cname, length);
 }
 
 
@@ -2648,7 +2651,7 @@ static int replace_column_table(GRANT_TA
 	    goto end;				/* purecov: deadcode */
 	  }
 	  if (grant_column)
-	    hash_delete(&g_t->hash_columns,(uchar*) grant_column);
+	    my_hash_delete(&g_t->hash_columns,(uchar*) grant_column);
 	}
       }
     } while (!table->file->index_next(table->record[0]) &&
@@ -2774,7 +2777,7 @@ static int replace_table_table(THD *thd,
   }
   else
   {
-    hash_delete(&column_priv_hash,(uchar*) grant_table);
+    my_hash_delete(&column_priv_hash,(uchar*) grant_table);
   }
   DBUG_RETURN(0);
 
@@ -2895,7 +2898,8 @@ static int replace_routine_table(THD *th
   }
   else
   {
-    hash_delete(is_proc ? &proc_priv_hash : &func_priv_hash,(uchar*) grant_name);
+    my_hash_delete(is_proc ? &proc_priv_hash : &func_priv_hash,(uchar*)
+                   grant_name);
   }
   DBUG_RETURN(0);
 
@@ -3137,8 +3141,8 @@ int mysql_table_grant(THD *thd, TABLE_LI
       column_priv= 0;
       for (uint idx=0 ; idx < grant_table->hash_columns.records ; idx++)
       {
-	grant_column= (GRANT_COLUMN*) hash_element(&grant_table->hash_columns,
-						   idx);
+        grant_column= (GRANT_COLUMN*)
+          my_hash_element(&grant_table->hash_columns, idx);
 	grant_column->rights&= ~rights;		// Fix other columns
 	column_priv|= grant_column->rights;
       }
@@ -3478,9 +3482,9 @@ bool mysql_grant(THD *thd, const char *d
 void  grant_free(void)
 {
   DBUG_ENTER("grant_free");
-  hash_free(&column_priv_hash);
-  hash_free(&proc_priv_hash);
-  hash_free(&func_priv_hash);
+  my_hash_free(&column_priv_hash);
+  my_hash_free(&proc_priv_hash);
+  my_hash_free(&func_priv_hash);
   free_root(&memex,MYF(0));
   DBUG_VOID_RETURN;
 }
@@ -3536,12 +3540,12 @@ static my_bool grant_load_procs_priv(TAB
   MEM_ROOT **save_mem_root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**,
                                                            THR_MALLOC);
   DBUG_ENTER("grant_load_procs_priv");
-  (void) hash_init(&proc_priv_hash,system_charset_info,
-                   0,0,0, (hash_get_key) get_grant_table,
-                   0,0);
-  (void) hash_init(&func_priv_hash,system_charset_info,
-                   0,0,0, (hash_get_key) get_grant_table,
-                   0,0);
+  (void) my_hash_init(&proc_priv_hash,system_charset_info,
+                      0,0,0, (my_hash_get_key) get_grant_table,
+                      0,0);
+  (void) my_hash_init(&func_priv_hash,system_charset_info,
+                      0,0,0, (my_hash_get_key) get_grant_table,
+                      0,0);
   p_table->file->ha_index_init(0, 1);
   p_table->use_all_columns();
 
@@ -3637,9 +3641,9 @@ static my_bool grant_load(THD *thd, TABL
 
   thd->variables.sql_mode&= ~MODE_PAD_CHAR_TO_FULL_LENGTH;
 
-  (void) hash_init(&column_priv_hash,system_charset_info,
-                   0,0,0, (hash_get_key) get_grant_table,
-                   (hash_free_key) free_grant_table,0);
+  (void) my_hash_init(&column_priv_hash,system_charset_info,
+                      0,0,0, (my_hash_get_key) get_grant_table,
+                      (my_hash_free_key) free_grant_table,0);
 
   t_table = tables[0].table;
   c_table = tables[1].table;
@@ -3743,8 +3747,8 @@ static my_bool grant_reload_procs_priv(T
   }
   else
   {
-    hash_free(&old_proc_priv_hash);
-    hash_free(&old_func_priv_hash);
+    my_hash_free(&old_proc_priv_hash);
+    my_hash_free(&old_func_priv_hash);
   }
   rw_unlock(&LOCK_grant);
 
@@ -3815,7 +3819,7 @@ my_bool grant_reload(THD *thd)
   }
   else
   {
-    hash_free(&old_column_priv_hash);
+    my_hash_free(&old_column_priv_hash);
     free_root(&old_mem,MYF(0));
   }
   rw_unlock(&LOCK_grant);
@@ -4318,7 +4322,7 @@ static bool check_grant_db_routine(THD *
 
   for (uint idx= 0; idx < hash->records; ++idx)
   {
-    GRANT_NAME *item= (GRANT_NAME*) hash_element(hash, idx);
+    GRANT_NAME *item= (GRANT_NAME*) my_hash_element(hash, idx);
 
     if (strcmp(item->user, sctx->priv_user) == 0 &&
         strcmp(item->db, db) == 0 &&
@@ -4351,8 +4355,9 @@ bool check_grant_db(THD *thd,const char 
 
   for (uint idx=0 ; idx < column_priv_hash.records ; idx++)
   {
-    GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&column_priv_hash,
-							  idx);
+    GRANT_TABLE *grant_table= (GRANT_TABLE*)
+      my_hash_element(&column_priv_hash,
+                      idx);
     if (len < grant_table->key_length &&
 	!memcmp(grant_table->hash_key,helping,len) &&
         compare_hostname(&grant_table->host, sctx->host, sctx->ip))
@@ -4824,8 +4829,8 @@ bool mysql_show_grants(THD *thd,LEX_USER
   for (index=0 ; index < column_priv_hash.records ; index++)
   {
     const char *user, *host;
-    GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&column_priv_hash,
-							  index);
+    GRANT_TABLE *grant_table= (GRANT_TABLE*)
+      my_hash_element(&column_priv_hash, index);
 
     if (!(user=grant_table->user))
       user= "";
@@ -4878,7 +4883,7 @@ bool mysql_show_grants(THD *thd,LEX_USER
 		     col_index++)
 		{
 		  GRANT_COLUMN *grant_column = (GRANT_COLUMN*)
-		    hash_element(&grant_table->hash_columns,col_index);
+                    my_hash_element(&grant_table->hash_columns,col_index);
 		  if (grant_column->rights & j)
 		  {
 		    if (!found_col)
@@ -4968,7 +4973,7 @@ static int show_routine_grants(THD* thd,
   for (index=0 ; index < hash->records ; index++)
   {
     const char *user, *host;
-    GRANT_NAME *grant_proc= (GRANT_NAME*) hash_element(hash, index);
+    GRANT_NAME *grant_proc= (GRANT_NAME*) my_hash_element(hash, index);
 
     if (!(user=grant_proc->user))
       user= "";
@@ -5485,13 +5490,13 @@ static int handle_grant_struct(uint stru
       break;
 
     case 2:
-      grant_name= (GRANT_NAME*) hash_element(&column_priv_hash, idx);
+      grant_name= (GRANT_NAME*) my_hash_element(&column_priv_hash, idx);
       user= grant_name->user;
       host= grant_name->host.hostname;
       break;
 
     case 3:
-      grant_name= (GRANT_NAME*) hash_element(&proc_priv_hash, idx);
+      grant_name= (GRANT_NAME*) my_hash_element(&proc_priv_hash, idx);
       user= grant_name->user;
       host= grant_name->host.hostname;
       break;
@@ -5524,11 +5529,11 @@ static int handle_grant_struct(uint stru
         break;
 
       case 2:
-        hash_delete(&column_priv_hash, (uchar*) grant_name);
+        my_hash_delete(&column_priv_hash, (uchar*) grant_name);
 	break;
 
       case 3:
-        hash_delete(&proc_priv_hash, (uchar*) grant_name);
+        my_hash_delete(&proc_priv_hash, (uchar*) grant_name);
 	break;
       }
       elements--;
@@ -6036,8 +6041,8 @@ bool mysql_revoke_all(THD *thd,  List <L
       for (counter= 0, revoked= 0 ; counter < column_priv_hash.records ; )
       {
 	const char *user,*host;
-	GRANT_TABLE *grant_table= (GRANT_TABLE*)hash_element(&column_priv_hash,
-							     counter);
+        GRANT_TABLE *grant_table=
+          (GRANT_TABLE*) my_hash_element(&column_priv_hash, counter);
 	if (!(user=grant_table->user))
 	  user= "";
 	if (!(host=grant_table->host.hostname))
@@ -6083,7 +6088,7 @@ bool mysql_revoke_all(THD *thd,  List <L
       for (counter= 0, revoked= 0 ; counter < hash->records ; )
       {
 	const char *user,*host;
-	GRANT_NAME *grant_proc= (GRANT_NAME*) hash_element(hash, counter);
+        GRANT_NAME *grant_proc= (GRANT_NAME*) my_hash_element(hash, counter);
 	if (!(user=grant_proc->user))
 	  user= "";
 	if (!(host=grant_proc->host.hostname))
@@ -6222,7 +6227,7 @@ bool sp_revoke_privileges(THD *thd, cons
   {
     for (counter= 0, revoked= 0 ; counter < hash->records ; )
     {
-      GRANT_NAME *grant_proc= (GRANT_NAME*) hash_element(hash, counter);
+      GRANT_NAME *grant_proc= (GRANT_NAME*) my_hash_element(hash, counter);
       if (!my_strcasecmp(system_charset_info, grant_proc->db, sp_db) &&
 	  !my_strcasecmp(system_charset_info, grant_proc->tname, sp_name))
       {
@@ -6602,7 +6607,7 @@ int fill_schema_table_privileges(THD *th
   for (index=0 ; index < column_priv_hash.records ; index++)
   {
     const char *user, *host, *is_grantable= "YES";
-    GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&column_priv_hash,
+    GRANT_TABLE *grant_table= (GRANT_TABLE*) my_hash_element(&column_priv_hash,
 							  index);
     if (!(user=grant_table->user))
       user= "";
@@ -6685,7 +6690,7 @@ int fill_schema_column_privileges(THD *t
   for (index=0 ; index < column_priv_hash.records ; index++)
   {
     const char *user, *host, *is_grantable= "YES";
-    GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&column_priv_hash,
+    GRANT_TABLE *grant_table= (GRANT_TABLE*) my_hash_element(&column_priv_hash,
 							  index);
     if (!(user=grant_table->user))
       user= "";
@@ -6720,7 +6725,7 @@ int fill_schema_column_privileges(THD *t
                  col_index++)
             {
               GRANT_COLUMN *grant_column = (GRANT_COLUMN*)
-                hash_element(&grant_table->hash_columns,col_index);
+                my_hash_element(&grant_table->hash_columns,col_index);
               if ((grant_column->rights & j) && (table_access & j))
               {
                 if (update_schema_privilege(thd, table, buff, grant_table->db,

=== modified file 'sql/sql_base.cc'
--- a/sql/sql_base.cc	2009-02-09 23:03:35 +0000
+++ b/sql/sql_base.cc	2009-02-11 12:11:20 +0000
@@ -149,7 +149,7 @@ static void check_unused(void)
   }
   for (idx=0 ; idx < table_def_cache.records ; idx++)
   {
-    share= (TABLE_SHARE*) hash_element(&table_def_cache, idx);
+    share= (TABLE_SHARE*) my_hash_element(&table_def_cache, idx);
 
     I_P_List_iterator<TABLE, TABLE_share> it(share->free_tables);
     while ((entry= it++))
@@ -262,9 +262,9 @@ bool table_def_init(void)
   oldest_unused_share= &end_of_unused_share;
   end_of_unused_share.prev= &oldest_unused_share;
 
-  return hash_init(&table_def_cache, &my_charset_bin, table_def_size,
-		   0, 0, table_def_key,
-		   (hash_free_key) table_def_free_entry, 0) != 0;
+  return my_hash_init(&table_def_cache, &my_charset_bin, table_def_size,
+                      0, 0, table_def_key,
+                      (my_hash_free_key) table_def_free_entry, 0) != 0;
 }
 
 
@@ -277,7 +277,7 @@ void table_def_free(void)
     close_cached_tables(NULL, NULL, FALSE, FALSE);
     table_def_inited= 0;
     /* Free table definitions. */
-    hash_free(&table_def_cache);
+    my_hash_free(&table_def_cache);
   }
   DBUG_VOID_RETURN;
 }
@@ -442,8 +442,8 @@ TABLE_SHARE *get_table_share(THD *thd, T
                                 table_list->table_name));
 
   /* Read table definition from cache */
-  if ((share= (TABLE_SHARE*) hash_search(&table_def_cache,(uchar*) key,
-                                         key_length)))
+  if ((share= (TABLE_SHARE*) my_hash_search(&table_def_cache,(uchar*) key,
+                                            key_length)))
     goto found;
 
   if (!(share= alloc_table_share(table_list, key, key_length)))
@@ -474,7 +474,7 @@ TABLE_SHARE *get_table_share(THD *thd, T
   if (open_table_def(thd, share, db_flags))
   {
     *error= share->error;
-    (void) hash_delete(&table_def_cache, (uchar*) share);
+    (void) my_hash_delete(&table_def_cache, (uchar*) share);
     DBUG_RETURN(0);
   }
   share->ref_count++;				// Mark in use
@@ -515,7 +515,7 @@ found:
    /* Free cache if too big */
   while (table_def_cache.records > table_def_size &&
          oldest_unused_share->next)
-    hash_delete(&table_def_cache, (uchar*) oldest_unused_share);
+    my_hash_delete(&table_def_cache, (uchar*) oldest_unused_share);
 
   DBUG_PRINT("exit", ("share: %p  ref_count: %u",
                       share, share->ref_count));
@@ -653,7 +653,7 @@ void release_table_share(TABLE_SHARE *sh
   if (to_be_deleted)
   {
     DBUG_PRINT("info", ("Deleting share"));
-    hash_delete(&table_def_cache, (uchar*) share);
+    my_hash_delete(&table_def_cache, (uchar*) share);
   }
   DBUG_VOID_RETURN;
 }
@@ -682,7 +682,8 @@ TABLE_SHARE *get_cached_table_share(cons
   table_list.db= (char*) db;
   table_list.table_name= (char*) table_name;
   key_length= create_table_def_key((THD*) 0, key, &table_list, 0);
-  return (TABLE_SHARE*) hash_search(&table_def_cache,(uchar*) key, key_length);
+  return (TABLE_SHARE*) my_hash_search(&table_def_cache,
+                                       (uchar*) key, key_length);
 }  
 
 
@@ -737,7 +738,7 @@ OPEN_TABLE_LIST *list_open_tables(THD *t
 
   for (uint idx=0 ; result == 0 && idx < table_def_cache.records; idx++)
   {
-    TABLE_SHARE *share= (TABLE_SHARE *)hash_element(&table_def_cache, idx);
+    TABLE_SHARE *share= (TABLE_SHARE *)my_hash_element(&table_def_cache, idx);
 
     if (db && my_strcasecmp(system_charset_info, db, share->db.str))
       continue;
@@ -913,7 +914,7 @@ bool close_cached_tables(THD *thd, TABLE
       free_cache_entry(unused_tables);
     /* Free table shares which were not freed implicitly by loop above. */
     while (oldest_unused_share->next)
-      (void) hash_delete(&table_def_cache, (uchar*) oldest_unused_share);
+      (void) my_hash_delete(&table_def_cache, (uchar*) oldest_unused_share);
   }
   else
   {
@@ -995,7 +996,8 @@ bool close_cached_tables(THD *thd, TABLE
     {
       for (uint idx=0 ; idx < table_def_cache.records ; idx++)
       {
-        TABLE_SHARE *share=(TABLE_SHARE*) hash_element(&table_def_cache, idx);
+        TABLE_SHARE *share=(TABLE_SHARE*) my_hash_element(&table_def_cache,
+                                                          idx);
         if (share->version != refresh_version)
         {
           found= TRUE;
@@ -1067,7 +1069,7 @@ bool close_cached_connection_tables(THD 
 
   for (idx= 0; idx < table_def_cache.records; idx++)
   {
-    TABLE_SHARE *share= (TABLE_SHARE *) hash_element(&table_def_cache, idx);
+    TABLE_SHARE *share= (TABLE_SHARE *) my_hash_element(&table_def_cache, idx);
 
     /* Ignore if table is not open or does not have a connect_string */
     if (!share->connect_string.length || !share->ref_count)
@@ -3812,7 +3814,7 @@ int open_tables(THD *thd, TABLE_LIST **s
         Let us free memory used by 'sroutines' hash here since we never
         call destructor for this LEX.
       */
-      hash_free(&tables->view->sroutines);
+      my_hash_free(&tables->view->sroutines);
       goto process_view_routines;
     }
 
@@ -5045,8 +5047,8 @@ find_field_in_table(THD *thd, TABLE *tab
     field_ptr= table->field + cached_field_index;
   else if (table->s->name_hash.records)
   {
-    field_ptr= (Field**) hash_search(&table->s->name_hash, (uchar*) name,
-                                     length);
+    field_ptr= (Field**) my_hash_search(&table->s->name_hash, (uchar*) name,
+                                        length);
     if (field_ptr)
     {
       /*
@@ -5294,8 +5296,8 @@ Field *find_field_in_table_sef(TABLE *ta
   Field **field_ptr;
   if (table->s->name_hash.records)
   {
-    field_ptr= (Field**)hash_search(&table->s->name_hash,(uchar*) name,
-                                    strlen(name));
+    field_ptr= (Field**)my_hash_search(&table->s->name_hash,(uchar*) name,
+                                       strlen(name));
     if (field_ptr)
     {
       /*
@@ -7648,8 +7650,8 @@ void tdc_remove_table(THD *thd, enum_tdc
 
   key_length=(uint) (strmov(strmov(key,db)+1,table_name)-key)+1;
 
-  if ((share= (TABLE_SHARE*) hash_search(&table_def_cache,(uchar*) key,
-                                         key_length)))
+  if ((share= (TABLE_SHARE*) my_hash_search(&table_def_cache,(uchar*) key,
+                                            key_length)))
   {
     if (share->ref_count)
     {
@@ -7678,7 +7680,7 @@ void tdc_remove_table(THD *thd, enum_tdc
         free_cache_entry(table);
     }
     else
-      (void) hash_delete(&table_def_cache, (uchar*) share);
+      (void) my_hash_delete(&table_def_cache, (uchar*) share);
   }
 }
 
@@ -7714,8 +7716,9 @@ static bool tdc_wait_for_old_versions(TH
     while ((lock_data= it++))
     {
       mdl_get_tdc_key(lock_data, &key);
-      if ((share= (TABLE_SHARE*) hash_search(&table_def_cache, (uchar*) key.str,
-                                             key.length)) &&
+      if ((share= (TABLE_SHARE*) my_hash_search(&table_def_cache,
+                                                (uchar*) key.str,
+                                                key.length)) &&
           share->version != refresh_version)
         break;
     }

=== modified file 'sql/sql_cache.cc'
--- a/sql/sql_cache.cc	2009-01-30 14:13:39 +0000
+++ b/sql/sql_cache.cc	2009-02-04 10:49:16 +0000
@@ -1112,7 +1112,7 @@ def_week_frmt: %lu, in_trans: %d, autoco
 
     /* Check if another thread is processing the same query? */
     Query_cache_block *competitor = (Query_cache_block *)
-      hash_search(&queries, (uchar*) thd->query, tot_length);
+      my_hash_search(&queries, (uchar*) thd->query, tot_length);
     DBUG_PRINT("qcache", ("competitor %p", competitor));
     if (competitor == 0)
     {
@@ -1141,7 +1141,7 @@ def_week_frmt: %lu, in_trans: %d, autoco
 	{
 	  refused++;
 	  DBUG_PRINT("warning", ("tables list including failed"));
-	  hash_delete(&queries, (uchar *) query_block);
+	  my_hash_delete(&queries, (uchar *) query_block);
 	  header->unlock_n_destroy();
 	  free_memory_block(query_block);
 	  STRUCT_UNLOCK(&structure_guard_mutex);
@@ -1333,8 +1333,8 @@ def_week_frmt: %lu, in_trans: %d, autoco
                           (int)flags.autocommit));
   memcpy((uchar *)(sql + (tot_length - QUERY_CACHE_FLAGS_SIZE)),
 	 (uchar*) &flags, QUERY_CACHE_FLAGS_SIZE);
-  query_block = (Query_cache_block *)  hash_search(&queries, (uchar*) sql,
-						   tot_length);
+  query_block = (Query_cache_block *)  my_hash_search(&queries, (uchar*) sql,
+                                                      tot_length);
   /* Quick abort on unlocked data */
   if (query_block == 0 ||
       query_block->query()->result() == 0 ||
@@ -1363,7 +1363,7 @@ def_week_frmt: %lu, in_trans: %d, autoco
   }
   DBUG_PRINT("qcache", ("Query have result %p", query));
 
-  if ((thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) &&
+  if (thd->in_multi_stmt_transaction() &&
       (query->tables_type() & HA_CACHE_TBL_TRANSACT))
   {
     DBUG_PRINT("qcache",
@@ -1521,8 +1521,7 @@ void Query_cache::invalidate(THD *thd, T
   if (is_disabled())
     DBUG_VOID_RETURN;
 
-  using_transactions= using_transactions &&
-    (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
+  using_transactions= using_transactions && thd->in_multi_stmt_transaction();
   for (; tables_used; tables_used= tables_used->next_local)
   {
     DBUG_ASSERT(!using_transactions || tables_used->table!=0);
@@ -1603,8 +1602,7 @@ void Query_cache::invalidate(THD *thd, T
   if (is_disabled())
     DBUG_VOID_RETURN;
 
-  using_transactions= using_transactions &&
-    (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
+  using_transactions= using_transactions && thd->in_multi_stmt_transaction();
   if (using_transactions && 
       (table->file->table_cache_type() == HA_CACHE_TBL_TRANSACT))
     thd->add_changed_table(table);
@@ -1622,8 +1620,7 @@ void Query_cache::invalidate(THD *thd, c
   if (is_disabled())
     DBUG_VOID_RETURN;
 
-  using_transactions= using_transactions &&
-    (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
+  using_transactions= using_transactions && thd->in_multi_stmt_transaction();
   if (using_transactions) // used for innodb => has_transactions() is TRUE
     thd->add_changed_table(key, key_length);
   else
@@ -2009,8 +2006,8 @@ ulong Query_cache::init_cache()
 
   DUMP(this);
 
-  (void) hash_init(&queries, &my_charset_bin, def_query_hash_size, 0, 0,
-		 query_cache_query_get_key, 0, 0);
+  (void) my_hash_init(&queries, &my_charset_bin, def_query_hash_size, 0, 0,
+                      query_cache_query_get_key, 0, 0);
 #ifndef FN_NO_CASE_SENCE
   /*
     If lower_case_table_names!=0 then db and table names are already 
@@ -2020,8 +2017,8 @@ ulong Query_cache::init_cache()
     lower_case_table_names == 0 then we should distinguish my_table
     and MY_TABLE cases and so again can use binary collation.
   */
-  (void) hash_init(&tables, &my_charset_bin, def_table_hash_size, 0, 0,
-		 query_cache_table_get_key, 0, 0);
+  (void) my_hash_init(&tables, &my_charset_bin, def_table_hash_size, 0, 0,
+                      query_cache_table_get_key, 0, 0);
 #else
   /*
     On windows, OS/2, MacOS X with HFS+ or any other case insensitive
@@ -2031,10 +2028,11 @@ ulong Query_cache::init_cache()
     file system) and so should use case insensitive collation for
     comparison.
   */
-  (void) hash_init(&tables,
-		 lower_case_table_names ? &my_charset_bin :
-		 files_charset_info,
-		 def_table_hash_size, 0, 0,query_cache_table_get_key, 0, 0);
+  (void) my_hash_init(&tables,
+                      lower_case_table_names ? &my_charset_bin :
+                      files_charset_info,
+                      def_table_hash_size, 0, 0,query_cache_table_get_key,
+                      0, 0);
 #endif
 
   queries_in_cache = 0;
@@ -2084,8 +2082,8 @@ void Query_cache::free_cache()
 
   my_free((uchar*) cache, MYF(MY_ALLOW_ZERO_PTR));
   make_disabled();
-  hash_free(&queries);
-  hash_free(&tables);
+  my_hash_free(&queries);
+  my_hash_free(&tables);
   DBUG_VOID_RETURN;
 }
 
@@ -2278,7 +2276,7 @@ void Query_cache::free_query(Query_cache
 		      query_block,
 		      query_block->query()->length() ));
 
-  hash_delete(&queries,(uchar *) query_block);
+  my_hash_delete(&queries,(uchar *) query_block);
   free_query_internal(query_block);
 
   DBUG_VOID_RETURN;
@@ -2632,7 +2630,7 @@ void
 Query_cache::invalidate_table_internal(THD *thd, uchar *key, uint32 key_length)
 {
   Query_cache_block *table_block=
-    (Query_cache_block*)hash_search(&tables, key, key_length);
+    (Query_cache_block*)my_hash_search(&tables, key, key_length);
   if (table_block)
   {
     Query_cache_block_table *list_root= table_block->table(0);
@@ -2831,7 +2829,7 @@ Query_cache::insert_table(uint key_len, 
   THD *thd= current_thd;
 
   Query_cache_block *table_block= 
-    (Query_cache_block *)hash_search(&tables, (uchar*) key, key_len);
+    (Query_cache_block *) my_hash_search(&tables, (uchar*) key, key_len);
 
   if (table_block &&
       table_block->table()->engine_data() != engine_data)
@@ -2947,7 +2945,7 @@ void Query_cache::unlink_table(Query_cac
     Query_cache_block *table_block= neighbour->block();
     double_linked_list_exclude(table_block,
                                &tables_blocks);
-    hash_delete(&tables,(uchar *) table_block);
+    my_hash_delete(&tables,(uchar *) table_block);
     free_memory_block(table_block);
   }
   DBUG_VOID_RETURN;
@@ -3475,7 +3473,7 @@ Query_cache::is_cacheable(THD *thd, size
                                                 tables_type)))
       DBUG_RETURN(0);
 
-    if ((thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) &&
+    if (thd->in_multi_stmt_transaction() &&
 	((*tables_type)&HA_CACHE_TBL_TRANSACT))
     {
       DBUG_PRINT("qcache", ("not in autocommin mode"));
@@ -3632,7 +3630,7 @@ my_bool Query_cache::move_by_type(uchar 
     uchar *key;
     size_t key_length;
     key=query_cache_table_get_key((uchar*) block, &key_length, 0);
-    hash_first(&tables, (uchar*) key, key_length, &record_idx);
+    my_hash_first(&tables, (uchar*) key, key_length, &record_idx);
 
     block->destroy();
     new_block->init(len);
@@ -3666,7 +3664,7 @@ my_bool Query_cache::move_by_type(uchar 
     /* Fix pointer to table name */
     new_block->table()->table(new_block->table()->db() + tablename_offset);
     /* Fix hash to point at moved block */
-    hash_replace(&tables, &record_idx, (uchar*) new_block);
+    my_hash_replace(&tables, &record_idx, (uchar*) new_block);
 
     DBUG_PRINT("qcache", ("moved %lu bytes to %p, new gap at %p",
 			len, new_block, *border));
@@ -3692,7 +3690,7 @@ my_bool Query_cache::move_by_type(uchar 
     uchar *key;
     size_t key_length;
     key=query_cache_query_get_key((uchar*) block, &key_length, 0);
-    hash_first(&queries, (uchar*) key, key_length, &record_idx);
+    my_hash_first(&queries, (uchar*) key, key_length, &record_idx);
     // Move table of used tables 
     memmove((char*) new_block->table(0), (char*) block->table(0),
 	   ALIGN_SIZE(n_tables*sizeof(Query_cache_block_table)));
@@ -3760,7 +3758,7 @@ my_bool Query_cache::move_by_type(uchar 
       query_cache_tls->first_query_block= new_block;
     }
     /* Fix hash to point at moved block */
-    hash_replace(&queries, &record_idx, (uchar*) new_block);
+    my_hash_replace(&queries, &record_idx, (uchar*) new_block);
     DBUG_PRINT("qcache", ("moved %lu bytes to %p, new gap at %p",
 			len, new_block, *border));
     break;
@@ -4174,13 +4172,13 @@ my_bool Query_cache::check_integrity(boo
   while (is_flushing())
     pthread_cond_wait(&COND_cache_status_changed,&structure_guard_mutex);
 
-  if (hash_check(&queries))
+  if (my_hash_check(&queries))
   {
     DBUG_PRINT("error", ("queries hash is damaged"));
     result = 1;
   }
 
-  if (hash_check(&tables))
+  if (my_hash_check(&tables))
   {
     DBUG_PRINT("error", ("tables hash is damaged"));
     result = 1;
@@ -4347,7 +4345,7 @@ my_bool Query_cache::check_integrity(boo
 			    block, (uint) block->type));
       size_t length;
       uchar *key = query_cache_query_get_key((uchar*) block, &length, 0);
-      uchar* val = hash_search(&queries, key, length);
+      uchar* val = my_hash_search(&queries, key, length);
       if (((uchar*)block) != val)
       {
 	DBUG_PRINT("error", ("block %p found in queries hash like %p",
@@ -4382,7 +4380,7 @@ my_bool Query_cache::check_integrity(boo
 			    block, (uint) block->type));
       size_t length;
       uchar *key = query_cache_table_get_key((uchar*) block, &length, 0);
-      uchar* val = hash_search(&tables, key, length);
+      uchar* val = my_hash_search(&tables, key, length);
       if (((uchar*)block) != val)
       {
 	DBUG_PRINT("error", ("block %p found in tables hash like %p",

=== modified file 'sql/sql_class.cc'
--- a/sql/sql_class.cc	2009-02-09 23:03:35 +0000
+++ b/sql/sql_class.cc	2009-02-11 12:11:20 +0000
@@ -447,7 +447,7 @@ THD::THD()
   killed= NOT_KILLED;
   col_access=0;
   is_slave_error= thread_specific_used= FALSE;
-  hash_clear(&handler_tables_hash);
+  my_hash_clear(&handler_tables_hash);
   tmp_table=0;
   used_tables=0;
   cuted_fields= 0L;
@@ -514,9 +514,9 @@ THD::THD()
   profiling.set_thd(this);
 #endif
   user_connect=(USER_CONN *)0;
-  hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0,
-	    (hash_get_key) get_var_key,
-	    (hash_free_key) free_user_var, 0);
+  my_hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0,
+               (my_hash_get_key) get_var_key,
+               (my_hash_free_key) free_user_var, 0);
 
   sp_proc_cache= NULL;
   sp_func_cache= NULL;
@@ -707,9 +707,9 @@ void THD::change_user(void)
   cleanup_done= 0;
   init();
   stmt_map.reset();
-  hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0,
-	    (hash_get_key) get_var_key,
-	    (hash_free_key) free_user_var, 0);
+  my_hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0,
+               (my_hash_get_key) get_var_key,
+               (my_hash_free_key) free_user_var, 0);
   sp_cache_clear(&sp_proc_cache);
   sp_cache_clear(&sp_func_cache);
 }
@@ -744,7 +744,7 @@ void THD::cleanup(void)
   wt_thd_destroy(&transaction.wt);
   mysql_ha_cleanup(this);
   delete_dynamic(&user_var_events);
-  hash_free(&user_vars);
+  my_hash_free(&user_vars);
   close_temporary_tables(this);
   my_free((char*) variables.time_format, MYF(MY_ALLOW_ZERO_PTR));
   my_free((char*) variables.date_format, MYF(MY_ALLOW_ZERO_PTR));
@@ -1232,8 +1232,7 @@ void THD::add_changed_table(TABLE *table
 {
   DBUG_ENTER("THD::add_changed_table(table)");
 
-  DBUG_ASSERT((options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) &&
-	      table->file->has_transactions());
+  DBUG_ASSERT(in_multi_stmt_transaction() && table->file->has_transactions());
   add_changed_table(table->s->table_cache_key.str,
                     (long) table->s->table_cache_key.length);
   DBUG_VOID_RETURN;
@@ -2411,12 +2410,12 @@ Statement_map::Statement_map() :
     START_STMT_HASH_SIZE = 16,
     START_NAME_HASH_SIZE = 16
   };
-  hash_init(&st_hash, &my_charset_bin, START_STMT_HASH_SIZE, 0, 0,
-            get_statement_id_as_hash_key,
-            delete_statement_as_hash_key, MYF(0));
-  hash_init(&names_hash, system_charset_info, START_NAME_HASH_SIZE, 0, 0,
-            (hash_get_key) get_stmt_name_hash_key,
-            NULL,MYF(0));
+  my_hash_init(&st_hash, &my_charset_bin, START_STMT_HASH_SIZE, 0, 0,
+               get_statement_id_as_hash_key,
+               delete_statement_as_hash_key, MYF(0));
+  my_hash_init(&names_hash, system_charset_info, START_NAME_HASH_SIZE, 0, 0,
+               (my_hash_get_key) get_stmt_name_hash_key,
+               NULL,MYF(0));
 }
 
 
@@ -2481,9 +2480,9 @@ int Statement_map::insert(THD *thd, Stat
 
 err_max:
   if (statement->name.str)
-    hash_delete(&names_hash, (uchar*) statement);
+    my_hash_delete(&names_hash, (uchar*) statement);
 err_names_hash:
-  hash_delete(&st_hash, (uchar*) statement);
+  my_hash_delete(&st_hash, (uchar*) statement);
 err_st_hash:
   return 1;
 }
@@ -2504,9 +2503,9 @@ void Statement_map::erase(Statement *sta
   if (statement == last_found_statement)
     last_found_statement= 0;
   if (statement->name.str)
-    hash_delete(&names_hash, (uchar *) statement);
+    my_hash_delete(&names_hash, (uchar *) statement);
 
-  hash_delete(&st_hash, (uchar *) statement);
+  my_hash_delete(&st_hash, (uchar *) statement);
   pthread_mutex_lock(&LOCK_prepared_stmt_count);
   DBUG_ASSERT(prepared_stmt_count > 0);
   prepared_stmt_count--;
@@ -2536,8 +2535,8 @@ Statement_map::~Statement_map()
   prepared_stmt_count-= st_hash.records;
   pthread_mutex_unlock(&LOCK_prepared_stmt_count);
 
-  hash_free(&names_hash);
-  hash_free(&st_hash);
+  my_hash_free(&names_hash);
+  my_hash_free(&st_hash);
 }
 
 bool select_dumpvar::send_data(List<Item> &items)
@@ -3051,15 +3050,15 @@ void xid_free_hash(void *ptr)
 bool xid_cache_init()
 {
   pthread_mutex_init(&LOCK_xid_cache, MY_MUTEX_INIT_FAST);
-  return hash_init(&xid_cache, &my_charset_bin, 100, 0, 0,
-                   xid_get_hash_key, xid_free_hash, 0) != 0;
+  return my_hash_init(&xid_cache, &my_charset_bin, 100, 0, 0,
+                      xid_get_hash_key, xid_free_hash, 0) != 0;
 }
 
 void xid_cache_free()
 {
-  if (hash_inited(&xid_cache))
+  if (my_hash_inited(&xid_cache))
   {
-    hash_free(&xid_cache);
+    my_hash_free(&xid_cache);
     pthread_mutex_destroy(&LOCK_xid_cache);
   }
 }
@@ -3067,7 +3066,8 @@ void xid_cache_free()
 XID_STATE *xid_cache_search(XID *xid)
 {
   pthread_mutex_lock(&LOCK_xid_cache);
-  XID_STATE *res=(XID_STATE *)hash_search(&xid_cache, xid->key(), xid->key_length());
+  XID_STATE *res=(XID_STATE *)my_hash_search(&xid_cache, xid->key(),
+                                             xid->key_length());
   pthread_mutex_unlock(&LOCK_xid_cache);
   return res;
 }
@@ -3078,7 +3078,7 @@ bool xid_cache_insert(XID *xid, enum xa_
   XID_STATE *xs;
   my_bool res;
   pthread_mutex_lock(&LOCK_xid_cache);
-  if (hash_search(&xid_cache, xid->key(), xid->key_length()))
+  if (my_hash_search(&xid_cache, xid->key(), xid->key_length()))
     res=0;
   else if (!(xs=(XID_STATE *)my_malloc(sizeof(*xs), MYF(MY_WME))))
     res=1;
@@ -3097,8 +3097,8 @@ bool xid_cache_insert(XID *xid, enum xa_
 bool xid_cache_insert(XID_STATE *xid_state)
 {
   pthread_mutex_lock(&LOCK_xid_cache);
-  DBUG_ASSERT(hash_search(&xid_cache, xid_state->xid.key(),
-                          xid_state->xid.key_length())==0);
+  DBUG_ASSERT(my_hash_search(&xid_cache, xid_state->xid.key(),
+                             xid_state->xid.key_length())==0);
   my_bool res=my_hash_insert(&xid_cache, (uchar*)xid_state);
   pthread_mutex_unlock(&LOCK_xid_cache);
   return res;
@@ -3108,7 +3108,7 @@ bool xid_cache_insert(XID_STATE *xid_sta
 void xid_cache_delete(XID_STATE *xid_state)
 {
   pthread_mutex_lock(&LOCK_xid_cache);
-  hash_delete(&xid_cache, (uchar *)xid_state);
+  my_hash_delete(&xid_cache, (uchar *)xid_state);
   pthread_mutex_unlock(&LOCK_xid_cache);
 }
 

=== modified file 'sql/sql_class.h'
--- a/sql/sql_class.h	2009-01-29 21:17:59 +0000
+++ b/sql/sql_class.h	2009-02-04 10:49:16 +0000
@@ -724,8 +724,8 @@ public:
   Statement *find_by_name(LEX_STRING *name)
   {
     Statement *stmt;
-    stmt= (Statement*)hash_search(&names_hash, (uchar*)name->str,
-                                  name->length);
+    stmt= (Statement*)my_hash_search(&names_hash, (uchar*)name->str,
+                                     name->length);
     return stmt;
   }
 
@@ -734,7 +734,7 @@ public:
     if (last_found_statement == 0 || id != last_found_statement->id)
     {
       Statement *stmt;
-      stmt= (Statement *) hash_search(&st_hash, (uchar *) &id, sizeof(id));
+      stmt= (Statement *) my_hash_search(&st_hash, (uchar *) &id, sizeof(id));
       if (stmt && stmt->name.str)
         return NULL;
       last_found_statement= stmt;
@@ -1954,6 +1954,21 @@ public:
   {
     return server_status & SERVER_STATUS_IN_TRANS;
   }
+  /**
+    Returns TRUE if session is in a multi-statement transaction mode.
+
+    OPTION_NOT_AUTOCOMMIT: When autocommit is off, a multi-statement
+    transaction is implicitly started on the first statement after a
+    previous transaction has been ended.
+
+    OPTION_BEGIN: Regardless of the autocommit status, a multi-statement
+    transaction can be explicitly started with the statements "START
+    TRANSACTION", "BEGIN [WORK]", "[COMMIT | ROLLBACK] AND CHAIN", etc.
+  */
+  inline bool in_multi_stmt_transaction()
+  {
+    return options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN);
+  }
   inline bool fill_derived_tables()
   {
     return !stmt_arena->is_stmt_prepare() && !lex->only_view_structure();

=== modified file 'sql/sql_connect.cc'
--- a/sql/sql_connect.cc	2008-12-24 10:48:24 +0000
+++ b/sql/sql_connect.cc	2009-01-31 16:21:19 +0000
@@ -61,7 +61,7 @@ static int get_or_create_user_conn(THD *
   user_len= strlen(user);
   temp_len= (strmov(strmov(temp_user, user)+1, host) - temp_user)+1;
   (void) pthread_mutex_lock(&LOCK_user_conn);
-  if (!(uc = (struct  user_conn *) hash_search(&hash_user_connections,
+  if (!(uc = (struct  user_conn *) my_hash_search(&hash_user_connections,
 					       (uchar*) temp_user, temp_len)))
   {
     /* First connection for user; Create a user connection object */
@@ -191,7 +191,7 @@ void decrease_user_connections(USER_CONN
   if (!--uc->connections && !mqh_used)
   {
     /* Last connection for user; Delete it */
-    (void) hash_delete(&hash_user_connections,(uchar*) uc);
+    (void) my_hash_delete(&hash_user_connections,(uchar*) uc);
   }
   (void) pthread_mutex_unlock(&LOCK_user_conn);
   DBUG_VOID_RETURN;
@@ -537,10 +537,10 @@ extern "C" void free_user(struct user_co
 void init_max_user_conn(void)
 {
 #ifndef NO_EMBEDDED_ACCESS_CHECKS
-  (void) hash_init(&hash_user_connections,system_charset_info,max_connections,
-		   0,0,
-		   (hash_get_key) get_key_conn, (hash_free_key) free_user,
-		   0);
+  (void)
+    my_hash_init(&hash_user_connections,system_charset_info,max_connections,
+                 0,0, (my_hash_get_key) get_key_conn,
+                 (my_hash_free_key) free_user, 0);
 #endif
 }
 
@@ -548,7 +548,7 @@ void init_max_user_conn(void)
 void free_max_user_conn(void)
 {
 #ifndef NO_EMBEDDED_ACCESS_CHECKS
-  hash_free(&hash_user_connections);
+  my_hash_free(&hash_user_connections);
 #endif /* NO_EMBEDDED_ACCESS_CHECKS */
 }
 
@@ -566,8 +566,9 @@ void reset_mqh(LEX_USER *lu, bool get_th
     memcpy(temp_user,lu->user.str,lu->user.length);
     memcpy(temp_user+lu->user.length+1,lu->host.str,lu->host.length);
     temp_user[lu->user.length]='\0'; temp_user[temp_len-1]=0;
-    if ((uc = (struct  user_conn *) hash_search(&hash_user_connections,
-						(uchar*) temp_user, temp_len)))
+    if ((uc = (struct  user_conn *) my_hash_search(&hash_user_connections,
+                                                   (uchar*) temp_user,
+                                                   temp_len)))
     {
       uc->questions=0;
       get_mqh(temp_user,&temp_user[lu->user.length+1],uc);
@@ -580,8 +581,8 @@ void reset_mqh(LEX_USER *lu, bool get_th
     /* for FLUSH PRIVILEGES and FLUSH USER_RESOURCES */
     for (uint idx=0;idx < hash_user_connections.records; idx++)
     {
-      USER_CONN *uc=(struct user_conn *) hash_element(&hash_user_connections,
-						      idx);
+      USER_CONN *uc=(struct user_conn *)
+        my_hash_element(&hash_user_connections, idx);
       if (get_them)
 	get_mqh(uc->user,uc->host,uc);
       uc->questions=0;

=== modified file 'sql/sql_db.cc'
--- a/sql/sql_db.cc	2009-02-05 11:07:37 +0000
+++ b/sql/sql_db.cc	2009-02-11 12:11:20 +0000
@@ -105,8 +105,8 @@ static my_bool lock_db_insert(const char
   
   safe_mutex_assert_owner(&LOCK_lock_db);
 
-  if (!(opt= (my_dblock_t*) hash_search(&lock_db_cache,
-                                        (uchar*) dbname, length)))
+  if (!(opt= (my_dblock_t*) my_hash_search(&lock_db_cache,
+                                           (uchar*) dbname, length)))
   { 
     /* Db is not in the hash, insert it */
     char *tmp_name;
@@ -139,9 +139,9 @@ void lock_db_delete(const char *name, ui
 {
   my_dblock_t *opt;
   safe_mutex_assert_owner(&LOCK_lock_db);
-  if ((opt= (my_dblock_t *)hash_search(&lock_db_cache,
-                                       (const uchar*) name, length)))
-    hash_delete(&lock_db_cache, (uchar*) opt);
+  if ((opt= (my_dblock_t *)my_hash_search(&lock_db_cache,
+                                          (const uchar*) name, length)))
+    my_hash_delete(&lock_db_cache, (uchar*) opt);
 }
 
 
@@ -222,14 +222,14 @@ bool my_database_names_init(void)
   if (!dboptions_init)
   {
     dboptions_init= 1;
-    error= hash_init(&dboptions, lower_case_table_names ? 
-                     &my_charset_bin : system_charset_info,
-                     32, 0, 0, (hash_get_key) dboptions_get_key,
-                     free_dbopt,0) ||
-           hash_init(&lock_db_cache, lower_case_table_names ? 
-                     &my_charset_bin : system_charset_info,
-                     32, 0, 0, (hash_get_key) lock_db_get_key,
-                     lock_db_free_element,0);
+    error= my_hash_init(&dboptions, lower_case_table_names ?
+                        &my_charset_bin : system_charset_info,
+                        32, 0, 0, (my_hash_get_key) dboptions_get_key,
+                        free_dbopt,0) ||
+           my_hash_init(&lock_db_cache, lower_case_table_names ?
+                        &my_charset_bin : system_charset_info,
+                        32, 0, 0, (my_hash_get_key) lock_db_get_key,
+                        lock_db_free_element,0);
 
   }
   return error;
@@ -246,9 +246,9 @@ void my_database_names_free(void)
   if (dboptions_init)
   {
     dboptions_init= 0;
-    hash_free(&dboptions);
+    my_hash_free(&dboptions);
     (void) rwlock_destroy(&LOCK_dboptions);
-    hash_free(&lock_db_cache);
+    my_hash_free(&lock_db_cache);
   }
 }
 
@@ -260,11 +260,11 @@ void my_database_names_free(void)
 void my_dbopt_cleanup(void)
 {
   rw_wrlock(&LOCK_dboptions);
-  hash_free(&dboptions);
-  hash_init(&dboptions, lower_case_table_names ? 
-            &my_charset_bin : system_charset_info,
-            32, 0, 0, (hash_get_key) dboptions_get_key,
-            free_dbopt,0);
+  my_hash_free(&dboptions);
+  my_hash_init(&dboptions, lower_case_table_names ? 
+               &my_charset_bin : system_charset_info,
+               32, 0, 0, (my_hash_get_key) dboptions_get_key,
+               free_dbopt,0);
   rw_unlock(&LOCK_dboptions);
 }
 
@@ -290,7 +290,7 @@ static my_bool get_dbopt(const char *dbn
   length= (uint) strlen(dbname);
   
   rw_rdlock(&LOCK_dboptions);
-  if ((opt= (my_dbopt_t*) hash_search(&dboptions, (uchar*) dbname, length)))
+  if ((opt= (my_dbopt_t*) my_hash_search(&dboptions, (uchar*) dbname, length)))
   {
     create->default_table_charset= opt->charset;
     error= 0;
@@ -322,7 +322,8 @@ static my_bool put_dbopt(const char *dbn
   length= (uint) strlen(dbname);
   
   rw_wrlock(&LOCK_dboptions);
-  if (!(opt= (my_dbopt_t*) hash_search(&dboptions, (uchar*) dbname, length)))
+  if (!(opt= (my_dbopt_t*) my_hash_search(&dboptions, (uchar*) dbname,
+                                          length)))
   { 
     /* Options are not in the hash, insert them */
     char *tmp_name;
@@ -362,9 +363,9 @@ void del_dbopt(const char *path)
 {
   my_dbopt_t *opt;
   rw_wrlock(&LOCK_dboptions);
-  if ((opt= (my_dbopt_t *)hash_search(&dboptions, (const uchar*) path,
-                                      strlen(path))))
-    hash_delete(&dboptions, (uchar*) opt);
+  if ((opt= (my_dbopt_t *)my_hash_search(&dboptions, (const uchar*) path,
+                                         strlen(path))))
+    my_hash_delete(&dboptions, (uchar*) opt);
   rw_unlock(&LOCK_dboptions);
 }
 
@@ -1724,8 +1725,8 @@ lock_databases(THD *thd, const char *db1
 {
   pthread_mutex_lock(&LOCK_lock_db);
   while (!thd->killed &&
-         (hash_search(&lock_db_cache,(uchar*) db1, length1) ||
-          hash_search(&lock_db_cache,(uchar*) db2, length2)))
+         (my_hash_search(&lock_db_cache,(uchar*) db1, length1) ||
+          my_hash_search(&lock_db_cache,(uchar*) db2, length2)))
   {
     wait_for_condition(thd, &LOCK_lock_db, &COND_refresh);
     pthread_mutex_lock(&LOCK_lock_db);

=== modified file 'sql/sql_handler.cc'
--- a/sql/sql_handler.cc	2008-08-07 17:52:43 +0000
+++ b/sql/sql_handler.cc	2009-01-27 02:08:48 +0000
@@ -199,15 +199,15 @@ bool mysql_ha_open(THD *thd, TABLE_LIST 
                       tables->db, tables->table_name, tables->alias,
                       (int) reopen));
 
-  if (! hash_inited(&thd->handler_tables_hash))
+  if (! my_hash_inited(&thd->handler_tables_hash))
   {
     /*
       HASH entries are of type TABLE_LIST.
     */
-    if (hash_init(&thd->handler_tables_hash, &my_charset_latin1,
-                  HANDLER_TABLES_HASH_SIZE, 0, 0,
-                  (hash_get_key) mysql_ha_hash_get_key,
-                  (hash_free_key) mysql_ha_hash_free, 0))
+    if (my_hash_init(&thd->handler_tables_hash, &my_charset_latin1,
+                     HANDLER_TABLES_HASH_SIZE, 0, 0,
+                     (my_hash_get_key) mysql_ha_hash_get_key,
+                     (my_hash_free_key) mysql_ha_hash_free, 0))
     {
       DBUG_PRINT("exit",("ERROR"));
       DBUG_RETURN(TRUE);
@@ -215,8 +215,8 @@ bool mysql_ha_open(THD *thd, TABLE_LIST 
   }
   else if (! reopen) /* Otherwise we have 'tables' already. */
   {
-    if (hash_search(&thd->handler_tables_hash, (uchar*) tables->alias,
-                    strlen(tables->alias) + 1))
+    if (my_hash_search(&thd->handler_tables_hash, (uchar*) tables->alias,
+                       strlen(tables->alias) + 1))
     {
       DBUG_PRINT("info",("duplicate '%s'", tables->alias));
       DBUG_PRINT("exit",("ERROR"));
@@ -328,7 +328,7 @@ err:
   if (hash_tables->table)
     mysql_ha_close_table(thd, hash_tables);
   if (!reopen)
-    hash_delete(&thd->handler_tables_hash, (uchar*) hash_tables);
+    my_hash_delete(&thd->handler_tables_hash, (uchar*) hash_tables);
   DBUG_PRINT("exit",("ERROR"));
   DBUG_RETURN(TRUE);
 }
@@ -358,12 +358,12 @@ bool mysql_ha_close(THD *thd, TABLE_LIST
   DBUG_PRINT("enter",("'%s'.'%s' as '%s'",
                       tables->db, tables->table_name, tables->alias));
 
-  if ((hash_tables= (TABLE_LIST*) hash_search(&thd->handler_tables_hash,
+  if ((hash_tables= (TABLE_LIST*) my_hash_search(&thd->handler_tables_hash,
                                               (uchar*) tables->alias,
                                               strlen(tables->alias) + 1)))
   {
     mysql_ha_close_table(thd, hash_tables);
-    hash_delete(&thd->handler_tables_hash, (uchar*) hash_tables);
+    my_hash_delete(&thd->handler_tables_hash, (uchar*) hash_tables);
   }
   else
   {
@@ -430,9 +430,9 @@ bool mysql_ha_read(THD *thd, TABLE_LIST 
   it++;
 
 retry:
-  if ((hash_tables= (TABLE_LIST*) hash_search(&thd->handler_tables_hash,
-                                              (uchar*) tables->alias,
-                                              strlen(tables->alias) + 1)))
+  if ((hash_tables= (TABLE_LIST*) my_hash_search(&thd->handler_tables_hash,
+                                                 (uchar*) tables->alias,
+                                                 strlen(tables->alias) + 1)))
   {
     table= hash_tables->table;
     DBUG_PRINT("info-in-hash",("'%s'.'%s' as '%s' table: %p",
@@ -703,7 +703,7 @@ static TABLE_LIST *mysql_ha_find(THD *th
   /* search for all handlers with matching table names */
   for (uint i= 0; i < thd->handler_tables_hash.records; i++)
   {
-    hash_tables= (TABLE_LIST*) hash_element(&thd->handler_tables_hash, i);
+    hash_tables= (TABLE_LIST*) my_hash_element(&thd->handler_tables_hash, i);
     for (tables= first; tables; tables= tables->next_local)
     {
       if ((! *tables->db ||
@@ -746,7 +746,7 @@ void mysql_ha_rm_tables(THD *thd, TABLE_
     next= hash_tables->next_local;
     if (hash_tables->table)
       mysql_ha_close_table(thd, hash_tables);
-    hash_delete(&thd->handler_tables_hash, (uchar*) hash_tables);
+    my_hash_delete(&thd->handler_tables_hash, (uchar*) hash_tables);
     hash_tables= next;
   }
 
@@ -772,7 +772,7 @@ void mysql_ha_flush(THD *thd)
 
   for (uint i= 0; i < thd->handler_tables_hash.records; i++)
   {
-    hash_tables= (TABLE_LIST*) hash_element(&thd->handler_tables_hash, i);
+    hash_tables= (TABLE_LIST*) my_hash_element(&thd->handler_tables_hash, i);
     /*
       TABLE::mdl_lock_data is 0 for temporary tables so we need extra check.
     */
@@ -806,12 +806,12 @@ void mysql_ha_cleanup(THD *thd)
 
   for (uint i= 0; i < thd->handler_tables_hash.records; i++)
   {
-    hash_tables= (TABLE_LIST*) hash_element(&thd->handler_tables_hash, i);
+    hash_tables= (TABLE_LIST*) my_hash_element(&thd->handler_tables_hash, i);
     if (hash_tables->table)
       mysql_ha_close_table(thd, hash_tables);
   }
 
-  hash_free(&thd->handler_tables_hash);
+  my_hash_free(&thd->handler_tables_hash);
 
   DBUG_VOID_RETURN;
 }

=== modified file 'sql/sql_lex.cc'
--- a/sql/sql_lex.cc	2009-01-29 21:17:59 +0000
+++ b/sql/sql_lex.cc	2009-02-05 12:49:39 +0000
@@ -2171,7 +2171,7 @@ void Query_tables_list::reset_query_tabl
       We delay real initialization of hash (and therefore related
       memory allocation) until first insertion into this hash.
     */
-    hash_clear(&sroutines);
+    my_hash_clear(&sroutines);
   }
   else if (sroutines.records)
   {
@@ -2194,7 +2194,7 @@ void Query_tables_list::reset_query_tabl
 
 void Query_tables_list::destroy_query_tables_list()
 {
-  hash_free(&sroutines);
+  my_hash_free(&sroutines);
 }
 
 

=== modified file 'sql/sql_parse.cc'
--- a/sql/sql_parse.cc	2009-02-05 11:07:37 +0000
+++ b/sql/sql_parse.cc	2009-02-11 12:11:20 +0000
@@ -5478,7 +5478,7 @@ void mysql_reset_thd_for_next_command(TH
     OPTION_STATUS_NO_TRANS_UPDATE | OPTION_KEEP_LOG to not get warnings
     in ha_rollback_trans() about some tables couldn't be rolled back.
   */
-  if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
+  if (!thd->in_multi_stmt_transaction())
   {
     thd->options&= ~OPTION_KEEP_LOG;
     thd->transaction.all.modified_non_trans_table= FALSE;

=== modified file 'sql/sql_partition.cc'
--- a/sql/sql_partition.cc	2009-01-09 13:25:38 +0000
+++ b/sql/sql_partition.cc	2009-02-04 13:08:05 +0000
@@ -4202,43 +4202,25 @@ uint prep_alter_part_table(THD *thd, TAB
     }
     if (alter_info->flags & ALTER_TABLE_REORG)
     {
-      uint new_part_no, curr_part_no;
+      DBUG_ASSERT(table->s->db_type()->partition_flags);
+      /* 'ALTER TABLE t REORG PARTITION' only allowed with auto partition */
       if (tab_part_info->part_type != HASH_PARTITION ||
-          tab_part_info->use_default_no_partitions)
+          !tab_part_info->use_default_no_partitions ||
+          (table->s->db_type()->partition_flags &&
+           !(table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION)))
       {
         my_error(ER_REORG_NO_PARAM_ERROR, MYF(0));
         DBUG_RETURN(TRUE);
       }
-      new_part_no= table->file->get_default_no_partitions(create_info);
-      curr_part_no= tab_part_info->no_parts;
-      if (new_part_no == curr_part_no)
-      {
-        /*
-          No change is needed, we will have the same number of partitions
-          after the change as before. Thus we can reply ok immediately
-          without any changes at all.
-        */
-        *fast_alter_partition= TRUE;
-        DBUG_RETURN(FALSE);
-      }
-      else if (new_part_no > curr_part_no)
-      {
-        /*
-          We will add more partitions, we use the ADD PARTITION without
-          setting the flag for no default number of partitions
-        */
-        alter_info->flags|= ALTER_ADD_PARTITION;
-        thd->work_part_info->no_parts= new_part_no - curr_part_no;
-      }
-      else
-      {
-        /*
-          We will remove hash partitions, we use the COALESCE PARTITION
-          without setting the flag for no default number of partitions
-        */
-        alter_info->flags|= ALTER_COALESCE_PARTITION;
-        alter_info->no_parts= curr_part_no - new_part_no;
-      }
+      DBUG_ASSERT(!alt_part_info ||
+                  alt_part_info->part_type == NOT_A_PARTITION);
+      /*
+        This is really a table operation, handled by native engines.
+        NDB can handle this fast/online. Skip the partitioning path.
+      */
+      if (alt_part_info)
+        thd->work_part_info= NULL;
+      DBUG_RETURN(FALSE);
     }
     if (table->s->db_type()->alter_partition_flags &&
         (!(flags= table->s->db_type()->alter_partition_flags())))

=== modified file 'sql/sql_plugin.cc'
--- a/sql/sql_plugin.cc	2009-02-09 16:06:35 +0000
+++ b/sql/sql_plugin.cc	2009-02-11 12:11:20 +0000
@@ -566,14 +566,15 @@ static struct st_plugin_int *plugin_find
     for (i= 0; i < MYSQL_MAX_PLUGIN_TYPE_NUM; i++)
     {
       struct st_plugin_int *plugin= (st_plugin_int *)
-        hash_search(&plugin_hash[i], (const uchar *)name->str, name->length);
+        my_hash_search(&plugin_hash[i], (const uchar *)name->str, name->length);
       if (plugin)
         DBUG_RETURN(plugin);
     }
   }
   else
     DBUG_RETURN((st_plugin_int *)
-        hash_search(&plugin_hash[type], (const uchar *)name->str, name->length));
+        my_hash_search(&plugin_hash[type], (const uchar *)name->str,
+                       name->length));
   DBUG_RETURN(0);
 }
 
@@ -850,7 +851,7 @@ static void plugin_del(struct st_plugin_
   safe_mutex_assert_owner(&LOCK_plugin);
   /* Free allocated strings before deleting the plugin. */
   plugin_vars_free_values(plugin->system_vars);
-  hash_delete(&plugin_hash[plugin->plugin->type], (uchar*)plugin);
+  my_hash_delete(&plugin_hash[plugin->plugin->type], (uchar*)plugin);
   if (plugin->plugin_dl)
     plugin_dl_del(&plugin->plugin_dl->dl);
   plugin->state= PLUGIN_IS_FREED;
@@ -1111,8 +1112,8 @@ int plugin_init(int *argc, char **argv, 
   init_alloc_root(&plugin_mem_root, 4096, 4096);
   init_alloc_root(&tmp_root, 4096, 4096);
 
-  if (hash_init(&bookmark_hash, &my_charset_bin, 16, 0, 0,
-                  get_bookmark_hash_key, NULL, HASH_UNIQUE))
+  if (my_hash_init(&bookmark_hash, &my_charset_bin, 16, 0, 0,
+                   get_bookmark_hash_key, NULL, HASH_UNIQUE))
       goto err;
 
 
@@ -1126,8 +1127,8 @@ int plugin_init(int *argc, char **argv, 
 
   for (i= 0; i < MYSQL_MAX_PLUGIN_TYPE_NUM; i++)
   {
-    if (hash_init(&plugin_hash[i], system_charset_info, 16, 0, 0,
-                  get_plugin_hash_key, NULL, HASH_UNIQUE))
+    if (my_hash_init(&plugin_hash[i], system_charset_info, 16, 0, 0,
+                     get_plugin_hash_key, NULL, HASH_UNIQUE))
       goto err;
   }
 
@@ -1607,7 +1608,7 @@ void plugin_shutdown(void)
   /* Dispose of the memory */
 
   for (i= 0; i < MYSQL_MAX_PLUGIN_TYPE_NUM; i++)
-    hash_free(&plugin_hash[i]);
+    my_hash_free(&plugin_hash[i]);
   delete_dynamic(&plugin_array);
 
   count= plugin_dl_array.elements;
@@ -1619,7 +1620,7 @@ void plugin_shutdown(void)
   my_afree(dl);
   delete_dynamic(&plugin_dl_array);
 
-  hash_free(&bookmark_hash);
+  my_hash_free(&bookmark_hash);
   free_root(&plugin_mem_root, MYF(0));
 
   global_variables_dynamic_size= 0;
@@ -1800,7 +1801,7 @@ bool plugin_foreach_with_mask(THD *thd, 
     HASH *hash= plugin_hash + type;
     for (idx= 0; idx < total; idx++)
     {
-      plugin= (struct st_plugin_int *) hash_element(hash, idx);
+      plugin= (struct st_plugin_int *) my_hash_element(hash, idx);
       plugins[idx]= !(plugin->state & state_mask) ? plugin : NULL;
     }
   }
@@ -2199,8 +2200,8 @@ static st_bookmark *find_bookmark(const 
 
   varname[0]= flags & PLUGIN_VAR_TYPEMASK;
 
-  result= (st_bookmark*) hash_search(&bookmark_hash,
-                                     (const uchar*) varname, length - 1);
+  result= (st_bookmark*) my_hash_search(&bookmark_hash,
+                                        (const uchar*) varname, length - 1);
 
   my_afree(varname);
   return result;
@@ -2360,7 +2361,7 @@ static uchar *intern_sys_var_ptr(THD* th
     {
       sys_var_pluginvar *pi;
       sys_var *var;
-      st_bookmark *v= (st_bookmark*) hash_element(&bookmark_hash,idx);
+      st_bookmark *v= (st_bookmark*) my_hash_element(&bookmark_hash,idx);
 
       if (v->version <= thd->variables.dynamic_variables_version ||
           !(var= intern_find_sys_var(v->key + 1, v->name_len, true)) ||
@@ -2454,7 +2455,7 @@ static void cleanup_variables(THD *thd, 
   rw_rdlock(&LOCK_system_variables_hash);
   for (idx= 0; idx < bookmark_hash.records; idx++)
   {
-    v= (st_bookmark*) hash_element(&bookmark_hash, idx);
+    v= (st_bookmark*) my_hash_element(&bookmark_hash, idx);
     if (v->version > vars->dynamic_variables_version ||
         !(var= intern_find_sys_var(v->key + 1, v->name_len, true)) ||
         !(pivar= var->cast_pluginvar()) ||

=== modified file 'sql/sql_prepare.cc'
--- a/sql/sql_prepare.cc	2009-01-18 23:21:43 +0000
+++ b/sql/sql_prepare.cc	2009-01-31 16:21:19 +0000
@@ -1100,9 +1100,9 @@ static bool insert_params_from_vars(Prep
   {
     Item_param *param= *it;
     varname= var_it++;
-    entry= (user_var_entry*)hash_search(&stmt->thd->user_vars,
-                                        (uchar*) varname->str,
-                                         varname->length);
+    entry= (user_var_entry*)my_hash_search(&stmt->thd->user_vars,
+                                           (uchar*) varname->str,
+                                           varname->length);
     if (param->set_from_user_var(stmt->thd, entry) ||
         param->convert_str_value(stmt->thd))
       DBUG_RETURN(1);
@@ -1147,8 +1147,8 @@ static bool insert_params_from_vars_with
     Item_param *param= *it;
     varname= var_it++;
 
-    entry= (user_var_entry *) hash_search(&thd->user_vars, (uchar*) varname->str,
-                                          varname->length);
+    entry= (user_var_entry *) my_hash_search(&thd->user_vars, (uchar*)
+                                             varname->str, varname->length);
     /*
       We have to call the setup_one_conversion_function() here to set
       the parameter's members that might be needed further
@@ -2216,9 +2216,9 @@ static const char *get_dynamic_sql_strin
       convert it for error messages to be uniform.
     */
     if ((entry=
-         (user_var_entry*)hash_search(&thd->user_vars,
-                                      (uchar*)lex->prepared_stmt_code.str,
-                                      lex->prepared_stmt_code.length))
+         (user_var_entry*)my_hash_search(&thd->user_vars,
+                                         (uchar*)lex->prepared_stmt_code.str,
+                                         lex->prepared_stmt_code.length))
         && entry->value)
     {
       my_bool is_var_null;

=== modified file 'sql/sql_repl.cc'
--- a/sql/sql_repl.cc	2009-01-29 21:17:59 +0000
+++ b/sql/sql_repl.cc	2009-02-05 12:49:39 +0000
@@ -353,8 +353,8 @@ static ulonglong get_heartbeat_period(TH
   my_bool null_value;
   LEX_STRING name=  { C_STRING_WITH_LEN("master_heartbeat_period")};
   user_var_entry *entry= 
-    (user_var_entry*) hash_search(&thd->user_vars, (uchar*) name.str,
-                                  name.length);
+    (user_var_entry*) my_hash_search(&thd->user_vars, (uchar*) name.str,
+                                     name.length);
   return entry? entry->val_int(&null_value) : 0;
 }
 

=== modified file 'sql/sql_select.cc'
--- a/sql/sql_select.cc	2009-02-05 09:45:14 +0000
+++ b/sql/sql_select.cc	2009-02-11 12:11:20 +0000
@@ -19284,8 +19284,8 @@ static int remove_dup_with_hash_index(TH
     extra_length= ALIGN_SIZE(key_length)-key_length;
   }
 
-  if (hash_init(&hash, &my_charset_bin, (uint) file->stats.records, 0, 
-		key_length, (hash_get_key) 0, 0, 0))
+  if (my_hash_init(&hash, &my_charset_bin, (uint) file->stats.records, 0, 
+                   key_length, (my_hash_get_key) 0, 0, 0))
   {
     my_free((char*) key_buffer,MYF(0));
     DBUG_RETURN(1);
@@ -19326,7 +19326,7 @@ static int remove_dup_with_hash_index(TH
       key_pos+= *field_length++;
     }
     /* Check if it exists before */
-    if (hash_search(&hash, org_key_pos, key_length))
+    if (my_hash_search(&hash, org_key_pos, key_length))
     {
       /* Duplicated found ; Remove the row */
       if ((error=file->ha_delete_row(record)))
@@ -19337,14 +19337,14 @@ static int remove_dup_with_hash_index(TH
     key_pos+=extra_length;
   }
   my_free((char*) key_buffer,MYF(0));
-  hash_free(&hash);
+  my_hash_free(&hash);
   file->extra(HA_EXTRA_NO_CACHE);
   (void) file->ha_rnd_end();
   DBUG_RETURN(0);
 
 err:
   my_free((char*) key_buffer,MYF(0));
-  hash_free(&hash);
+  my_hash_free(&hash);
   file->extra(HA_EXTRA_NO_CACHE);
   (void) file->ha_rnd_end();
   if (error)

=== modified file 'sql/sql_servers.cc'
--- a/sql/sql_servers.cc	2008-12-04 16:50:07 +0000
+++ b/sql/sql_servers.cc	2009-01-27 02:08:48 +0000
@@ -120,8 +120,8 @@ bool servers_init(bool dont_read_servers
     DBUG_RETURN(TRUE);
 
   /* initialise our servers cache */
-  if (hash_init(&servers_cache, system_charset_info, 32, 0, 0,
-                (hash_get_key) servers_cache_get_key, 0, 0))
+  if (my_hash_init(&servers_cache, system_charset_info, 32, 0, 0,
+                   (my_hash_get_key) servers_cache_get_key, 0, 0))
   {
     return_val= TRUE; /* we failed, out of memory? */
     goto end;
@@ -644,9 +644,10 @@ delete_server_record_in_cache(LEX_SERVER
                      server_options->server_name_length));
 
 
-  if (!(server= (FOREIGN_SERVER *) hash_search(&servers_cache,
-                                     (uchar*) server_options->server_name,
-                                     server_options->server_name_length)))
+  if (!(server= (FOREIGN_SERVER *)
+        my_hash_search(&servers_cache,
+                       (uchar*) server_options->server_name,
+                       server_options->server_name_length)))
   {
     DBUG_PRINT("info", ("server_name %s length %d not found!",
                         server_options->server_name,
@@ -661,8 +662,8 @@ delete_server_record_in_cache(LEX_SERVER
                      server->server_name,
                      server->server_name_length));
 
-  hash_delete(&servers_cache, (uchar*) server);
-  
+  my_hash_delete(&servers_cache, (uchar*) server);
+
   error= 0;
 
 end:
@@ -769,7 +770,7 @@ int update_server_record_in_cache(FOREIG
   /*
     delete the existing server struct from the server cache
   */
-  hash_delete(&servers_cache, (uchar*)existing);
+  my_hash_delete(&servers_cache, (uchar*)existing);
 
   /*
     Insert the altered server struct into the server cache
@@ -964,8 +965,8 @@ int create_server(THD *thd, LEX_SERVER_O
   rw_wrlock(&THR_LOCK_servers);
 
   /* hit the memory first */
-  if (hash_search(&servers_cache, (uchar*) server_options->server_name,
-				   server_options->server_name_length))
+  if (my_hash_search(&servers_cache, (uchar*) server_options->server_name,
+                     server_options->server_name_length))
     goto end;
 
 
@@ -1013,9 +1014,9 @@ int alter_server(THD *thd, LEX_SERVER_OP
 
   rw_wrlock(&THR_LOCK_servers);
 
-  if (!(existing= (FOREIGN_SERVER *) hash_search(&servers_cache,
-                                                 (uchar*) name.str,
-                                                 name.length)))
+  if (!(existing= (FOREIGN_SERVER *) my_hash_search(&servers_cache,
+                                                    (uchar*) name.str,
+                                                    name.length)))
     goto end;
 
   altered= (FOREIGN_SERVER *)alloc_root(&mem,
@@ -1194,7 +1195,7 @@ prepare_server_struct_for_update(LEX_SER
 void servers_free(bool end)
 {
   DBUG_ENTER("servers_free");
-  if (!hash_inited(&servers_cache))
+  if (!my_hash_inited(&servers_cache))
     DBUG_VOID_RETURN;
   if (!end)
   {
@@ -1204,7 +1205,7 @@ void servers_free(bool end)
   }
   rwlock_destroy(&THR_LOCK_servers);
   free_root(&mem,MYF(0));
-  hash_free(&servers_cache);
+  my_hash_free(&servers_cache);
   DBUG_VOID_RETURN;
 }
 
@@ -1285,9 +1286,9 @@ FOREIGN_SERVER *get_server_by_name(MEM_R
 
   DBUG_PRINT("info", ("locking servers_cache"));
   rw_rdlock(&THR_LOCK_servers);
-  if (!(server= (FOREIGN_SERVER *) hash_search(&servers_cache,
-                                               (uchar*) server_name,
-                                               server_name_length)))
+  if (!(server= (FOREIGN_SERVER *) my_hash_search(&servers_cache,
+                                                  (uchar*) server_name,
+                                                  server_name_length)))
   {
     DBUG_PRINT("info", ("server_name %s length %d not found!",
                         server_name, server_name_length));

=== modified file 'sql/sql_table.cc'
--- a/sql/sql_table.cc	2009-02-05 06:27:55 +0000
+++ b/sql/sql_table.cc	2009-02-11 12:11:20 +0000
@@ -3785,6 +3785,8 @@ static bool lock_table_name_if_not_cache
     }
     else
       *lock_data= 0;
+  } else {
+    DEBUG_SYNC(thd, "locked_table_name");
   }
   return FALSE;
 }
@@ -3813,7 +3815,7 @@ bool mysql_create_table(THD *thd, const 
   /* Wait for any database locks */
   pthread_mutex_lock(&LOCK_lock_db);
   while (!thd->killed &&
-         hash_search(&lock_db_cache,(uchar*) db, strlen(db)))
+         my_hash_search(&lock_db_cache,(uchar*) db, strlen(db)))
   {
     wait_for_condition(thd, &LOCK_lock_db, &COND_refresh);
     pthread_mutex_lock(&LOCK_lock_db);
@@ -4495,8 +4497,8 @@ send_result_message:
     switch (result_code) {
     case HA_ADMIN_NOT_IMPLEMENTED:
       {
-       char buf[MYSQL_ERRMSG_SIZE];
-       uint length=my_snprintf(buf, sizeof(buf),
+	char buf[MYSQL_ERRMSG_SIZE];
+	uint length=my_snprintf(buf, sizeof(buf),
 				ER(ER_CHECK_NOT_IMPLEMENTED), operator_name);
 	protocol->store(STRING_WITH_LEN("note"), system_charset_info);
 	protocol->store(buf, length, system_charset_info);

=== modified file 'sql/sql_test.cc'
--- a/sql/sql_test.cc	2009-01-27 14:53:22 +0000
+++ b/sql/sql_test.cc	2009-01-31 16:21:19 +0000
@@ -82,7 +82,7 @@ void print_cached_tables(void)
 
   for (idx=unused=0 ; idx < table_def_cache.records ; idx++)
   {
-    share= (TABLE_SHARE*) hash_element(&table_def_cache, idx);
+    share= (TABLE_SHARE*) my_hash_element(&table_def_cache, idx);
 
     I_P_List_iterator<TABLE, TABLE_share> it(share->used_tables);
     while ((entry= it++))
@@ -121,7 +121,7 @@ void print_cached_tables(void)
     printf("Unused_links (%d) doesn't match table_def_cache: %d\n", count,
            unused);
   printf("\nCurrent refresh version: %ld\n",refresh_version);
-  if (hash_check(&table_def_cache))
+  if (my_hash_check(&table_def_cache))
     printf("Error: Table definition hash table is corrupted\n");
   fflush(stdout);
   pthread_mutex_unlock(&LOCK_open);

=== modified file 'sql/sql_udf.cc'
--- a/sql/sql_udf.cc	2009-01-16 11:53:32 +0000
+++ b/sql/sql_udf.cc	2009-01-27 02:08:48 +0000
@@ -124,10 +124,10 @@ void udf_init()
   init_sql_alloc(&mem, UDF_ALLOC_BLOCK_SIZE, 0);
   THD *new_thd = new THD;
   if (!new_thd ||
-      hash_init(&udf_hash,system_charset_info,32,0,0,get_hash_key, NULL, 0))
+      my_hash_init(&udf_hash,system_charset_info,32,0,0,get_hash_key, NULL, 0))
   {
     sql_print_error("Can't allocate memory for udf structures");
-    hash_free(&udf_hash);
+    my_hash_free(&udf_hash);
     free_root(&mem,MYF(0));
     delete new_thd;
     DBUG_VOID_RETURN;
@@ -238,20 +238,20 @@ void udf_free()
   DBUG_ENTER("udf_free");
   for (uint idx=0 ; idx < udf_hash.records ; idx++)
   {
-    udf_func *udf=(udf_func*) hash_element(&udf_hash,idx);
+    udf_func *udf=(udf_func*) my_hash_element(&udf_hash,idx);
     if (udf->dlhandle)				// Not closed before
     {
       /* Mark all versions using the same handler as closed */
       for (uint j=idx+1 ;  j < udf_hash.records ; j++)
       {
-	udf_func *tmp=(udf_func*) hash_element(&udf_hash,j);
+	udf_func *tmp=(udf_func*) my_hash_element(&udf_hash,j);
 	if (udf->dlhandle == tmp->dlhandle)
 	  tmp->dlhandle=0;			// Already closed
       }
       dlclose(udf->dlhandle);
     }
   }
-  hash_free(&udf_hash);
+  my_hash_free(&udf_hash);
   free_root(&mem,MYF(0));
   if (initialized)
   {
@@ -267,7 +267,7 @@ static void del_udf(udf_func *udf)
   DBUG_ENTER("del_udf");
   if (!--udf->usage_count)
   {
-    hash_delete(&udf_hash,(uchar*) udf);
+    my_hash_delete(&udf_hash,(uchar*) udf);
     using_udf_functions=udf_hash.records != 0;
   }
   else
@@ -281,7 +281,7 @@ static void del_udf(udf_func *udf)
     uint name_length=udf->name.length;
     udf->name.str=(char*) "*";
     udf->name.length=1;
-    hash_update(&udf_hash,(uchar*) udf,(uchar*) name,name_length);
+    my_hash_update(&udf_hash,(uchar*) udf,(uchar*) name,name_length);
   }
   DBUG_VOID_RETURN;
 }
@@ -301,7 +301,7 @@ void free_udf(udf_func *udf)
       We come here when someone has deleted the udf function
       while another thread still was using the udf
     */
-    hash_delete(&udf_hash,(uchar*) udf);
+    my_hash_delete(&udf_hash,(uchar*) udf);
     using_udf_functions=udf_hash.records != 0;
     if (!find_udf_dl(udf->dl))
       dlclose(udf->dlhandle);
@@ -327,8 +327,8 @@ udf_func *find_udf(const char *name,uint
   else
     rw_rdlock(&THR_LOCK_udf);  /* Called during parsing */
 
-  if ((udf=(udf_func*) hash_search(&udf_hash,(uchar*) name,
-				   length ? length : (uint) strlen(name))))
+  if ((udf=(udf_func*) my_hash_search(&udf_hash,(uchar*) name,
+                                      length ? length : (uint) strlen(name))))
   {
     if (!udf->dlhandle)
       udf=0;					// Could not be opened
@@ -350,7 +350,7 @@ static void *find_udf_dl(const char *dl)
   */
   for (uint idx=0 ; idx < udf_hash.records ; idx++)
   {
-    udf_func *udf=(udf_func*) hash_element(&udf_hash,idx);
+    udf_func *udf=(udf_func*) my_hash_element(&udf_hash,idx);
     if (!strcmp(dl, udf->dl) && udf->dlhandle != NULL)
       DBUG_RETURN(udf->dlhandle);
   }
@@ -436,7 +436,7 @@ int mysql_create_function(THD *thd,udf_f
     thd->clear_current_stmt_binlog_row_based();
 
   rw_wrlock(&THR_LOCK_udf);
-  if ((hash_search(&udf_hash,(uchar*) udf->name.str, udf->name.length)))
+  if ((my_hash_search(&udf_hash,(uchar*) udf->name.str, udf->name.length)))
   {
     my_error(ER_UDF_EXISTS, MYF(0), udf->name.str);
     goto err;
@@ -540,8 +540,8 @@ int mysql_drop_function(THD *thd,const L
     thd->clear_current_stmt_binlog_row_based();
 
   rw_wrlock(&THR_LOCK_udf);  
-  if (!(udf=(udf_func*) hash_search(&udf_hash,(uchar*) udf_name->str,
-				    (uint) udf_name->length)))
+  if (!(udf=(udf_func*) my_hash_search(&udf_hash,(uchar*) udf_name->str,
+                                       (uint) udf_name->length)))
   {
     my_error(ER_FUNCTION_NOT_DEFINED, MYF(0), udf_name->str);
     goto err;

=== modified file 'sql/table.cc'
--- a/sql/table.cc	2009-02-05 11:07:37 +0000
+++ b/sql/table.cc	2009-02-11 12:11:20 +0000
@@ -474,7 +474,7 @@ void free_table_share(TABLE_SHARE *share
   /* The mutex is initialized only for shares that are part of the TDC */
   if (share->tmp_table == NO_TMP_TABLE)
     pthread_mutex_destroy(&share->LOCK_ha_data);
-  hash_free(&share->name_hash);
+  my_hash_free(&share->name_hash);
 
   plugin_unlock(NULL, share->db_plugin);
   share->db_plugin= NULL;
@@ -1263,10 +1263,10 @@ static int open_binary_frm(THD *thd, TAB
 
   use_hash= share->fields >= MAX_FIELDS_BEFORE_HASH;
   if (use_hash)
-    use_hash= !hash_init(&share->name_hash,
-			 system_charset_info,
-			 share->fields,0,0,
-			 (hash_get_key) get_field_name,0,0);
+    use_hash= !my_hash_init(&share->name_hash,
+                            system_charset_info,
+                            share->fields,0,0,
+                            (my_hash_get_key) get_field_name,0,0);
 
   for (i=0 ; i < share->fields; i++, strpos+=field_pack_length, field_ptr++)
   {
@@ -1710,7 +1710,7 @@ static int open_binary_frm(THD *thd, TAB
   delete handler_file;
 #ifndef DBUG_OFF
   if (use_hash)
-    (void) hash_check(&share->name_hash);
+    (void) my_hash_check(&share->name_hash);
 #endif
   if (buffbuff)
     my_free(buffbuff, MYF(0));
@@ -1725,7 +1725,7 @@ static int open_binary_frm(THD *thd, TAB
   x_free((uchar*) disk_buff);
   delete crypted;
   delete handler_file;
-  hash_free(&share->name_hash);
+  my_hash_free(&share->name_hash);
 
   open_table_error(share, error, share->open_errno, errarg);
   DBUG_RETURN(error);

=== modified file 'sql/transaction.cc'
--- a/sql/transaction.cc	2008-12-04 16:50:07 +0000
+++ b/sql/transaction.cc	2009-01-26 17:19:14 +0000
@@ -165,8 +165,7 @@ bool trans_commit_implicit(THD *thd)
   if (trans_check(thd))
     DBUG_RETURN(TRUE);
 
-  if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN |
-                      OPTION_TABLE_LOCK))
+  if (thd->in_multi_stmt_transaction() || (thd->options & OPTION_TABLE_LOCK))
   {
     /* Safety if one did "drop table" on locked tables */
     if (!thd->locked_tables_mode)
@@ -304,8 +303,8 @@ bool trans_savepoint(THD *thd, LEX_STRIN
   SAVEPOINT **sv, *newsv;
   DBUG_ENTER("trans_savepoint");
 
-  if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) ||
-        thd->in_sub_stmt) || !opt_using_transactions)
+  if (!(thd->in_multi_stmt_transaction() || thd->in_sub_stmt) ||
+      !opt_using_transactions)
     DBUG_RETURN(FALSE);
 
   sv= find_savepoint(thd, name);

=== modified file 'sql/tztime.cc'
--- a/sql/tztime.cc	2009-01-16 11:53:32 +0000
+++ b/sql/tztime.cc	2009-01-27 02:08:48 +0000
@@ -1580,17 +1580,17 @@ my_tz_init(THD *org_thd, const char *def
   thd->store_globals();
 
   /* Init all memory structures that require explicit destruction */
-  if (hash_init(&tz_names, &my_charset_latin1, 20,
-                0, 0, (hash_get_key) my_tz_names_get_key, 0, 0))
+  if (my_hash_init(&tz_names, &my_charset_latin1, 20,
+                   0, 0, (my_hash_get_key) my_tz_names_get_key, 0, 0))
   {
     sql_print_error("Fatal error: OOM while initializing time zones");
     goto end;
   }
-  if (hash_init(&offset_tzs, &my_charset_latin1, 26, 0, 0,
-                (hash_get_key)my_offset_tzs_get_key, 0, 0))
+  if (my_hash_init(&offset_tzs, &my_charset_latin1, 26, 0, 0,
+                   (my_hash_get_key)my_offset_tzs_get_key, 0, 0))
   {
     sql_print_error("Fatal error: OOM while initializing time zones");
-    hash_free(&tz_names);
+    my_hash_free(&tz_names);
     goto end;
   }
   init_sql_alloc(&tz_storage, 32 * 1024, 0);
@@ -1773,8 +1773,8 @@ void my_tz_free()
   {
     tz_inited= 0;
     pthread_mutex_destroy(&tz_LOCK);
-    hash_free(&offset_tzs);
-    hash_free(&tz_names);
+    my_hash_free(&offset_tzs);
+    my_hash_free(&tz_names);
     free_root(&tz_storage, MYF(0));
   }
 }
@@ -2238,9 +2238,9 @@ my_tz_find(THD *thd, const String *name)
   if (!str_to_offset(name->ptr(), name->length(), &offset))
   {
 
-    if (!(result_tz= (Time_zone_offset *)hash_search(&offset_tzs,
-                                                     (const uchar *)&offset,
-                                                     sizeof(long))))
+    if (!(result_tz= (Time_zone_offset *)my_hash_search(&offset_tzs,
+                                                        (const uchar *)&offset,
+                                                        sizeof(long))))
     {
       DBUG_PRINT("info", ("Creating new Time_zone_offset object"));
 
@@ -2256,9 +2256,10 @@ my_tz_find(THD *thd, const String *name)
   else
   {
     result_tz= 0;
-    if ((tmp_tzname= (Tz_names_entry *)hash_search(&tz_names,
-                                                   (const uchar *)name->ptr(),
-                                                   name->length())))
+    if ((tmp_tzname= (Tz_names_entry *)my_hash_search(&tz_names,
+                                                      (const uchar *)
+                                                      name->ptr(),
+                                                      name->length())))
       result_tz= tmp_tzname->tz;
     else if (time_zone_tables_exist)
     {

=== modified file 'storage/archive/ha_archive.cc'
--- a/storage/archive/ha_archive.cc	2009-01-19 13:16:25 +0000
+++ b/storage/archive/ha_archive.cc	2009-01-27 02:08:48 +0000
@@ -177,8 +177,8 @@ int archive_db_init(void *p)
 
   if (pthread_mutex_init(&archive_mutex, MY_MUTEX_INIT_FAST))
     goto error;
-  if (hash_init(&archive_open_tables, table_alias_charset, 32, 0, 0,
-                (hash_get_key) archive_get_key, 0, 0))
+  if (my_hash_init(&archive_open_tables, table_alias_charset, 32, 0, 0,
+                (my_hash_get_key) archive_get_key, 0, 0))
   {
     pthread_mutex_destroy(&archive_mutex);
   }
@@ -203,7 +203,7 @@ error:
 
 int archive_db_done(void *p)
 {
-  hash_free(&archive_open_tables);
+  my_hash_free(&archive_open_tables);
   pthread_mutex_destroy(&archive_mutex);
 
   return 0;
@@ -293,9 +293,9 @@ ARCHIVE_SHARE *ha_archive::get_share(con
   pthread_mutex_lock(&archive_mutex);
   length=(uint) strlen(table_name);
 
-  if (!(share=(ARCHIVE_SHARE*) hash_search(&archive_open_tables,
-                                           (uchar*) table_name,
-                                           length)))
+  if (!(share=(ARCHIVE_SHARE*) my_hash_search(&archive_open_tables,
+                                              (uchar*) table_name,
+                                              length)))
   {
     char *tmp_name;
     azio_stream archive_tmp;
@@ -381,7 +381,7 @@ int ha_archive::free_share()
   pthread_mutex_lock(&archive_mutex);
   if (!--share->use_count)
   {
-    hash_delete(&archive_open_tables, (uchar*) share);
+    my_hash_delete(&archive_open_tables, (uchar*) share);
     thr_lock_delete(&share->lock);
     pthread_mutex_destroy(&share->mutex);
     /* 

=== modified file 'storage/blackhole/ha_blackhole.cc'
--- a/storage/blackhole/ha_blackhole.cc	2008-12-17 18:40:14 +0000
+++ b/storage/blackhole/ha_blackhole.cc	2009-01-27 02:08:48 +0000
@@ -277,8 +277,9 @@ static st_blackhole_share *get_share(con
   length= (uint) strlen(table_name);
   pthread_mutex_lock(&blackhole_mutex);
     
-  if (!(share= (st_blackhole_share*) hash_search(&blackhole_open_tables,
-                                                 (uchar*) table_name, length)))
+  if (!(share= (st_blackhole_share*)
+        my_hash_search(&blackhole_open_tables,
+                       (uchar*) table_name, length)))
   {
     if (!(share= (st_blackhole_share*) my_malloc(sizeof(st_blackhole_share) +
                                                  length,
@@ -308,7 +309,7 @@ static void free_share(st_blackhole_shar
 {
   pthread_mutex_lock(&blackhole_mutex);
   if (!--share->use_count)
-    hash_delete(&blackhole_open_tables, (uchar*) share);
+    my_hash_delete(&blackhole_open_tables, (uchar*) share);
   pthread_mutex_unlock(&blackhole_mutex);
 }
 
@@ -335,16 +336,16 @@ static int blackhole_init(void *p)
   blackhole_hton->flags= HTON_CAN_RECREATE;
   
   pthread_mutex_init(&blackhole_mutex, MY_MUTEX_INIT_FAST);
-  (void) hash_init(&blackhole_open_tables, system_charset_info,32,0,0,
-                   (hash_get_key) blackhole_get_key,
-                   (hash_free_key) blackhole_free_key, 0);
+  (void) my_hash_init(&blackhole_open_tables, system_charset_info,32,0,0,
+                      (my_hash_get_key) blackhole_get_key,
+                      (my_hash_free_key) blackhole_free_key, 0);
 
   return 0;
 }
 
 static int blackhole_fini(void *p)
 {
-  hash_free(&blackhole_open_tables);
+  my_hash_free(&blackhole_open_tables);
   pthread_mutex_destroy(&blackhole_mutex);
 
   return 0;

=== modified file 'storage/csv/ha_tina.cc'
--- a/storage/csv/ha_tina.cc	2009-01-26 16:03:39 +0000
+++ b/storage/csv/ha_tina.cc	2009-02-05 12:49:39 +0000
@@ -109,8 +109,8 @@ static int tina_init_func(void *p)
 
   tina_hton= (handlerton *)p;
   pthread_mutex_init(&tina_mutex,MY_MUTEX_INIT_FAST);
-  (void) hash_init(&tina_open_tables,system_charset_info,32,0,0,
-                   (hash_get_key) tina_get_key,0,0);
+  (void) my_hash_init(&tina_open_tables,system_charset_info,32,0,0,
+                      (my_hash_get_key) tina_get_key,0,0);
   tina_hton->state= SHOW_OPTION_YES;
   tina_hton->db_type= DB_TYPE_CSV_DB;
   tina_hton->create= tina_create_handler;
@@ -121,7 +121,7 @@ static int tina_init_func(void *p)
 
 static int tina_done_func(void *p)
 {
-  hash_free(&tina_open_tables);
+  my_hash_free(&tina_open_tables);
   pthread_mutex_destroy(&tina_mutex);
 
   return 0;
@@ -146,9 +146,9 @@ static TINA_SHARE *get_share(const char 
     If share is not present in the hash, create a new share and
     initialize its members.
   */
-  if (!(share=(TINA_SHARE*) hash_search(&tina_open_tables,
-                                        (uchar*) table_name,
-                                       length)))
+  if (!(share=(TINA_SHARE*) my_hash_search(&tina_open_tables,
+                                           (uchar*) table_name,
+                                           length)))
   {
     if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
                          &share, sizeof(*share),
@@ -376,7 +376,7 @@ static int free_share(TINA_SHARE *share)
       share->tina_write_opened= FALSE;
     }
 
-    hash_delete(&tina_open_tables, (uchar*) share);
+    my_hash_delete(&tina_open_tables, (uchar*) share);
     thr_lock_delete(&share->lock);
     pthread_mutex_destroy(&share->mutex);
     my_free((uchar*) share, MYF(0));
@@ -1686,10 +1686,10 @@ int ha_tina::check(THD* thd, HA_CHECK_OP
 bool ha_tina::check_if_incompatible_data(HA_CREATE_INFO *info,
 					   uint table_changes)
 {
-  if (table_changes == IS_EQUAL_NO)  
+  if (table_changes == IS_EQUAL_NO)
     return COMPATIBLE_DATA_NO;
   else
-    return COMPATIBLE_DATA_YES;    
+    return COMPATIBLE_DATA_YES;
 }
 
 struct st_mysql_storage_engine csv_storage_engine=

=== modified file 'storage/example/ha_example.cc'
--- a/storage/example/ha_example.cc	2008-04-09 00:56:49 +0000
+++ b/storage/example/ha_example.cc	2009-01-27 02:08:48 +0000
@@ -132,8 +132,8 @@ static int example_init_func(void *p)
 
   example_hton= (handlerton *)p;
   pthread_mutex_init(&example_mutex,MY_MUTEX_INIT_FAST);
-  (void) hash_init(&example_open_tables,system_charset_info,32,0,0,
-                   (hash_get_key) example_get_key,0,0);
+  (void) my_hash_init(&example_open_tables,system_charset_info,32,0,0,
+                      (my_hash_get_key) example_get_key,0,0);
 
   example_hton->state=   SHOW_OPTION_YES;
   example_hton->create=  example_create_handler;
@@ -150,7 +150,7 @@ static int example_done_func(void *p)
 
   if (example_open_tables.records)
     error= 1;
-  hash_free(&example_open_tables);
+  my_hash_free(&example_open_tables);
   pthread_mutex_destroy(&example_mutex);
 
   DBUG_RETURN(0);
@@ -174,9 +174,9 @@ static EXAMPLE_SHARE *get_share(const ch
   pthread_mutex_lock(&example_mutex);
   length=(uint) strlen(table_name);
 
-  if (!(share=(EXAMPLE_SHARE*) hash_search(&example_open_tables,
-                                           (uchar*) table_name,
-                                           length)))
+  if (!(share=(EXAMPLE_SHARE*) my_hash_search(&example_open_tables,
+                                              (uchar*) table_name,
+                                              length)))
   {
     if (!(share=(EXAMPLE_SHARE *)
           my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
@@ -221,7 +221,7 @@ static int free_share(EXAMPLE_SHARE *sha
   pthread_mutex_lock(&example_mutex);
   if (!--share->use_count)
   {
-    hash_delete(&example_open_tables, (uchar*) share);
+    my_hash_delete(&example_open_tables, (uchar*) share);
     thr_lock_delete(&share->lock);
     pthread_mutex_destroy(&share->mutex);
     my_free(share, MYF(0));

=== modified file 'storage/federated/ha_federated.cc'
--- a/storage/federated/ha_federated.cc	2009-01-26 16:03:39 +0000
+++ b/storage/federated/ha_federated.cc	2009-01-31 16:21:19 +0000
@@ -458,8 +458,8 @@ int federated_db_init(void *p)
 
   if (pthread_mutex_init(&federated_mutex, MY_MUTEX_INIT_FAST))
     goto error;
-  if (!hash_init(&federated_open_tables, &my_charset_bin, 32, 0, 0,
-                    (hash_get_key) federated_get_key, 0, 0))
+  if (!my_hash_init(&federated_open_tables, &my_charset_bin, 32, 0, 0,
+                    (my_hash_get_key) federated_get_key, 0, 0))
   {
     DBUG_RETURN(FALSE);
   }
@@ -482,7 +482,7 @@ error:
 
 int federated_done(void *p)
 {
-  hash_free(&federated_open_tables);
+  my_hash_free(&federated_open_tables);
   pthread_mutex_destroy(&federated_mutex);
 
   return 0;
@@ -1492,10 +1492,10 @@ static FEDERATED_SHARE *get_share(const 
     goto error;
 
   /* TODO: change tmp_share.scheme to LEX_STRING object */
-  if (!(share= (FEDERATED_SHARE *) hash_search(&federated_open_tables,
-                                               (uchar*) tmp_share.share_key,
-                                               tmp_share.
-                                               share_key_length)))
+  if (!(share= (FEDERATED_SHARE *) my_hash_search(&federated_open_tables,
+                                                  (uchar*) tmp_share.share_key,
+                                                  tmp_share.
+                                                  share_key_length)))
   {
     query.set_charset(system_charset_info);
     query.append(STRING_WITH_LEN("SELECT "));
@@ -1557,7 +1557,7 @@ static int free_share(FEDERATED_SHARE *s
   pthread_mutex_lock(&federated_mutex);
   if (!--share->use_count)
   {
-    hash_delete(&federated_open_tables, (uchar*) share);
+    my_hash_delete(&federated_open_tables, (uchar*) share);
     thr_lock_delete(&share->lock);
     pthread_mutex_destroy(&share->mutex);
     free_root(&mem_root, MYF(0));

=== modified file 'storage/innobase/handler/ha_innodb.cc'
--- a/storage/innobase/handler/ha_innodb.cc	2009-02-07 16:00:57 +0000
+++ b/storage/innobase/handler/ha_innodb.cc	2009-02-11 12:11:20 +0000
@@ -1790,8 +1790,8 @@ innobase_init(
 		goto error;
 	}
 
-	(void) hash_init(&innobase_open_tables,system_charset_info, 32, 0, 0,
-					(hash_get_key) innobase_get_key, 0, 0);
+        (void) my_hash_init(&innobase_open_tables,system_charset_info, 32, 0, 0,
+                            (my_hash_get_key) innobase_get_key, 0, 0);
 	pthread_mutex_init(&innobase_share_mutex, MY_MUTEX_INIT_FAST);
 	pthread_mutex_init(&prepare_commit_mutex, MY_MUTEX_INIT_FAST);
 	pthread_mutex_init(&commit_threads_m, MY_MUTEX_INIT_FAST);
@@ -1828,7 +1828,7 @@ innobase_end(handlerton *hton, ha_panic_
 		if (innobase_shutdown_for_mysql() != DB_SUCCESS) {
 			err = 1;
 		}
-		hash_free(&innobase_open_tables);
+		my_hash_free(&innobase_open_tables);
 		my_free(internal_innobase_data_file_path,
 						MYF(MY_ALLOW_ZERO_PTR));
 		pthread_mutex_destroy(&innobase_share_mutex);
@@ -3468,7 +3468,6 @@ skip_field:
           prebuilt->idx_cond_func= NULL;
           prebuilt->n_index_fields= n_requested_fields;
         }
-       // file->in_range_read= FALSE;
 
 	if (index != clust_index && prebuilt->need_to_access_clustered) {
 		/* Change rec_field_no's to correspond to the clustered index
@@ -6677,7 +6676,6 @@ ha_innobase::extra(
                         /* Reset index condition pushdown state */
                         pushed_idx_cond= FALSE;
                         pushed_idx_cond_keyno= MAX_KEY;
-                        //in_range_read= FALSE;
                         prebuilt->idx_cond_func= NULL;
 			break;
 		case HA_EXTRA_NO_KEYREAD:
@@ -7344,7 +7342,7 @@ static INNOBASE_SHARE* get_share(const c
 	pthread_mutex_lock(&innobase_share_mutex);
 	uint length=(uint) strlen(table_name);
 
-	if (!(share=(INNOBASE_SHARE*) hash_search(&innobase_open_tables,
+	if (!(share=(INNOBASE_SHARE*) my_hash_search(&innobase_open_tables,
 				(uchar*) table_name,
 				length))) {
 
@@ -7378,7 +7376,7 @@ static void free_share(INNOBASE_SHARE* s
 	pthread_mutex_lock(&innobase_share_mutex);
 
 	if (!--share->use_count) {
-		hash_delete(&innobase_open_tables, (uchar*) share);
+		my_hash_delete(&innobase_open_tables, (uchar*) share);
 		thr_lock_delete(&share->lock);
 		pthread_mutex_destroy(&share->mutex);
 		my_free(share, MYF(0));
@@ -8499,13 +8497,12 @@ mysql_declare_plugin_end;
 int ha_innobase::multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param,
                           uint n_ranges, uint mode, HANDLER_BUFFER *buf)
 {
-  return ds_mrr.dsmrr_init(this, &table->key_info[active_index], 
-                           seq, seq_init_param, n_ranges, mode, buf);
+  return ds_mrr.dsmrr_init(this, seq, seq_init_param, n_ranges, mode, buf);
 }
 
 int ha_innobase::multi_range_read_next(char **range_info)
 {
-  return ds_mrr.dsmrr_next(this, range_info);
+  return ds_mrr.dsmrr_next(range_info);
 }
 
 ha_rows ha_innobase::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
@@ -8541,7 +8538,7 @@ C_MODE_START
 static my_bool index_cond_func_innodb(void *arg)
 {
   ha_innobase *h= (ha_innobase*)arg;
-  if (h->end_range) //was: h->in_range_read
+  if (h->end_range)
   {
     if (h->compare_key2(h->end_range) > 0)
       return 2; /* caller should return HA_ERR_END_OF_FILE already */
@@ -8571,11 +8568,7 @@ int ha_innobase::read_range_first(const 
                                 bool sorted /* ignored */)
 {
   int res;
-  //if (!eq_range_arg)
-    //in_range_read= TRUE;
   res= handler::read_range_first(start_key, end_key, eq_range_arg, sorted);
-  //if (res)
-  //  in_range_read= FALSE;
   return res;
 }
 
@@ -8583,8 +8576,6 @@ int ha_innobase::read_range_first(const 
 int ha_innobase::read_range_next()
 {
   int res= handler::read_range_next();
-  //if (res)
-  //  in_range_read= FALSE;
   return res;
 }
 

=== modified file 'storage/maria/ha_maria.cc'
--- a/storage/maria/ha_maria.cc	2009-02-05 07:01:39 +0000
+++ b/storage/maria/ha_maria.cc	2009-02-11 12:11:20 +0000
@@ -1457,7 +1457,7 @@ int ha_maria::preload_keys(THD * thd, HA
 
   if ((error= maria_preload(file, map, table_list->ignore_leaves)))
   {
-    char buf[128];
+    char buf[MYSYS_ERRMSG_SIZE];
     const char *errmsg;
 
     switch (error) {
@@ -3198,13 +3198,12 @@ int ha_maria::multi_range_read_init(RANG
                                      uint n_ranges, uint mode, 
                                      HANDLER_BUFFER *buf)
 {
-  return ds_mrr.dsmrr_init(this, &table->key_info[active_index], 
-                           seq, seq_init_param, n_ranges, mode, buf);
+  return ds_mrr.dsmrr_init(this, seq, seq_init_param, n_ranges, mode, buf);
 }
 
 int ha_maria::multi_range_read_next(char **range_info)
 {
-  return ds_mrr.dsmrr_next(this, range_info);
+  return ds_mrr.dsmrr_next(range_info);
 }
 
 ha_rows ha_maria::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,

=== modified file 'storage/maria/ma_init.c'
--- a/storage/maria/ma_init.c	2008-10-09 20:03:54 +0000
+++ b/storage/maria/ma_init.c	2009-01-27 02:08:48 +0000
@@ -66,8 +66,8 @@ int maria_init(void)
     trnman_end_trans_hook= _ma_trnman_end_trans_hook;
     my_handler_error_register();
   }
-  hash_init(&maria_stored_state, &my_charset_bin, 32,
-            0, sizeof(LSN), 0, (hash_free_key) history_state_free, 0);
+  my_hash_init(&maria_stored_state, &my_charset_bin, 32,
+            0, sizeof(LSN), 0, (my_hash_free_key) history_state_free, 0);
   DBUG_PRINT("info",("dummy_transaction_object: %p",
                      &dummy_transaction_object));
   return 0;
@@ -99,6 +99,6 @@ void maria_end(void)
     end_pagecache(maria_pagecache, TRUE);
     ma_control_file_end();
     pthread_mutex_destroy(&THR_LOCK_maria);
-    hash_free(&maria_stored_state);
+    my_hash_free(&maria_stored_state);
   }
 }

=== modified file 'storage/maria/ma_open.c'
--- a/storage/maria/ma_open.c	2008-12-09 13:11:48 +0000
+++ b/storage/maria/ma_open.c	2009-01-27 02:08:48 +0000
@@ -791,8 +791,8 @@ MARIA_HA *maria_open(const char *name, i
       /* Setup initial state that is visible for all */
       MARIA_STATE_HISTORY_CLOSED *history;
       if ((history= (MARIA_STATE_HISTORY_CLOSED *)
-           hash_search(&maria_stored_state,
-                       (uchar*) &share->state.create_rename_lsn, 0)))
+           my_hash_search(&maria_stored_state,
+                          (uchar*) &share->state.create_rename_lsn, 0)))
       {
         /*
           Move history from hash to share. This is safe to do as we
@@ -801,7 +801,7 @@ MARIA_HA *maria_open(const char *name, i
         share->state_history=
           _ma_remove_not_visible_states(history->state_history, 0, 0);
         history->state_history= 0;
-        (void) hash_delete(&maria_stored_state, (uchar*) history);
+        (void) my_hash_delete(&maria_stored_state, (uchar*) history);
       }
       else
       {

=== modified file 'storage/maria/ma_pagecache.c'
--- a/storage/maria/ma_pagecache.c	2008-11-24 18:40:52 +0000
+++ b/storage/maria/ma_pagecache.c	2009-01-27 02:08:48 +0000
@@ -729,10 +729,10 @@ ulong init_pagecache(PAGECACHE *pagecach
   if (! pagecache->inited)
   {
     if (pthread_mutex_init(&pagecache->cache_lock, MY_MUTEX_INIT_FAST) ||
-        hash_init(&pagecache->files_in_flush, &my_charset_bin, 32,
-                  offsetof(struct st_file_in_flush, file),
-                  sizeof(((struct st_file_in_flush *)NULL)->file),
-                  NULL, NULL, 0))
+        my_hash_init(&pagecache->files_in_flush, &my_charset_bin, 32,
+                     offsetof(struct st_file_in_flush, file),
+                     sizeof(((struct st_file_in_flush *)NULL)->file),
+                     NULL, NULL, 0))
       goto err;
     pagecache->inited= 1;
     pagecache->in_init= 0;
@@ -1129,7 +1129,7 @@ void end_pagecache(PAGECACHE *pagecache,
 
   if (cleanup)
   {
-    hash_free(&pagecache->files_in_flush);
+    my_hash_free(&pagecache->files_in_flush);
     pthread_mutex_destroy(&pagecache->cache_lock);
     pagecache->inited= pagecache->can_be_used= 0;
     PAGECACHE_DEBUG_CLOSE;
@@ -4359,8 +4359,8 @@ static int flush_pagecache_blocks_int(PA
     us_flusher.flush_queue.last_thread= NULL;
     us_flusher.first_in_switch= FALSE;
     while ((other_flusher= (struct st_file_in_flush *)
-            hash_search(&pagecache->files_in_flush, (uchar *)&file->file,
-                        sizeof(file->file))))
+            my_hash_search(&pagecache->files_in_flush, (uchar *)&file->file,
+                           sizeof(file->file))))
     {
       /*
         File is in flush already: wait, unless FLUSH_KEEP_LAZY. "Flusher"
@@ -4586,7 +4586,7 @@ restart:
     }
 #ifdef THREAD
     /* wake up others waiting to flush this file */
-    hash_delete(&pagecache->files_in_flush, (uchar *)&us_flusher);
+    my_hash_delete(&pagecache->files_in_flush, (uchar *)&us_flusher);
     if (us_flusher.flush_queue.last_thread)
       wqueue_release_queue(&us_flusher.flush_queue);
 #endif
@@ -4727,7 +4727,7 @@ my_bool pagecache_collect_changed_blocks
     struct st_file_in_flush *other_flusher;
     for (file_hash= 0;
          (other_flusher= (struct st_file_in_flush *)
-          hash_element(&pagecache->files_in_flush, file_hash)) != NULL &&
+          my_hash_element(&pagecache->files_in_flush, file_hash)) != NULL &&
            !other_flusher->first_in_switch;
          file_hash++)
     {}

=== modified file 'storage/maria/ma_recovery.c'
--- a/storage/maria/ma_recovery.c	2008-12-09 13:11:48 +0000
+++ b/storage/maria/ma_recovery.c	2009-01-27 02:08:48 +0000
@@ -435,7 +435,7 @@ err:
     delete_all_transactions();
 end:
   error_handler_hook= save_error_handler_hook;
-  hash_free(&all_dirty_pages);
+  my_hash_free(&all_dirty_pages);
   bzero(&all_dirty_pages, sizeof(all_dirty_pages));
   my_free(dirty_pages_pool, MYF(MY_ALLOW_ZERO_PTR));
   dirty_pages_pool= NULL;
@@ -2534,7 +2534,7 @@ static uint end_of_redo_phase(my_bool pr
   char llbuf[22];
   LSN addr;
 
-  hash_free(&all_dirty_pages);
+  my_hash_free(&all_dirty_pages);
   /*
     hash_free() can be called multiple times probably, but be safe if that
     changes
@@ -3043,10 +3043,10 @@ static LSN parse_checkpoint_record(LSN l
 
   ptr+= 8;
   tprint(tracef, "%lu dirty pages\n", (ulong) nb_dirty_pages);
-  if (hash_init(&all_dirty_pages, &my_charset_bin, (ulong)nb_dirty_pages,
-                offsetof(struct st_dirty_page, file_and_page_id),
-                sizeof(((struct st_dirty_page *)NULL)->file_and_page_id),
-                NULL, NULL, 0))
+  if (my_hash_init(&all_dirty_pages, &my_charset_bin, (ulong)nb_dirty_pages,
+                   offsetof(struct st_dirty_page, file_and_page_id),
+                   sizeof(((struct st_dirty_page *)NULL)->file_and_page_id),
+                   NULL, NULL, 0))
     return LSN_ERROR;
   dirty_pages_pool=
     (struct st_dirty_page *)my_malloc((size_t)nb_dirty_pages *

=== modified file 'storage/maria/ma_recovery_util.c'
--- a/storage/maria/ma_recovery_util.c	2008-01-29 21:20:59 +0000
+++ b/storage/maria/ma_recovery_util.c	2009-01-27 02:08:48 +0000
@@ -124,8 +124,8 @@ my_bool _ma_redo_not_needed_for_page(uin
     uint64 file_and_page_id=
       (((uint64)((index << 16) | shortid)) << 40) | page;
     struct st_dirty_page *dirty_page= (struct st_dirty_page *)
-      hash_search(&all_dirty_pages,
-                  (uchar *)&file_and_page_id, sizeof(file_and_page_id));
+      my_hash_search(&all_dirty_pages,
+                     (uchar *)&file_and_page_id, sizeof(file_and_page_id));
     DBUG_PRINT("info", ("in dirty pages list: %d", dirty_page != NULL));
     if ((dirty_page == NULL) ||
         cmp_translog_addr(lsn, dirty_page->rec_lsn) < 0)

=== modified file 'storage/maria/tablockman.c'
--- a/storage/maria/tablockman.c	2008-01-10 12:21:53 +0000
+++ b/storage/maria/tablockman.c	2009-01-27 13:04:31 +0000
@@ -225,13 +225,12 @@ struct st_table_lock {
   uchar  lock_type;
 };
 
-#define hash_insert my_hash_insert /* for consistency :) */
 
 static inline
 TABLE_LOCK *find_by_loid(LOCKED_TABLE *table, uint16 loid)
 {
-  return (TABLE_LOCK *)hash_search(& table->latest_locks,
-                                   (uchar *)& loid, sizeof(loid));
+  return (TABLE_LOCK *)my_hash_search(& table->latest_locks,
+                                      (uchar *)& loid, sizeof(loid));
 }
 
 static inline
@@ -485,8 +484,8 @@ tablockman_getlock(TABLOCKMAN *lm, TABLE
 
   /* update the latest_locks hash */
   if (old)
-    hash_delete(& table->latest_locks, (uchar *)old);
-  hash_insert(& table->latest_locks, (uchar *)new);
+    my_hash_delete(& table->latest_locks, (uchar *)old);
+  my_hash_insert(& table->latest_locks, (uchar *)new);
 
   new->upgraded_from= old;
 
@@ -569,7 +568,7 @@ void tablockman_release_locks(TABLOCKMAN
 
     /* TODO ? group locks by table to reduce the number of mutex locks */
     pthread_mutex_lock(mutex);
-    hash_delete(& cur->table->latest_locks, (uchar *)cur);
+    my_hash_delete(& cur->table->latest_locks, (uchar *)cur);
 
     if (cur->prev)
       cur->prev->next= cur->next;
@@ -632,9 +631,9 @@ void tablockman_init_locked_table(LOCKED
 {
   bzero(lt, sizeof(*lt));
   pthread_mutex_init(& lt->mutex, MY_MUTEX_INIT_FAST);
-  hash_init(& lt->latest_locks, & my_charset_bin, initial_hash_size,
-            offsetof(TABLE_LOCK, loid),
-            sizeof(((TABLE_LOCK*)0)->loid), 0, 0, 0);
+  my_hash_init(& lt->latest_locks, & my_charset_bin, initial_hash_size,
+               offsetof(TABLE_LOCK, loid),
+               sizeof(((TABLE_LOCK*)0)->loid), 0, 0, 0);
 }
 
 void tablockman_destroy_locked_table(LOCKED_TABLE *lt)
@@ -647,7 +646,7 @@ void tablockman_destroy_locked_table(LOC
   for (i= 0; i<LOCK_TYPES; i++)
      DBUG_ASSERT(lt->active_locks[i] == 0);
 
-  hash_free(& lt->latest_locks);
+  my_hash_free(& lt->latest_locks);
   pthread_mutex_destroy(& lt->mutex);
 }
 

=== modified file 'storage/myisam/ha_myisam.cc'
--- a/storage/myisam/ha_myisam.cc	2009-02-05 06:27:55 +0000
+++ b/storage/myisam/ha_myisam.cc	2009-02-11 12:11:20 +0000
@@ -2013,13 +2013,12 @@ int ha_myisam::multi_range_read_init(RAN
                                      uint n_ranges, uint mode, 
                                      HANDLER_BUFFER *buf)
 {
-  return ds_mrr.dsmrr_init(this, &table->key_info[active_index], 
-                           seq, seq_init_param, n_ranges, mode, buf);
+  return ds_mrr.dsmrr_init(this, seq, seq_init_param, n_ranges, mode, buf);
 }
 
 int ha_myisam::multi_range_read_next(char **range_info)
 {
-  return ds_mrr.dsmrr_next(this, range_info);
+  return ds_mrr.dsmrr_next(range_info);
 }
 
 ha_rows ha_myisam::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,

=== modified file 'storage/myisam/mi_create.c'
--- a/storage/myisam/mi_create.c	2008-11-12 15:23:22 +0000
+++ b/storage/myisam/mi_create.c	2009-01-31 16:21:19 +0000
@@ -651,8 +651,8 @@ int mi_create(const char *name,uint keys
     this would solve the problem.
  */
   DBUG_ASSERT((options & HA_OPTION_TMP_TABLE) || !mi_log_tables_physical ||
-              !hash_search(mi_log_tables_physical, filename,
-                           strlen(filename)));
+              !my_hash_search(mi_log_tables_physical, filename,
+                              strlen(filename)));
 
   if ((file= my_create_with_symlink(linkname_ptr, filename, 0, create_mode,
 				    MYF(MY_WME | create_flag))) < 0)

=== modified file 'storage/myisam/mi_log.c'
--- a/storage/myisam/mi_log.c	2008-07-09 07:12:43 +0000
+++ b/storage/myisam/mi_log.c	2009-01-27 02:08:48 +0000
@@ -667,7 +667,7 @@ static int mi_log_start_physical(const c
   int error;
   DBUG_ENTER("mi_log_start_physical");
   DBUG_ASSERT(log_filename != NULL);
-  DBUG_ASSERT(hash_inited(tables));
+  DBUG_ASSERT(my_hash_inited(tables));
 
   pthread_mutex_lock(&THR_LOCK_myisam);
   if (mi_log_tables_physical) /* physical logging already running */
@@ -690,8 +690,9 @@ static int mi_log_start_physical(const c
     MYISAM_SHARE *share= info->s;
     DBUG_PRINT("info",("table '%s' 0x%lx tested against hash",
                        share->unique_file_name, (ulong)info));
-    if (!hash_search(mi_log_tables_physical, (uchar *)share->unique_file_name,
-                     share->unique_name_length))
+    if (!my_hash_search(mi_log_tables_physical,
+                        (uchar *)share->unique_file_name,
+                        share->unique_name_length))
       continue;
     /* Backup kernel shouldn't ask for temporary table's backup */
     DBUG_ASSERT(!share->temporary);

=== modified file 'storage/myisam/mi_open.c'
--- a/storage/myisam/mi_open.c	2009-02-03 09:16:53 +0000
+++ b/storage/myisam/mi_open.c	2009-02-04 10:49:16 +0000
@@ -701,8 +701,8 @@ MI_INFO *mi_open(const char *name, int m
   thr_lock_data_init(&share->lock,&m_info->lock,(void*) m_info);
 #endif
   if (mi_log_tables_physical &&
-      hash_search(mi_log_tables_physical, (uchar *)share->unique_file_name,
-                  share->unique_name_length))
+      my_hash_search(mi_log_tables_physical, (uchar *)share->unique_file_name,
+                     share->unique_name_length))
     m_info->s->physical_logging= TRUE; /* set before publishing table */
   m_info->open_list.data=(void*) m_info;
   myisam_open_list=list_add(myisam_open_list,&m_info->open_list);

=== modified file 'storage/myisam/myisam_backup_engine.cc'
--- a/storage/myisam/myisam_backup_engine.cc	2009-01-21 15:00:23 +0000
+++ b/storage/myisam/myisam_backup_engine.cc	2009-02-04 10:49:16 +0000
@@ -466,7 +466,7 @@ Backup::~Backup()
   delete image;
   if (hash_of_tables)
   {
-    hash_free(hash_of_tables);
+    my_hash_free(hash_of_tables);
     delete hash_of_tables;
     hash_of_tables= NULL;
   }
@@ -542,9 +542,9 @@ result_t Backup::begin(const size_t)
   }
   hash_of_tables= new HASH;
   if (!hash_of_tables ||
-      hash_init(hash_of_tables, &my_charset_bin, m_tables.count(), 0, 0,
-                (hash_get_key)backup_get_table_from_hash_key,
-                (hash_free_key)backup_free_hash_key, 0))
+      my_hash_init(hash_of_tables, &my_charset_bin, m_tables.count(), 0, 0,
+                  (my_hash_get_key)backup_get_table_from_hash_key,
+                  (my_hash_free_key)backup_free_hash_key, 0))
     SET_STATE_TO_ERROR_AND_DBUG_RETURN;
   /* Build the hash of tables for the MyISAM layer (mi_backup_log.c etc) */
   for (uint n=0 ; n < m_tables.count() ; n++ )

=== modified file 'storage/ndb/include/mgmapi/mgmapi.h'
--- a/storage/ndb/include/mgmapi/mgmapi.h	2008-11-20 16:41:06 +0000
+++ b/storage/ndb/include/mgmapi/mgmapi.h	2008-12-18 09:16:45 +0000
@@ -1191,23 +1191,23 @@ extern "C" {
      NDB_MGM_CLUSTERLOG_ALERT = 6,
      NDB_MGM_CLUSTERLOG_ALL = 7
   };
-  inline
+  static inline
   int ndb_mgm_filter_clusterlog(NdbMgmHandle h,
 				enum ndb_mgm_clusterlog_level s,
 				int e, struct ndb_mgm_reply* r)
   { return ndb_mgm_set_clusterlog_severity_filter(h,(enum ndb_mgm_event_severity)s,
 						  e,r); }
-  inline
+  static inline
   const unsigned int * ndb_mgm_get_logfilter(NdbMgmHandle h)
   { return ndb_mgm_get_clusterlog_severity_filter_old(h); }
 
-  inline
+  static inline
   int ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle h, int n,
 				      enum ndb_mgm_event_category c,
 				      int l, struct ndb_mgm_reply* r)
   { return ndb_mgm_set_clusterlog_loglevel(h,n,c,l,r); }
 
-  inline
+  static inline
   const unsigned int * ndb_mgm_get_loglevel_clusterlog(NdbMgmHandle h)
   { return ndb_mgm_get_clusterlog_loglevel_old(h); }
 

=== modified file 'storage/ndb/include/mgmapi/mgmapi_config_parameters.h'
--- a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h	2008-05-29 13:09:49 +0000
+++ b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h	2008-12-18 09:16:45 +0000
@@ -191,9 +191,9 @@
 #define CFG_SCI_SEND_LIMIT            554
 #define CFG_SCI_BUFFER_MEM            555
 
-#define CFG_602                       602 // Removed: was OSE
-#define CFG_603                       603 // Removed: was OSE
-#define CFG_604                       604 // Removed: was OSE
+#define CFG_602                       602 /* Removed: was OSE */
+#define CFG_603                       603 /* Removed: was OSE */
+#define CFG_604                       604 /* Removed: was OSE */
 
 /**
  * API Config variables
@@ -220,6 +220,6 @@
 #define CONNECTION_TYPE_TCP           0
 #define CONNECTION_TYPE_SHM           1
 #define CONNECTION_TYPE_SCI           2
-#define CONNECTION_TYPE_OSE           3 // Removed.
+#define CONNECTION_TYPE_OSE           3 /* Removed. */
 
 #endif

=== modified file 'storage/ndb/include/mgmapi/mgmapi_error.h'
--- a/storage/ndb/include/mgmapi/mgmapi_error.h	2008-03-28 08:03:06 +0000
+++ b/storage/ndb/include/mgmapi/mgmapi_error.h	2008-12-18 09:16:45 +0000
@@ -81,40 +81,8 @@ extern "C" {
     enum ndb_mgm_error  code;
     const char *        msg;
   };
-  const struct Ndb_Mgm_Error_Msg ndb_mgm_error_msgs[] = {
-    { NDB_MGM_NO_ERROR, "No error" },
-
-    /* Request for service errors */
-    { NDB_MGM_ILLEGAL_CONNECT_STRING, "Illegal connect string" },
-    { NDB_MGM_ILLEGAL_SERVER_HANDLE, "Illegal server handle" },
-    { NDB_MGM_ILLEGAL_SERVER_REPLY, "Illegal reply from server" },
-    { NDB_MGM_ILLEGAL_NUMBER_OF_NODES, "Illegal number of nodes" },
-    { NDB_MGM_ILLEGAL_NODE_STATUS, "Illegal node status" },
-    { NDB_MGM_OUT_OF_MEMORY, "Out of memory" },
-    { NDB_MGM_SERVER_NOT_CONNECTED, "Management server not connected" },
-    { NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, "Could not connect to socket" },
-
-    /* Service errors - Start/Stop Node or System */
-    { NDB_MGM_START_FAILED, "Start failed" },
-    { NDB_MGM_STOP_FAILED, "Stop failed" },
-    { NDB_MGM_RESTART_FAILED, "Restart failed" },
-
-    /* Service errors - Backup */
-    { NDB_MGM_COULD_NOT_START_BACKUP, "Could not start backup" },
-    { NDB_MGM_COULD_NOT_ABORT_BACKUP, "Could not abort backup" },
-
-    /* Service errors - Single User Mode */
-    { NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE,
-      "Could not enter single user mode" },
-    { NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE,
-      "Could not exit single user mode" },
-
-    /* Usage errors */
-    { NDB_MGM_USAGE_ERROR,
-      "Usage error" }
-  };
-  const int ndb_mgm_noOfErrorMsgs =
-  sizeof(ndb_mgm_error_msgs)/sizeof(struct Ndb_Mgm_Error_Msg);
+  extern const struct Ndb_Mgm_Error_Msg ndb_mgm_error_msgs[];
+  extern const int ndb_mgm_noOfErrorMsgs;
 #endif
 
 #ifdef __cplusplus

=== modified file 'storage/ndb/include/mgmapi/ndb_logevent.h'
--- a/storage/ndb/include/mgmapi/ndb_logevent.h	2008-02-11 14:07:49 +0000
+++ b/storage/ndb/include/mgmapi/ndb_logevent.h	2008-12-18 09:16:45 +0000
@@ -393,6 +393,7 @@ extern "C" {
       } NDBStartCompleted;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
+        unsigned _todo;
       } STTORRYRecieved;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
@@ -437,6 +438,7 @@ extern "C" {
       } NDBStopForced;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
+        unsigned _todo;
       } NDBStopAborted;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
@@ -470,9 +472,11 @@ extern "C" {
       /* NODERESTART */
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
+        unsigned _todo;
       } NR_CopyDict;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
+        unsigned _todo;
       } NR_CopyDistr;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
@@ -518,12 +522,15 @@ extern "C" {
       } ArbitResult;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
+        unsigned _todo;
       } GCP_TakeoverStarted;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
+        unsigned _todo;
       } GCP_TakeoverCompleted;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
+        unsigned _todo;
       } LCP_TakeoverStarted;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
@@ -604,6 +611,7 @@ extern "C" {
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
 	/* TODO */
+        unsigned _todo;
       } WarningEvent;
 
       /* INFO */
@@ -618,6 +626,7 @@ extern "C" {
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
 	/* TODO */
+        unsigned _todo;
       } InfoEvent;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {

=== modified file 'storage/ndb/include/ndbapi/NdbScanOperation.hpp'
--- a/storage/ndb/include/ndbapi/NdbScanOperation.hpp	2008-11-08 20:40:15 +0000
+++ b/storage/ndb/include/ndbapi/NdbScanOperation.hpp	2009-02-09 13:28:30 +0000
@@ -54,6 +54,12 @@ public:
       each fragment, to get a single sorted result set.
     */
     SF_OrderBy = (1 << 24),
+    /**
+     * Same as order by, except that it will automatically 
+     *   add all key columns into the read-mask
+     */
+    SF_OrderByFull = (16 << 24),
+
     /* Index scan in descending order, instead of default ascending. */
     SF_Descending = (2 << 24),
     /*

=== modified file 'storage/ndb/include/util/Bitmask.hpp'
--- a/storage/ndb/include/util/Bitmask.hpp	2008-01-23 09:34:09 +0000
+++ b/storage/ndb/include/util/Bitmask.hpp	2008-12-18 08:41:41 +0000
@@ -159,9 +159,27 @@ public:
 		       unsigned pos, unsigned len, const Uint32 src[]);
   
   /**
+   * copyField - Copy bitfield from one position and length
+   * to another position and length.
+   * Undefined for overlapping bitfields
+   */
+  static void copyField(Uint32 dst[], unsigned destPos,
+                        const Uint32 src[], unsigned srcPos, unsigned len);
+  
+  /**
    * getText - Return as hex-digits (only for debug routines).
    */
   static char* getText(unsigned size, const Uint32 data[], char* buf);
+
+  /**
+   * Parse string with numbers format
+   *   1,2,3-5
+   * @return -1 if unparsable chars found, 
+   *         -2 str has number > bitmask size
+   *            else returns number of bits set 
+   */
+  static int parseMask(unsigned size, Uint32 data[], const char * str);
+
 private:
   static void getFieldImpl(const Uint32 data[], unsigned, unsigned, Uint32 []);
   static void setFieldImpl(Uint32 data[], unsigned, unsigned, const Uint32 []);
@@ -593,6 +611,9 @@ public:
    */
   static char* getText(const Uint32 data[], char* buf);
   char* getText(char* buf) const;
+
+  static int parseMask(Uint32 data[], const char * src);
+  int parseMask(const char * src);
 };
 
 template <unsigned size>
@@ -909,6 +930,21 @@ BitmaskPOD<size>::overlaps(BitmaskPOD<si
 }
 
 template <unsigned size>
+int
+BitmaskPOD<size>::parseMask(Uint32 data[], const char* buf)
+{
+  return BitmaskImpl::parseMask(size, data, buf);
+}
+
+template <unsigned size>
+inline
+int
+BitmaskPOD<size>::parseMask(const char* buf)
+{
+  return BitmaskPOD<size>::parseMask(rep.data, buf);
+}
+
+template <unsigned size>
 class Bitmask : public BitmaskPOD<size> {
 public:
   Bitmask() { this->clear();}
@@ -979,4 +1015,58 @@ BitmaskImpl::setField(unsigned size, Uin
   setFieldImpl(dst+1, used & 31, len-used, src+(used >> 5));
 }
 
+/* Three way min utiltiy for copyField below */
+inline unsigned minLength(unsigned a, unsigned b, unsigned c)
+{
+  return (a < b ? 
+          (a < c ? a : c) : 
+          (b < c ? b : c ));
+}
+
+inline void
+BitmaskImpl::copyField(Uint32 _dst[], unsigned dstPos,
+                       const Uint32 _src[], unsigned srcPos, unsigned len)
+{
+  /* Algorithm
+   * While (len > 0)
+   *  - Find the longest bit length we can copy from one 32-bit word
+   *    to another (which is the miniumum of remaining length, 
+   *    space in current src word and space in current dest word)
+   *  - Extract that many bits from src, and shift them to the correct
+   *    position to insert into dest
+   *  - Mask out the to-be-written words from dest (and any irrelevant 
+   *    words in src) and or them together
+   *  - Move onto next chunk
+   */
+  while (len > 0)
+  {
+    const Uint32* src= _src + (srcPos >> 5);
+    Uint32* dst= _dst + (dstPos >> 5);
+    unsigned srcOffset= srcPos & 31;
+    unsigned dstOffset= dstPos & 31;
+    unsigned srcBitsInWord= 32 - srcOffset; 
+    unsigned dstBitsInWord= 32 - dstOffset;
+    
+    /* How many bits can we copy at once? */
+    unsigned bits= minLength(dstBitsInWord, srcBitsInWord, len);
+    
+    /* Create mask for affected bits in dest */
+    Uint32 destMask= (~(Uint32)0 >> (32-bits) << dstOffset);
+    
+    /* Grab source data and shift to dest offset */
+    Uint32 data= ((*src) >> srcOffset) << dstOffset;
+    
+    /* Mask out affected bits in dest and irrelevant bits in source
+     * and combine
+     */
+    *dst= (*dst  & ~destMask) | (data & destMask);
+    
+    srcPos+= bits;
+    dstPos+= bits;
+    len-= bits;
+  }
+  
+  return;
+}
+
 #endif

=== modified file 'storage/ndb/src/common/portlib/NdbThread.c'
--- a/storage/ndb/src/common/portlib/NdbThread.c	2006-12-23 19:20:40 +0000
+++ b/storage/ndb/src/common/portlib/NdbThread.c	2008-12-12 11:05:58 +0000
@@ -39,11 +39,10 @@ struct NdbThread 
 #ifdef NDB_SHM_TRANSPORTER
 void NdbThread_set_shm_sigmask(my_bool block)
 {
-  DBUG_ENTER("NdbThread_set_shm_sigmask");
   if (g_ndb_shm_signum)
   {
     sigset_t mask;
-    DBUG_PRINT("info",("Block signum %d",g_ndb_shm_signum));
+    // DBUG_PRINT("info",("Block signum %d",g_ndb_shm_signum));
     sigemptyset(&mask);
     sigaddset(&mask, g_ndb_shm_signum);
     if (block)
@@ -51,7 +50,7 @@ void NdbThread_set_shm_sigmask(my_bool b
     else
       pthread_sigmask(SIG_UNBLOCK, &mask, 0);
   }
-  DBUG_VOID_RETURN;
+  return;
 }
 #endif
 

=== modified file 'storage/ndb/src/common/util/Bitmask.cpp'
--- a/storage/ndb/src/common/util/Bitmask.cpp	2008-01-23 14:04:43 +0000
+++ b/storage/ndb/src/common/util/Bitmask.cpp	2008-12-18 08:41:41 +0000
@@ -115,3 +115,62 @@ BitmaskImpl::setFieldImpl(Uint32 dst[],
  * storage/ndb/test/ndbapi/testBitfield.cpp
  * to get coverage from automated testing
  */
+
+int
+BitmaskImpl::parseMask(unsigned size, Uint32 data[], const char * src)
+{
+  int cnt = 0;
+  BaseString tmp(src);
+  Vector<BaseString> list;
+  tmp.split(list, ",");
+  for (unsigned i = 0; i<list.size(); i++)
+  {
+    list[i].trim();
+    if (list[i].empty())
+      continue;
+    unsigned num = 0;
+    char * delim = (char*)strchr(list[i].c_str(), '-');
+    unsigned first = 0;
+    unsigned last = 0;
+    if (delim == 0)
+    {
+      int res = sscanf(list[i].c_str(), "%u", &first);
+      if (res != 1)
+      {
+        return -1;
+      }
+      last = first;
+    }
+    else
+    {
+      * delim = 0;
+      delim++;
+      int res0 = sscanf(list[i].c_str(), "%u", &first);
+      if (res0 != 1)
+      {
+        return -1;
+      }
+      int res1 = sscanf(delim, "%u", &last);
+      if (res1 != 1)
+      {
+        return -1;
+      }
+      if (first > last)
+      {
+        unsigned tmp = first;
+        first = last;
+        last = tmp;
+      }
+    }
+    
+    for (unsigned j = first; j<(last+1); j++)
+    {
+      if (j >= (size << 5))
+        return -2;
+
+      cnt++;
+      BitmaskImpl::set(size, data, j);
+    }
+  }
+  return cnt;
+}

=== modified file 'storage/ndb/src/kernel/blocks/ERROR_codes.txt'
--- a/storage/ndb/src/kernel/blocks/ERROR_codes.txt	2008-12-08 12:35:55 +0000
+++ b/storage/ndb/src/kernel/blocks/ERROR_codes.txt	2009-01-29 10:56:52 +0000
@@ -1,4 +1,4 @@
-Next QMGR 937
+Next QMGR 938
 Next NDBCNTR 1002
 Next NDBFS 2000
 Next DBACC 3002

=== modified file 'storage/ndb/src/kernel/blocks/backup/Backup.cpp'
--- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp	2008-09-30 06:55:35 +0000
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp	2009-01-27 14:32:31 +0000
@@ -1076,20 +1076,24 @@ Backup::execBACKUP_REQ(Signal* signal)
   const Uint32 dataLen32 = req->backupDataLen; // In 32 bit words
   const Uint32 flags = signal->getLength() > 2 ? req->flags : 2;
 
-  if(getOwnNodeId() != getMasterNodeId()) {
+  if (getOwnNodeId() != getMasterNodeId())
+  {
     jam();
-    sendBackupRef(senderRef, flags, signal, senderData, BackupRef::IAmNotMaster);
+    sendBackupRef(senderRef, flags, signal, senderData,
+                  BackupRef::IAmNotMaster);
     return;
   }//if
 
   if (c_defaults.m_diskless)
   {
+    jam();
     sendBackupRef(senderRef, flags, signal, senderData, 
 		  BackupRef::CannotBackupDiskless);
     return;
   }
   
-  if(dataLen32 != 0) {
+  if (dataLen32 != 0)
+  {
     jam();
     sendBackupRef(senderRef, flags, signal, senderData, 
 		  BackupRef::BackupDefinitionNotImplemented);
@@ -1104,9 +1108,11 @@ Backup::execBACKUP_REQ(Signal* signal)
    */
   BackupRecordPtr ptr;
   c_backups.seize(ptr);
-  if(ptr.i == RNIL) {
+  if (ptr.i == RNIL)
+  {
     jam();
-    sendBackupRef(senderRef, flags, signal, senderData, BackupRef::OutOfBackupRecord);
+    sendBackupRef(senderRef, flags, signal, senderData,
+                  BackupRef::OutOfBackupRecord);
     return;
   }//if
 
@@ -1125,34 +1131,71 @@ Backup::execBACKUP_REQ(Signal* signal)
   ptr.p->backupDataLen = 0;
   ptr.p->masterData.errorCode = 0;
 
+  ptr.p->masterData.sequence.retriesLeft = 3;
+  sendUtilSequenceReq(signal, ptr);
+}
+
+void
+Backup::sendUtilSequenceReq(Signal* signal, BackupRecordPtr ptr, Uint32 delay)
+{
+  jam();
+
   UtilSequenceReq * utilReq = (UtilSequenceReq*)signal->getDataPtrSend();
-    
   ptr.p->masterData.gsn = GSN_UTIL_SEQUENCE_REQ;
   utilReq->senderData  = ptr.i;
   utilReq->sequenceId  = NDB_BACKUP_SEQUENCE;
   utilReq->requestType = UtilSequenceReq::NextVal;
-  sendSignal(DBUTIL_REF, GSN_UTIL_SEQUENCE_REQ, 
-	     signal, UtilSequenceReq::SignalLength, JBB);
+
+  if (delay == 0)
+  {
+    jam();
+    sendSignal(DBUTIL_REF, GSN_UTIL_SEQUENCE_REQ,
+               signal, UtilSequenceReq::SignalLength, JBB);
+  }
+  else
+  {
+    jam();
+    sendSignalWithDelay(DBUTIL_REF, GSN_UTIL_SEQUENCE_REQ,
+                        signal, delay, UtilSequenceReq::SignalLength);
+  }
 }
 
 void
 Backup::execUTIL_SEQUENCE_REF(Signal* signal)
 {
-  BackupRecordPtr ptr LINT_SET_PTR;
   jamEntry();
+  BackupRecordPtr ptr LINT_SET_PTR;
   UtilSequenceRef * utilRef = (UtilSequenceRef*)signal->getDataPtr();
   ptr.i = utilRef->senderData;
   c_backupPool.getPtr(ptr);
   ndbrequire(ptr.p->masterData.gsn == GSN_UTIL_SEQUENCE_REQ);
+
+  if (utilRef->errorCode == UtilSequenceRef::TCError)
+  {
+    jam();
+    if (ptr.p->masterData.sequence.retriesLeft > 0)
+    {
+      jam();
+      infoEvent("BACKUP: retrying sequence on error %u",
+                utilRef->TCErrorCode);
+      ptr.p->masterData.sequence.retriesLeft--;
+      sendUtilSequenceReq(signal, ptr, 300);
+      return;
+    }
+  }
+  warningEvent("BACKUP: aborting due to sequence error (%u, %u)",
+               utilRef->errorCode,
+               utilRef->TCErrorCode);
+
   sendBackupRef(signal, ptr, BackupRef::SequenceFailure);
 }//execUTIL_SEQUENCE_REF()
 
-
 void
 Backup::sendBackupRef(Signal* signal, BackupRecordPtr ptr, Uint32 errorCode)
 {
   jam();
-  sendBackupRef(ptr.p->clientRef, ptr.p->flags, signal, ptr.p->clientData, errorCode);
+  sendBackupRef(ptr.p->clientRef, ptr.p->flags, signal,
+                ptr.p->clientData, errorCode);
   cleanup(signal, ptr);
 }
 
@@ -1163,6 +1206,7 @@ Backup::sendBackupRef(BlockReference sen
   jam();
   if (SEND_BACKUP_STARTED_FLAG(flags))
   {
+    jam();
     BackupRef* ref = (BackupRef*)signal->getDataPtrSend();
     ref->senderData = senderData;
     ref->errorCode = errorCode;
@@ -1170,7 +1214,9 @@ Backup::sendBackupRef(BlockReference sen
     sendSignal(senderRef, GSN_BACKUP_REF, signal, BackupRef::SignalLength, JBB);
   }
 
-  if(errorCode != BackupRef::IAmNotMaster){
+  if (errorCode != BackupRef::IAmNotMaster)
+  {
+    jam();
     signal->theData[0] = NDB_LE_BackupFailedToStart;
     signal->theData[1] = senderRef;
     signal->theData[2] = errorCode;
@@ -2320,7 +2366,7 @@ Backup::stopBackupReply(Signal* signal, 
 
   sendAbortBackupOrd(signal, ptr, AbortBackupOrd::BackupComplete);
   
-  if(!ptr.p->checkError())
+  if(!ptr.p->checkError() &&  ptr.p->masterData.errorCode == 0)
   {
     if (SEND_BACKUP_COMPLETED_FLAG(ptr.p->flags))
     {
@@ -4908,6 +4954,7 @@ Backup::execABORT_BACKUP_ORD(Signal* sig
   default:
 #endif
     ptr.p->setErrorCode(requestType);
+    ptr.p->masterData.errorCode = requestType;
     ok= true;
   }
   ndbrequire(ok);

=== modified file 'storage/ndb/src/kernel/blocks/backup/Backup.hpp'
--- a/storage/ndb/src/kernel/blocks/backup/Backup.hpp	2008-04-07 10:15:36 +0000
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.hpp	2009-01-27 16:11:49 +0000
@@ -501,6 +501,9 @@ public:
       SignalCounter sendCounter;
       Uint32 errorCode;
       union {
+        struct {
+          Uint32 retriesLeft;
+        } sequence;
 	struct {
 	  Uint32 startBackup;
 	} waitGCP;
@@ -667,6 +670,8 @@ public:
   void abort_scan(Signal*, BackupRecordPtr ptr);
   void removeBackup(Signal*, BackupRecordPtr ptr);
 
+  void sendUtilSequenceReq(Signal*, BackupRecordPtr ptr, Uint32 delay = 0);
+
   /*
     For periodic backup status reporting and explicit backup status reporting
   */

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2008-12-08 12:35:55 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2009-02-04 12:35:22 +0000
@@ -5328,6 +5328,12 @@ void Dbdih::startGcpMasterTakeOver(Signa
   signal->theData[0] = NDB_LE_GCP_TakeoverStarted;
   sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
 
+  /**
+   * save own value...
+   *   to be able to check values returned in MASTER_GCPCONF
+   */
+  m_gcp_save.m_master.m_new_gci = m_gcp_save.m_gci;
+
   setLocalNodefailHandling(signal, oldMasterId, NF_GCP_TAKE_OVER);
 }//Dbdih::handleNewMaster()
 
@@ -5628,6 +5634,17 @@ void Dbdih::execMASTER_GCPCONF(Signal* s
   ndbassert(ok); // Unhandled case...
 
   ok = false;
+  /**
+   * GCI should differ with atmost one
+   */
+  ndbrequire(saveGCI == m_gcp_save.m_gci ||
+             saveGCI == m_gcp_save.m_gci + 1 ||
+             saveGCI + 1 == m_gcp_save.m_gci);
+  if (saveGCI > m_gcp_save.m_master.m_new_gci)
+  {
+    jam();
+    m_gcp_save.m_master.m_new_gci = saveGCI;
+  }
   switch(saveState){
   case MasterGCPConf::GCP_SAVE_IDLE:
     jam();
@@ -5742,7 +5759,6 @@ void Dbdih::MASTER_GCPhandling(Signal* s
   else
   {
     ok = false;
-    m_gcp_save.m_master.m_new_gci = m_gcp_save.m_gci;
     switch(m_gcp_save.m_master.m_state){
     case GcpSave::GCP_SAVE_IDLE:
       jam();
@@ -8510,6 +8526,21 @@ void Dbdih::execGCP_NODEFINISH(Signal* s
     sendSignalWithDelay(CMVMI_REF, GSN_NDB_TAMPER, signal, 1000, 1);
     return;
   }
+  else if (ERROR_INSERTED(7216))
+  {
+    infoEvent("GCP_SAVE all/%u", c_error_insert_extra);
+    NodeRecordPtr nodePtr;
+    nodePtr.i = c_error_insert_extra;
+    ptrAss(nodePtr, nodeRecord);
+
+    removeAlive(nodePtr);
+    sendLoopMacro(GCP_SAVEREQ, sendGCP_SAVEREQ);
+    insertAlive(nodePtr);
+    signal->theData[0] = 9999;
+    sendSignalWithDelay(CMVMI_REF, GSN_NDB_TAMPER, signal, 1000, 1);
+    c_GCP_SAVEREQ_Counter.setWaitingFor(c_error_insert_extra);
+    return;
+  }
 #endif
   
   sendLoopMacro(GCP_SAVEREQ, sendGCP_SAVEREQ);
@@ -15489,8 +15520,21 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal
   }//if
   if (signal->theData[0] == DumpStateOrd::DihMinTimeBetweenLCP) {
     // Set time between LCP to min value
-    g_eventLogger->info("Set time between LCP to min value");
-    c_lcpState.clcpDelay = 0; // TimeBetweenLocalCheckpoints.min
+    if (signal->getLength() == 2)
+    {
+      Uint32 tmp;
+      const ndb_mgm_configuration_iterator * p = 
+	m_ctx.m_config.getOwnConfigIterator();
+      ndbrequire(p != 0);
+      ndb_mgm_get_int_parameter(p, CFG_DB_LCP_INTERVAL, &tmp);
+      g_eventLogger->info("Reset time between LCP to %u", tmp);
+      c_lcpState.clcpDelay = tmp;
+    }
+    else
+    {
+      g_eventLogger->info("Set time between LCP to min value");
+      c_lcpState.clcpDelay = 0; // TimeBetweenLocalCheckpoints.min
+    }
     return;
   }
   if (signal->theData[0] == DumpStateOrd::DihMaxTimeBetweenLCP) {
@@ -15605,6 +15649,12 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal
     SET_ERROR_INSERT_VALUE2(7214, signal->theData[1]);
     return;
   }
+
+  DECLARE_DUMP0(DBDIH, 7216, "Set error 7216 with extra arg")
+  {
+    SET_ERROR_INSERT_VALUE2(7216, signal->theData[1]);
+    return;
+  }
 }//Dbdih::execDUMP_STATE_ORD()
 
 void

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2008-12-08 12:35:55 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2009-02-02 21:21:34 +0000
@@ -704,10 +704,12 @@ public:
      *       fragment operations on the fragment. 
      *       A maximum of four concurrently active is allowed.
      */
-    typedef Bitmask<4> ScanNumberMask;
+
+    typedef Bitmask<8> ScanNumberMask; // Max 255 KeyInfo20::ScanNo
     ScanNumberMask m_scanNumberMask;
     DLList<ScanRecord>::Head m_activeScans;
     DLFifoList<ScanRecord>::Head m_queuedScans;
+    DLFifoList<ScanRecord>::Head m_queuedTupScans;
 
     Uint16 srLqhLognode[4];
     /**

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2008-12-08 12:35:55 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2009-02-02 21:21:34 +0000
@@ -9832,6 +9832,7 @@ Uint32 Dblqh::initScanrec(const ScanFrag
   Uint32 tupScan = ScanFragReq::getTupScanFlag(reqinfo);
   const Uint32 attrLen = ScanFragReq::getAttrLen(reqinfo);
   const Uint32 scanPrio = ScanFragReq::getScanPrio(reqinfo);
+  const Uint32 accScan = (rangeScan == 0) && (tupScan == 0);
 
   scanptr.p->scanKeyinfoFlag = keyinfo;
   scanptr.p->scanLockHold = scanLockHold;
@@ -9847,12 +9848,7 @@ Uint32 Dblqh::initScanrec(const ScanFrag
   scanptr.p->m_max_batch_size_rows = max_rows;
   scanptr.p->m_max_batch_size_bytes = max_bytes;
 
-#if 0
-  if (! rangeScan)
-    tupScan = 1;
-#endif
-
-  if (! rangeScan && ! tupScan)
+  if (accScan)
     scanptr.p->scanBlockref = tcConnectptr.p->tcAccBlockref;
   else if (! tupScan)
     scanptr.p->scanBlockref = tcConnectptr.p->tcTuxBlockref;
@@ -9895,13 +9891,27 @@ Uint32 Dblqh::initScanrec(const ScanFrag
    * !idx uses 1 - (MAX_PARALLEL_SCANS_PER_FRAG - 1)  =  1-11
    *  idx uses from MAX_PARALLEL_SCANS_PER_FRAG - MAX = 12-42)
    */
-  tupScan = 0; // Make sure that close tup scan does not start acc scan incorrectly
-  Uint32 start = (rangeScan || tupScan) ? MAX_PARALLEL_SCANS_PER_FRAG : 1 ;
-  Uint32 stop = (rangeScan || tupScan) ? MAX_PARALLEL_INDEX_SCANS_PER_FRAG : 
-    MAX_PARALLEL_SCANS_PER_FRAG - 1;
-  stop += start;
+  Uint32 start, stop;
+  if (accScan)
+  {
+    start = 1;
+    stop = MAX_PARALLEL_SCANS_PER_FRAG - 1;
+  }
+  else if (rangeScan)
+  {
+    start = MAX_PARALLEL_SCANS_PER_FRAG;
+    stop = start + MAX_PARALLEL_INDEX_SCANS_PER_FRAG - 1;
+  }
+  else
+  {
+    ndbassert(tupScan);
+    start = MAX_PARALLEL_SCANS_PER_FRAG + MAX_PARALLEL_INDEX_SCANS_PER_FRAG;
+    stop = start + MAX_PARALLEL_INDEX_SCANS_PER_FRAG - 1;
+  }
+  ndbrequire((start < 32 * tFragPtr.p->m_scanNumberMask.Size) &&
+             (stop < 32 * tFragPtr.p->m_scanNumberMask.Size));
   Uint32 free = tFragPtr.p->m_scanNumberMask.find(start);
-    
+  
   if(free == Fragrecord::ScanNumberMask::NotFound || free >= stop){
     jam();
     
@@ -9915,7 +9925,9 @@ Uint32 Dblqh::initScanrec(const ScanFrag
      */
     scanptr.p->scanState = ScanRecord::IN_QUEUE;
     LocalDLFifoList<ScanRecord> queue(c_scanRecordPool,
-				      fragptr.p->m_queuedScans);
+				      tupScan == 0 ? 
+                                      fragptr.p->m_queuedScans :
+                                      fragptr.p->m_queuedTupScans);
     queue.add(scanptr);
     return ZOK;
   }
@@ -9993,8 +10005,11 @@ void Dblqh::finishScanrec(Signal* signal
 {
   release_acc_ptr_list(scanptr.p);
 
+  Uint32 tupScan = scanptr.p->tupScan;
   LocalDLFifoList<ScanRecord> queue(c_scanRecordPool,
-				    fragptr.p->m_queuedScans);
+                                    tupScan == 0 ? 
+                                    fragptr.p->m_queuedScans :
+                                    fragptr.p->m_queuedTupScans);
   
   if(scanptr.p->scanState == ScanRecord::IN_QUEUE){
     jam();

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2008-12-24 10:48:24 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2009-02-02 15:58:48 +0000
@@ -4064,13 +4064,13 @@ void Dbtc::sendtckeyconf(Signal* signal,
     tcKeyConf->apiConnectPtr = regApiPtr->ndbapiConnect;
     tcKeyConf->gci_hi = Uint32(regApiPtr->globalcheckpointid >> 32);
     Uint32* gci_lo = (Uint32*)&tcKeyConf->operations[TopWords >> 1];
-    * gci_lo = Uint32(regApiPtr->globalcheckpointid);
     tcKeyConf->confInfo = confInfo;
     tcKeyConf->transId1 = regApiPtr->transid[0];
     tcKeyConf->transId2 = regApiPtr->transid[1];
     copyFromToLen(&regApiPtr->tcSendArray[0],
 		  (UintR*)&tcKeyConf->operations,
 		  (UintR)ZTCOPCONF_SIZE);
+    * gci_lo = Uint32(regApiPtr->globalcheckpointid);
     sendSignal(regApiPtr->ndbapiBlockref,
 	       GSN_TCKEYCONF, signal, (TpacketLen - 1) + 1 /** gci_lo */, JBB);
     return;

=== modified file 'storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp'
--- a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp	2007-10-25 09:00:36 +0000
+++ b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp	2009-01-27 14:32:31 +0000
@@ -1564,7 +1564,8 @@ DbUtil::execUTIL_SEQUENCE_REQ(Signal* si
     ndbrequire(opPtr.p->attrInfo.next(it));
     * it.data = 0;
   }
-  
+
+  transPtr.p->noOfRetries = 3;
   runTransaction(signal, transPtr);
 }
 
@@ -1673,6 +1674,7 @@ DbUtil::reportSequence(Signal* signal, c
   ret->sequenceId = transP->sequence.sequenceId;
   ret->requestType = transP->sequence.requestType;
   ret->errorCode = (Uint32)errCode;
+  ret->TCErrorCode = transP->errorCode;
   sendSignal(transP->clientRef, GSN_UTIL_SEQUENCE_REF, signal, 
 	     UtilSequenceRef::SignalLength, JBB);
 }

=== modified file 'storage/ndb/src/kernel/blocks/lgman.cpp'
--- a/storage/ndb/src/kernel/blocks/lgman.cpp	2008-04-22 19:36:05 +0000
+++ b/storage/ndb/src/kernel/blocks/lgman.cpp	2008-12-20 19:48:44 +0000
@@ -1380,11 +1380,13 @@ Lgman::flush_log(Signal* signal, Ptr<Log
 
       if (ptr.p->m_log_buffer_waiters.isEmpty() || ptr.p->m_outstanding_fs)
       {
+        jam();
 	force =  0;
       }
       
       if (force < 2)
       {
+        jam();
 	signal->theData[0] = LgmanContinueB::FLUSH_LOG;
 	signal->theData[1] = ptr.i;
 	signal->theData[2] = force + 1;
@@ -1394,6 +1396,7 @@ Lgman::flush_log(Signal* signal, Ptr<Log
       }
       else
       {
+        jam();
 	Buffer_idx pos= producer.m_current_pos;
 	GlobalPage *page = m_shared_page_pool.getPtr(pos.m_ptr_i);
 	
@@ -1417,7 +1420,7 @@ Lgman::flush_log(Signal* signal, Ptr<Log
 	ndbrequire(ptr.p->m_free_buffer_words > free);
 	ptr.p->m_free_file_words -= free;
 	ptr.p->m_free_buffer_words -= free;
-	
+         
 	validate_logfile_group(ptr, "force_log_flush");
 	
 	next_page(ptr.p, PRODUCER);
@@ -1438,17 +1441,25 @@ Lgman::flush_log(Signal* signal, Ptr<Log
   Uint32 tot= 0;
   while(!(consumer.m_current_page == producer.m_current_page) && !full)
   {
+    jam();
     validate_logfile_group(ptr, "before flush log");
 
     Uint32 cnt; // pages written
     Uint32 page= consumer.m_current_pos.m_ptr_i;
     if(consumer.m_current_page.m_ptr_i == producer.m_current_page.m_ptr_i)
     {
-      if(consumer.m_current_page.m_idx > producer.m_current_page.m_idx)
+      /**
+       * In same range
+       */
+      jam();
+
+      if(producer.m_current_pos.m_ptr_i > page)
       {
+        /**
+         * producer ahead of consumer in same chunk
+         */
 	jam();
-	Uint32 tmp= 
-	  consumer.m_current_page.m_idx - producer.m_current_page.m_idx;
+	Uint32 tmp= producer.m_current_pos.m_ptr_i - page;
 	cnt= write_log_pages(signal, ptr, page, tmp);
 	assert(cnt <= tmp);
 	
@@ -1458,8 +1469,9 @@ Lgman::flush_log(Signal* signal, Ptr<Log
       }
       else
       {
-	// Only 1 chunk
-	ndbrequire(ptr.p->m_buffer_pages.getSize() == 2); 
+        /**
+         * consumer ahead of producer in same chunk
+         */
 	Uint32 tmp= consumer.m_current_page.m_idx + 1;
 	cnt= write_log_pages(signal, ptr, page, tmp);
 	assert(cnt <= tmp);
@@ -1552,8 +1564,9 @@ Lgman::process_log_buffer_waiters(Signal
   bool removed= false;
   Ptr<Log_waiter> waiter;
   list.first(waiter);
+  Uint32 sz  = waiter.p->m_size;
   Uint32 logfile_group_id = ptr.p->m_logfile_group_id;
-  if(waiter.p->m_size + 2*File_formats::UNDO_PAGE_WORDS < free_buffer)
+  if(sz + 2*File_formats::UNDO_PAGE_WORDS < free_buffer)
   {
     removed= true;
     Uint32 block = waiter.p->m_block;
@@ -2058,7 +2071,6 @@ Logfile_client::add_entry(const Change* 
 	}
 	* (dst - 1) |= File_formats::Undofile::UNDO_NEXT_LSN << 16;
 	ptr.p->m_free_file_words += 2;
-	ptr.p->m_free_buffer_words += 2;
 	m_lgman->validate_logfile_group(ptr);
       }
       else

=== modified file 'storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2008-11-13 13:15:56 +0000
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2009-01-29 10:56:52 +0000
@@ -1010,6 +1010,13 @@ void Qmgr::execCM_REGCONF(Signal* signal
   c_start.m_gsn = GSN_CM_NODEINFOREQ;
   c_start.m_nodes = c_clusterNodes;
 
+  if (ERROR_INSERTED(937))
+  {
+    CLEAR_ERROR_INSERT_VALUE;
+    signal->theData[0] = 9999;
+    sendSignalWithDelay(CMVMI_REF, GSN_NDB_TAMPER, signal, 500, 1);
+  }
+
   return;
 }//Qmgr::execCM_REGCONF()
 
@@ -2847,7 +2854,13 @@ void Qmgr::node_failed(Signal* signal, U
     jam();
     return;
   case ZSTARTING:
-    c_start.reset();
+    /**
+     * bug#42422
+     *   Force "real" failure handling
+     */
+    failedNodePtr.p->phase = ZRUNNING;
+    failReportLab(signal, aFailedNode, FailRep::ZLINK_FAILURE);
+    return;
     // Fall-through
   default:
     jam();
@@ -3410,6 +3423,8 @@ void Qmgr::execPREP_FAILREQ(Signal* sign
   NodeRecPtr myNodePtr;
   jamEntry();
   
+  c_start.reset();
+  
   if (check_multi_node_shutdown(signal))
   {
     jam();

=== modified file 'storage/ndb/src/kernel/vm/Configuration.cpp'
--- a/storage/ndb/src/kernel/vm/Configuration.cpp	2008-04-22 19:36:05 +0000
+++ b/storage/ndb/src/kernel/vm/Configuration.cpp	2008-12-18 08:41:41 +0000
@@ -175,26 +175,18 @@ Configuration::init(int argc, char** arg
 
   if (_nowait_nodes)
   {
-    BaseString str(_nowait_nodes);
-    Vector<BaseString> arr;
-    str.split(arr, ",");
-    for (Uint32 i = 0; i<arr.size(); i++)
+    int res = g_nowait_nodes.parseMask(_nowait_nodes);
+    if(res == -2 || (res > 0 && g_nowait_nodes.get(0)))
     {
-      char *endptr = 0;
-      long val = strtol(arr[i].c_str(), &endptr, 10);
-      if (*endptr)
-      {
-	ndbout_c("Unable to parse nowait-nodes argument: %s : %s", 
-		 arr[i].c_str(), _nowait_nodes);
-	exit(-1);
-      }
-      if (! (val > 0 && val < MAX_NDB_NODES))
-      {
-	ndbout_c("Invalid nodeid specified in nowait-nodes: %ld : %s", 
-		 val, _nowait_nodes);
-	exit(-1);
-      }
-      g_nowait_nodes.set(val);
+      ndbout_c("Invalid nodeid specified in nowait-nodes: %s", 
+               _nowait_nodes);
+      exit(-1);
+    }
+    else if (res < 0)
+    {
+      ndbout_c("Unable to parse nowait-nodes argument: %s",
+               _nowait_nodes);
+      exit(-1);
     }
   }
 

=== modified file 'storage/ndb/src/mgmapi/Makefile.am'
--- a/storage/ndb/src/mgmapi/Makefile.am	2009-01-07 10:58:33 +0000
+++ b/storage/ndb/src/mgmapi/Makefile.am	2009-02-11 12:11:20 +0000
@@ -17,7 +17,8 @@ MYSQLCLUSTERdir=        .
 
 noinst_LTLIBRARIES = libmgmapi.la
 
-libmgmapi_la_SOURCES = mgmapi.cpp ndb_logevent.cpp mgmapi_configuration.cpp LocalConfig.cpp ../kernel/error/ndbd_exit_codes.c ../mgmsrv/ConfigInfo.cpp
+libmgmapi_la_SOURCES = mgmapi.cpp ndb_logevent.cpp mgmapi_configuration.cpp LocalConfig.cpp ../kernel/error/ndbd_exit_codes.c ../mgmsrv/ConfigInfo.cpp \
+                       mgmapi_error.c
 
 INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/include/mgmapi
 

=== added file 'storage/ndb/src/mgmapi/mgmapi_error.c'
--- a/storage/ndb/src/mgmapi/mgmapi_error.c	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/src/mgmapi/mgmapi_error.c	2008-12-18 10:04:16 +0000
@@ -0,0 +1,53 @@
+ /* Copyright (C) 2003 MySQL AB
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
+
+#include <mgmapi_error.h>
+
+const struct Ndb_Mgm_Error_Msg ndb_mgm_error_msgs[] = 
+{
+  { NDB_MGM_NO_ERROR, "No error" },
+  
+  /* Request for service errors */
+  { NDB_MGM_ILLEGAL_CONNECT_STRING, "Illegal connect string" },
+  { NDB_MGM_ILLEGAL_SERVER_HANDLE, "Illegal server handle" },
+  { NDB_MGM_ILLEGAL_SERVER_REPLY, "Illegal reply from server" },
+  { NDB_MGM_ILLEGAL_NUMBER_OF_NODES, "Illegal number of nodes" },
+  { NDB_MGM_ILLEGAL_NODE_STATUS, "Illegal node status" },
+  { NDB_MGM_OUT_OF_MEMORY, "Out of memory" },
+  { NDB_MGM_SERVER_NOT_CONNECTED, "Management server not connected" },
+  { NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, "Could not connect to socket" },
+  
+  /* Service errors - Start/Stop Node or System */
+  { NDB_MGM_START_FAILED, "Start failed" },
+  { NDB_MGM_STOP_FAILED, "Stop failed" },
+  { NDB_MGM_RESTART_FAILED, "Restart failed" },
+  
+  /* Service errors - Backup */
+  { NDB_MGM_COULD_NOT_START_BACKUP, "Could not start backup" },
+  { NDB_MGM_COULD_NOT_ABORT_BACKUP, "Could not abort backup" },
+  
+  /* Service errors - Single User Mode */
+  { NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE,
+    "Could not enter single user mode" },
+  { NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE,
+    "Could not exit single user mode" },
+  
+  /* Usage errors */
+  { NDB_MGM_USAGE_ERROR,
+    "Usage error" }
+};
+
+const int ndb_mgm_noOfErrorMsgs =
+  sizeof(ndb_mgm_error_msgs)/sizeof(struct Ndb_Mgm_Error_Msg);

=== modified file 'storage/ndb/src/mgmapi/ndb_logevent.cpp'
--- a/storage/ndb/src/mgmapi/ndb_logevent.cpp	2008-01-24 11:21:39 +0000
+++ b/storage/ndb/src/mgmapi/ndb_logevent.cpp	2009-01-14 10:47:05 +0000
@@ -19,8 +19,8 @@
 
 #include <NdbOut.hpp>
 #include <Properties.hpp>
-#include <socket_io.h>
 #include <InputStream.hpp>
+#include <NdbTick.h>
 
 #include <debugger/EventLogger.hpp>
 #include <kernel/NodeBitmask.hpp>
@@ -442,11 +442,15 @@ int ndb_logevent_get_next(const NdbLogEv
 
   SocketInputStream in(h->socket, timeout_in_milliseconds);
 
-  Properties p;
+  /*
+    Read log event header until header received
+    or timeout expired. The MGM server will continuously
+    send <PING>'s that should be ignored.
+  */
   char buf[256];
-
-  /* header */
-  while (1) {
+  NDB_TICKS start = NdbTick_CurrentMillisecond();
+  while(1)
+  {
     if (in.gets(buf,sizeof(buf)) == 0)
     {
       h->m_error= NDB_LEH_READ_ERROR;
@@ -466,9 +470,14 @@ int ndb_logevent_get_next(const NdbLogEv
 
     if(in.timedout())
         return 0;
-  }
 
-  /* read name-value pairs into properties object */
+    if ((NdbTick_CurrentMillisecond() - start) > timeout_in_milliseconds)
+      return 0;
+
+  };
+
+  /* Read name-value pairs until empty new line */
+  Properties p;
   while (1)
   {
     if (in.gets(buf,sizeof(buf)) == 0)

=== modified file 'storage/ndb/src/mgmsrv/MgmtSrvr.cpp'
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2008-11-13 13:15:56 +0000
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2009-01-08 14:35:49 +0000
@@ -3046,22 +3046,28 @@ MgmtSrvr::getConnectionDbParameter(int n
   DBUG_RETURN(1);
 }
 
-void MgmtSrvr::transporter_connect(NDB_SOCKET_TYPE sockfd)
+
+bool MgmtSrvr::transporter_connect(NDB_SOCKET_TYPE sockfd)
 {
-  if (theFacade->get_registry()->connect_server(sockfd))
-  {
-    /**
-     * Force an update_connections() so that the
-     * ClusterMgr and TransporterFacade is up to date
-     * with the new connection.
-     * Important for correct node id reservation handling
-     */
-    NdbMutex_Lock(theFacade->theMutexPtr);
-    theFacade->get_registry()->update_connections();
-    NdbMutex_Unlock(theFacade->theMutexPtr);
-  }
+  DBUG_ENTER("MgmtSrvr::transporter_connect");
+  TransporterRegistry* tr= theFacade->get_registry();
+  if (!tr->connect_server(sockfd))
+    DBUG_RETURN(false);
+
+  /*
+    Force an update_connections() so that the
+    ClusterMgr and TransporterFacade is up to date
+    with the new connection.
+    Important for correct node id reservation handling
+  */
+  theFacade->lock_mutex();
+  tr->update_connections();
+  theFacade->unlock_mutex();
+
+  DBUG_RETURN(true);
 }
 
+
 int MgmtSrvr::connect_to_self(const char * bindaddress)
 {
   int r= 0;

=== modified file 'storage/ndb/src/mgmsrv/MgmtSrvr.hpp'
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp	2008-03-14 13:32:49 +0000
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp	2009-01-08 14:35:49 +0000
@@ -444,7 +444,7 @@ public:
 
   int connect_to_self(const char* bindaddress = 0);
 
-  void transporter_connect(NDB_SOCKET_TYPE sockfd);
+  bool transporter_connect(NDB_SOCKET_TYPE sockfd);
 
   ConfigRetriever *get_config_retriever() { return m_config_retriever; };
 

=== modified file 'storage/ndb/src/mgmsrv/Services.cpp'
--- a/storage/ndb/src/mgmsrv/Services.cpp	2008-12-09 18:59:54 +0000
+++ b/storage/ndb/src/mgmsrv/Services.cpp	2009-01-23 11:03:00 +0000
@@ -294,7 +294,7 @@ struct PurgeStruct
 #define SLEEP_ERROR_INSERTED(x) if(ERROR_INSERTED(x)){NdbSleep_SecSleep(10);}
 
 MgmApiSession::MgmApiSession(class MgmtSrvr & mgm, NDB_SOCKET_TYPE sock, Uint64 session_id)
-  : SocketServer::Session(sock), m_mgmsrv(mgm)
+  : SocketServer::Session(sock), m_mgmsrv(mgm), m_name("unknown:0")
 {
   DBUG_ENTER("MgmApiSession::MgmApiSession");
   m_input = new SocketInputStream(sock, 30000);
@@ -306,6 +306,13 @@ MgmApiSession::MgmApiSession(class MgmtS
   m_session_id= session_id;
   m_mutex= NdbMutex_Create();
   m_errorInsert= 0;
+
+  struct sockaddr_in addr;
+  SOCKET_SIZE_TYPE addrlen= sizeof(addr);
+  if (getpeername(sock, (struct sockaddr*)&addr, &addrlen) == 0)
+    m_name.assfmt("%s:%d", inet_ntoa(addr.sin_addr), ntohs(addr.sin_port));
+  DBUG_PRINT("info", ("new connection from: %s", m_name.c_str()));
+
   DBUG_VOID_RETURN;
 }
 
@@ -1674,11 +1681,28 @@ void
 MgmApiSession::transporter_connect(Parser_t::Context &ctx,
 				   Properties const &args)
 {
-  m_mgmsrv.transporter_connect(m_socket);
 
-  m_stop= true;
-  m_stopped= true; // force a stop (no closing socket)
-  m_socket= NDB_INVALID_SOCKET;   // so nobody closes it
+  if (!m_mgmsrv.transporter_connect(m_socket))
+  {
+    // Connection not allowed or failed
+    g_eventLogger->warning("Failed to convert connection "
+                           "from '%s' to transporter",
+                           name());
+
+    // Close the socket to indicate failure to other side
+  }
+  else
+  {
+    /*
+      Conversion to transporter succeeded
+      Stop this session thread and release resources
+      but don't close the socket, it's been taken over
+      by the transporter
+    */
+    m_socket= NDB_INVALID_SOCKET;   // so nobody closes it
+  }
+
+  m_stop= true; // Stop the session
 }
 
 void

=== modified file 'storage/ndb/src/mgmsrv/Services.hpp'
--- a/storage/ndb/src/mgmsrv/Services.hpp	2007-03-22 11:33:07 +0000
+++ b/storage/ndb/src/mgmsrv/Services.hpp	2009-01-08 14:35:49 +0000
@@ -48,6 +48,9 @@ private:
 
   int m_errorInsert;
 
+  BaseString m_name;
+  const char* name() { return m_name.c_str(); }
+
   const char *get_error_text(int err_no)
   { return m_mgmsrv.getErrorText(err_no, m_err_str, sizeof(m_err_str)); }
 

=== modified file 'storage/ndb/src/ndbapi/ClusterMgr.cpp'
--- a/storage/ndb/src/ndbapi/ClusterMgr.cpp	2008-08-08 09:40:47 +0000
+++ b/storage/ndb/src/ndbapi/ClusterMgr.cpp	2009-01-08 11:57:59 +0000
@@ -556,8 +556,7 @@ ClusterMgr::reportNodeFailed(NodeId node
     theFacade.ReportNodeDead(nodeId);
   }
   
-  theNode.nfCompleteRep = false;
-  if(noOfAliveNodes == 0)
+  if (noOfConnectedNodes == 0)
   {
     if (!global_flag_skip_invalidate_cache &&
         theFacade.m_globalDictCache)
@@ -568,6 +567,10 @@ ClusterMgr::reportNodeFailed(NodeId node
       m_connect_count ++;
       m_cluster_state = CS_waiting_for_clean_cache;
     }
+  }
+  theNode.nfCompleteRep = false;
+  if(noOfAliveNodes == 0)
+  {
     NFCompleteRep rep;
     for(Uint32 i = 1; i<MAX_NDB_NODES; i++){
       if(theNodes[i].defined && theNodes[i].nfCompleteRep == false){

=== modified file 'storage/ndb/src/ndbapi/ClusterMgr.hpp'
--- a/storage/ndb/src/ndbapi/ClusterMgr.hpp	2007-05-09 14:31:16 +0000
+++ b/storage/ndb/src/ndbapi/ClusterMgr.hpp	2008-12-16 20:51:49 +0000
@@ -85,7 +85,6 @@ public:
   
   const Node &  getNodeInfo(NodeId) const;
   Uint32        getNoOfConnectedNodes() const;
-  bool          isClusterAlive() const;
   void          hb_received(NodeId);
 
   Uint32        m_connect_count;
@@ -145,11 +144,6 @@ ClusterMgr::getNoOfConnectedNodes() cons
 }
 
 inline
-bool
-ClusterMgr::isClusterAlive() const {
-  return noOfAliveNodes != 0;
-}
-inline
 void
 ClusterMgr::hb_received(NodeId nodeId) {
   theNodes[nodeId].m_info.m_heartbeat_cnt= 0;

=== modified file 'storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp	2008-09-25 10:55:39 +0000
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp	2009-01-08 11:57:59 +0000
@@ -1080,6 +1080,8 @@ NdbEventBuffer::NdbEventBuffer(Ndb *ndb)
   // initialize lists
   bzero(&g_empty_gci_container, sizeof(Gci_container));
   init_gci_containers();
+
+  m_alive_node_bit_mask.clear();
 }
 
 NdbEventBuffer::~NdbEventBuffer()
@@ -1836,11 +1838,16 @@ NdbEventBuffer::complete_bucket(Gci_cont
 
 void
 NdbEventBuffer::execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep,
-                                         Uint32 len)
+                                         Uint32 len, int complete_cluster_failure)
 {
-  if (unlikely(m_active_op_count == 0))
+  if (!complete_cluster_failure)
   {
-    return;
+    m_alive_node_bit_mask.set(refToNode(rep->senderRef));
+
+    if (unlikely(m_active_op_count == 0))
+    {
+      return;
+    }
   }
   
   DBUG_ENTER_EVENT("NdbEventBuffer::execSUB_GCP_COMPLETE_REP");
@@ -2089,13 +2096,15 @@ NdbEventBuffer::report_node_connected(Ui
 }
 
 void
-NdbEventBuffer::report_node_failure(Uint32 node_id)
+NdbEventBuffer::report_node_failure_completed(Uint32 node_id)
 {
+  m_alive_node_bit_mask.clear(node_id);
+
   NdbEventOperation* op= m_ndb->getEventOperation(0);
   if (op == 0)
     return;
 
-  DBUG_ENTER("NdbEventBuffer::report_node_failure");
+  DBUG_ENTER("NdbEventBuffer::report_node_failure_completed");
   SubTableData data;
   LinearSectionPtr ptr[3];
   bzero(&data, sizeof(data));
@@ -2120,20 +2129,17 @@ NdbEventBuffer::report_node_failure(Uint
    */
   // no need to lock()/unlock(), receive thread calls this
   insert_event(&op->m_impl, data, ptr, data.senderData);
-  DBUG_VOID_RETURN;
-}
 
-void
-NdbEventBuffer::completeClusterFailed()
-{
-  NdbEventOperation* op= m_ndb->getEventOperation(0);
-  if (op == 0)
-    return;
+  if (!m_alive_node_bit_mask.isclear())
+    DBUG_VOID_RETURN;
 
-  DBUG_ENTER("NdbEventBuffer::completeClusterFailed");
+  /*
+   * Cluster failure
+   */
 
+  DBUG_PRINT("info", ("Cluster failure"));
 
-  Uint64 gci = Uint64((m_latestGCI >> 32) + 1) << 32;
+  gci = Uint64((m_latestGCI >> 32) + 1) << 32;
   bool found = find_max_known_gci(&gci);
 
   Uint64 * array = m_known_gci.getBase();
@@ -2169,18 +2175,10 @@ NdbEventBuffer::completeClusterFailed()
   /**
    * Inject new event
    */
-  SubTableData data;
-  LinearSectionPtr ptr[3];
-  bzero(&data, sizeof(data));
-  bzero(ptr, sizeof(ptr));
-
   data.tableId = ~0;
   data.requestInfo = 0;
   SubTableData::setOperation(data.requestInfo,
 			     NdbDictionary::Event::_TE_CLUSTER_FAILURE);
-  data.flags = SubTableData::LOG;
-  data.gci_hi = Uint32(gci >> 32);
-  data.gci_lo = Uint32(gci);
 
   /**
    * Insert this event for each operation
@@ -2212,7 +2210,7 @@ NdbEventBuffer::completeClusterFailed()
   rep.gci_lo= gci & 0xFFFFFFFF;
   rep.gcp_complete_rep_count= cnt;
   rep.flags = 0;
-  execSUB_GCP_COMPLETE_REP(&rep, SubGcpCompleteRep::SignalLength);
+  execSUB_GCP_COMPLETE_REP(&rep, SubGcpCompleteRep::SignalLength, 1);
 
   DBUG_VOID_RETURN;
 }

=== modified file 'storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp	2008-02-11 13:24:17 +0000
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp	2008-12-16 20:51:49 +0000
@@ -424,8 +424,7 @@ public:
     and added to all event ops listed as active or pending delete
     in m_dropped_ev_op using insertDataL, includeing the blob
     event ops referenced by a regular event op.
-    - NdbEventBuffer::report_node_failure
-    - NdbEventBuffer::completeClusterFailed
+    - NdbEventBuffer::report_node_failure_completed
 
     TE_ACTIVE is sent from the kernel on initial execute/start of the
     event op, but is also internally generetad on node connect like
@@ -528,12 +527,12 @@ public:
   int insertDataL(NdbEventOperationImpl *op,
 		  const SubTableData * const sdata, Uint32 len,
 		  LinearSectionPtr ptr[3]);
-  void execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const, Uint32 len);
+  void execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const, Uint32 len,
+                                int complete_cluster_failure= 0);
   void complete_outof_order_gcis();
   
   void report_node_connected(Uint32 node_id);
-  void report_node_failure(Uint32 node_id);
-  void completeClusterFailed();
+  void report_node_failure_completed(Uint32 node_id);
 
   // used by user thread 
   Uint64 getLatestGCI();
@@ -664,6 +663,8 @@ private:
   void complete_bucket(Gci_container*);
   bool find_max_known_gci(Uint64 * res) const;
   void resize_known_gci();
+
+  Bitmask<(unsigned int)_NDB_NODE_BITMASK_SIZE> m_alive_node_bit_mask;
 };
 
 inline

=== modified file 'storage/ndb/src/ndbapi/NdbScanOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp	2008-12-09 18:59:54 +0000
+++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp	2009-02-10 08:24:37 +0000
@@ -788,7 +788,10 @@ NdbIndexScanOperation::scanIndexImpl(con
     return -1;
   }
 
-  if (scan_flags & NdbScanOperation::SF_OrderBy)
+  result_record->copyMask(m_read_mask, result_mask);
+
+  if (scan_flags & (NdbScanOperation::SF_OrderBy | 
+                    NdbScanOperation::SF_OrderByFull))
   {
     /**
      * For ordering, we need all keys in the result row.
@@ -796,19 +799,34 @@ NdbIndexScanOperation::scanIndexImpl(con
      * So for each key column, check that it is included in the result
      * NdbRecord.
      */
+#define MASKSZ ((NDB_MAX_ATTRIBUTES_IN_TABLE+31)>>5)
+    Uint32 keymask[MASKSZ];
+    BitmaskImpl::clear(MASKSZ, keymask);
+
     for (i = 0; i < key_record->key_index_length; i++)
     {
-      const NdbRecord::Attr *key_col =
-        &key_record->columns[key_record->key_indexes[i]];
-      if (key_col->attrId >= result_record->m_attrId_indexes_length ||
-          result_record->m_attrId_indexes[key_col->attrId] < 0)
+      Uint32 attrId = key_record->columns[key_record->key_indexes[i]].attrId;
+      if (attrId >= result_record->m_attrId_indexes_length ||
+          result_record->m_attrId_indexes[attrId] < 0)
       {
         setErrorCodeAbort(4292);
         return -1;
       }
+
+      BitmaskImpl::set(MASKSZ, keymask, attrId);
     }
-  }
 
+    if (scan_flags & NdbScanOperation::SF_OrderByFull)
+    {
+      BitmaskImpl::bitOR(MASKSZ, m_read_mask, keymask);
+    }
+    else if (!BitmaskImpl::contains(MASKSZ, m_read_mask, keymask))
+    {
+      setErrorCodeAbort(4341);
+      return -1;
+    }
+  }
+  
   if (!(key_record->flags & NdbRecord::RecIsIndex))
   {
     setErrorCodeAbort(4283);
@@ -833,8 +851,6 @@ NdbIndexScanOperation::scanIndexImpl(con
   if (res==-1)
     return -1;
 
-  result_record->copyMask(m_read_mask, result_mask);
-
   /* Fix theStatus as set in processIndexScanDefs(). */
   theStatus= NdbOperation::UseNdbRecord;
 
@@ -853,11 +869,8 @@ NdbIndexScanOperation::scanIndexImpl(con
       But cannot mask pseudo columns, nor key columns in ordered scans.
     */
     attrId= col->attrId;
-    if ( result_mask &&
-         !(attrId & AttributeHeader::PSEUDO) &&
-         !( (scan_flags & NdbScanOperation::SF_OrderBy) &&
-            (col->flags & NdbRecord::IsKey) ) &&
-         !(result_mask[attrId>>3] & (1<<(attrId & 7))) )
+    if ( !(attrId & AttributeHeader::PSEUDO) &&
+         !BitmaskImpl::get(MASKSZ, m_read_mask, attrId))
     {
       continue;
     }
@@ -1022,7 +1035,7 @@ NdbScanOperation::processTableScanDefs(N
     tupScan = false;
   }
   
-  if (rangeScan && (scan_flags & SF_OrderBy))
+  if (rangeScan && (scan_flags & (SF_OrderBy | SF_OrderByFull)))
     parallel = fragCount; // Note we assume fragcount of base table==
                           // fragcount of index.
   
@@ -1295,39 +1308,74 @@ NdbScanOperation::executeCursor(int node
    * Call finaliseScanOldApi() for old style scans before
    * proceeding
    */  
-  if (m_scanUsingOldApi &&
-      finaliseScanOldApi() == -1) 
-    return -1;
-
-  NdbTransaction * tCon = theNdbCon;
+  bool locked = false;
   TransporterFacade* tp = theNdb->theImpl->m_transporter_facade;
-  Guard guard(tp->theMutexPtr);
 
-  Uint32 seq = tCon->theNodeSequence;
+  int res = 0;
+  if (m_scanUsingOldApi && finaliseScanOldApi() == -1)
+  {
+    res = -1;
+    goto done;
+  }
 
-  if (tp->get_node_alive(nodeId) &&
-      (tp->getNodeSequence(nodeId) == seq)) {
+  {
+    locked = true;
+    NdbTransaction * tCon = theNdbCon;
+    NdbMutex_Lock(tp->theMutexPtr);
+    
+    Uint32 seq = tCon->theNodeSequence;
+    
+    if (tp->get_node_alive(nodeId) &&
+        (tp->getNodeSequence(nodeId) == seq)) {
+      
+      tCon->theMagicNumber = 0x37412619;
+      
+      if (doSendScan(nodeId) == -1)
+      {
+        res = -1;
+        goto done;
+      }
+      
+      m_executed= true; // Mark operation as executed
+    } 
+    else
+    {
+      if (!(tp->get_node_stopping(nodeId) &&
+            (tp->getNodeSequence(nodeId) == seq)))
+      {
+        TRACE_DEBUG("The node is hard dead when attempting to start a scan");
+        setErrorCode(4029);
+        tCon->theReleaseOnClose = true;
+      } 
+      else 
+      {
+        TRACE_DEBUG("The node is stopping when attempting to start a scan");
+        setErrorCode(4030);
+      }//if
+      res = -1;
+      tCon->theCommitStatus = NdbTransaction::Aborted;
+    }//if
+  }
 
-    tCon->theMagicNumber = 0x37412619;
+done:
+    /**
+   * Set pointers correctly
+   *   so that nextResult will handle it correctly
+   *   even if doSendScan was never called
+   *   bug#42454
+   */
+  m_curr_row = 0;
+  m_sent_receivers_count = theParallelism;
+  if(m_ordered)
+  {
+    m_current_api_receiver = theParallelism;
+    m_api_receivers_count = theParallelism;
+  }
 
-    if (doSendScan(nodeId) == -1)
-      return -1;
+  if (locked)
+    NdbMutex_Unlock(tp->theMutexPtr);
 
-    m_executed= true; // Mark operation as executed
-    return 0;
-  } else {
-    if (!(tp->get_node_stopping(nodeId) &&
-          (tp->getNodeSequence(nodeId) == seq))){
-      TRACE_DEBUG("The node is hard dead when attempting to start a scan");
-      setErrorCode(4029);
-      tCon->theReleaseOnClose = true;
-    } else {
-      TRACE_DEBUG("The node is stopping when attempting to start a scan");
-      setErrorCode(4030);
-    }//if
-    tCon->theCommitStatus = NdbTransaction::Aborted;
-  }//if
-  return -1;
+  return res;
 }
 
 
@@ -1791,7 +1839,7 @@ int NdbScanOperation::finaliseScanOldApi
      * don't
      */
     const unsigned char * resultMask= 
-      ((m_savedScanFlagsOldApi & SF_OrderBy) !=0) ? 
+      ((m_savedScanFlagsOldApi & (SF_OrderBy | SF_OrderByFull)) !=0) ? 
       m_accessTable->m_pkMask : 
       emptyMask;
 
@@ -2046,14 +2094,6 @@ NdbScanOperation::doSendScan(int aProces
   }    
   theStatus = WaitResponse;  
 
-  m_curr_row = 0;
-  m_sent_receivers_count = theParallelism;
-  if(m_ordered)
-  {
-    m_current_api_receiver = theParallelism;
-    m_api_receivers_count = theParallelism;
-  }
-  
   return tSignalCount;
 }//NdbOperation::doSendScan()
 
@@ -3004,7 +3044,7 @@ NdbIndexScanOperation::processIndexScanD
                                             Uint32 parallel,
                                             Uint32 batch)
 {
-  const bool order_by = scan_flags & SF_OrderBy;
+  const bool order_by = scan_flags & (SF_OrderBy | SF_OrderByFull);
   const bool order_desc = scan_flags & SF_Descending;
   const bool read_range_no = scan_flags & SF_ReadRangeNo;
   m_multi_range = scan_flags & SF_MultiRange;

=== modified file 'storage/ndb/src/ndbapi/Ndbif.cpp'
--- a/storage/ndb/src/ndbapi/Ndbif.cpp	2008-11-13 13:15:56 +0000
+++ b/storage/ndb/src/ndbapi/Ndbif.cpp	2008-12-16 20:51:49 +0000
@@ -269,13 +269,7 @@ Ndb::report_node_failure_completed(Uint3
   {
     // node failed
     // eventOperations in the ndb object should be notified
-    theEventBuffer->report_node_failure(node_id);
-    if(!theImpl->m_transporter_facade->theClusterMgr->isClusterAlive())
-    {
-      // cluster is unavailable, 
-      // eventOperations in the ndb object should be notified
-      theEventBuffer->completeClusterFailed();
-    }
+    theEventBuffer->report_node_failure_completed(node_id);
   }
   
   abortTransactionsAfterNodeFailure(node_id);

=== modified file 'storage/ndb/src/ndbapi/TransporterFacade.hpp'
--- a/storage/ndb/src/ndbapi/TransporterFacade.hpp	2008-05-16 13:08:36 +0000
+++ b/storage/ndb/src/ndbapi/TransporterFacade.hpp	2009-01-29 16:24:04 +0000
@@ -323,7 +323,9 @@ TransporterFacade::unlock_mutex()
 inline
 unsigned Ndb_cluster_connection_impl::get_connect_count() const
 {
-  return m_transporter_facade->theClusterMgr->m_connect_count;
+  if (m_transporter_facade->theClusterMgr)
+    return m_transporter_facade->theClusterMgr->m_connect_count;
+  return 0;
 }
 
 inline
@@ -351,9 +353,12 @@ TransporterFacade::getNodeGrp(NodeId n) 
 inline
 bool
 TransporterFacade::get_node_alive(NodeId n) const {
-
-  const ClusterMgr::Node & node = theClusterMgr->getNodeInfo(n);
-  return node.m_alive;
+  if (theClusterMgr)
+  {
+    const ClusterMgr::Node & node = theClusterMgr->getNodeInfo(n);
+    return node.m_alive;
+  }
+  return 0;
 }
 
 inline

=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2008-12-09 18:59:54 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2009-02-09 13:34:12 +0000
@@ -365,7 +365,7 @@ ErrorBundle ErrorCodes[] = {
   { 708,  DMEC, SE, "No more attribute metadata records (increase MaxNoOfAttributes)" },
   { 709,  HA_ERR_NO_SUCH_TABLE, SE, "No such table existed" },
   { 710,  DMEC, SE, "Internal: Get by table name not supported, use table id." },
-  { 721,  HA_ERR_TABLE_EXIST,   OE, "Table or index with given name already exists" },
+  { 721,  HA_ERR_TABLE_EXIST,   OE, "Schema object with given name already exists" },
   { 723,  HA_ERR_NO_SUCH_TABLE, SE, "No such table existed" },
   { 736,  DMEC, SE, "Unsupported array size" },
   { 737,  HA_WRONG_CREATE_OPTION, SE, "Attribute array size too big" },
@@ -678,6 +678,7 @@ ErrorBundle ErrorCodes[] = {
   { 4290, DMEC, AE, "Missing column specification in NdbDictionary::RecordSpecification" },
   { 4291, DMEC, AE, "Duplicate column specification in NdbDictionary::RecordSpecification" },
   { 4292, DMEC, AE, "NdbRecord for tuple access is not an index key NdbRecord" },
+  { 4341, DMEC, AE, "Not all keys read when using option SF_OrderBy" },
   { 4293, DMEC, AE, "Error returned from application scanIndex() callback" },
   { 4294, DMEC, AE, "Scan filter is too large, discarded" },
   { 4295, DMEC, AE, "Column is NULL in Get/SetValueSpec structure" },
@@ -686,6 +687,7 @@ ErrorBundle ErrorCodes[] = {
   { 4298, DMEC, AE, "Invalid or unsupported ScanOptions structure" },
   { 4299, DMEC, AE, "Incorrect combination of ScanOption flags, extraGetValues ptr and numExtraGetValues" },
   { 2810, DMEC, TR, "No space left on the device" },
+  { 2811, DMEC, TR, "Error with file permissions, please check file system" },
   { 2815, DMEC, TR, "Error in reading files, please check file system" },
 
   { NO_CONTACT_WITH_PROCESS, DMEC, AE,

=== modified file 'storage/ndb/test/include/DbUtil.hpp'
--- a/storage/ndb/test/include/DbUtil.hpp	2008-03-03 15:10:42 +0000
+++ b/storage/ndb/test/include/DbUtil.hpp	2008-12-12 08:48:37 +0000
@@ -102,7 +102,7 @@ public:
   bool doQuery(BaseString& str, SqlResultSet& result);
   bool doQuery(BaseString& str, const Properties& args, SqlResultSet& result);
 
-  bool waitConnected(int timeout);
+  bool waitConnected(int timeout = 120);
 
   bool  databaseLogin(const char * host,
                       const char * user,

=== modified file 'storage/ndb/test/ndbapi/testBasic.cpp'
--- a/storage/ndb/test/ndbapi/testBasic.cpp	2007-10-15 08:09:00 +0000
+++ b/storage/ndb/test/ndbapi/testBasic.cpp	2008-12-16 17:12:00 +0000
@@ -1796,9 +1796,7 @@ TESTCASE("InsertError2", "" ){
 }
 TESTCASE("Fill", 
 	 "Verify what happens when we fill the db" ){
-  INITIALIZER(runFillTable);
-  INITIALIZER(runPkRead);
-  FINALIZER(runClearTable2);
+  STEP(runFillTable);
 }
 TESTCASE("Bug25090", 
 	 "Verify what happens when we fill the db" ){

=== modified file 'storage/ndb/test/ndbapi/testMgm.cpp'
--- a/storage/ndb/test/ndbapi/testMgm.cpp	2007-06-13 12:54:00 +0000
+++ b/storage/ndb/test/ndbapi/testMgm.cpp	2009-01-14 10:47:05 +0000
@@ -787,6 +787,56 @@ done:
   return result;
 }
 
+// Enabled in 6.4
+#if NDB_VERSION_D > 60400
+int runTestBug40922(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NdbMgmd mgmd;
+
+  if (!mgmd.connect())
+    return NDBT_FAILED;
+
+  int filter[] = {
+    15, NDB_MGM_EVENT_CATEGORY_BACKUP,
+    1, NDB_MGM_EVENT_CATEGORY_STARTUP,
+    0
+  };
+  NdbLogEventHandle le_handle =
+    ndb_mgm_create_logevent_handle(mgmd.handle(), filter);
+  if (!le_handle)
+    return NDBT_FAILED;
+
+  g_info << "Calling ndb_log_event_get_next" << endl;
+
+  struct ndb_logevent le_event;
+  int r = ndb_logevent_get_next(le_handle,
+                                &le_event,
+                                2000);
+  g_info << "ndb_log_event_get_next returned " << r << endl;
+
+  int result = NDBT_FAILED;
+  if (r == 0)
+  {
+    // Got timeout
+    g_info << "ndb_logevent_get_next returned timeout" << endl;
+    result = NDBT_OK;
+  }
+  else
+  {
+    if(r>0)
+      g_err << "ERROR: Receieved unexpected event: "
+            << le_event.type << endl;
+    if(r<0)
+      g_err << "ERROR: ndb_logevent_get_next returned error: "
+            << r << endl;
+  }
+
+  ndb_mgm_destroy_logevent_handle(&le_handle);
+
+  return result;
+}
+#endif
+
 NDBT_TESTSUITE(testMgm);
 TESTCASE("SingleUserMode", 
 	 "Test single user mode"){
@@ -828,6 +878,14 @@ TESTCASE("ApiMgmStructEventTimeout",
   INITIALIZER(runTestMgmApiStructEventTimeout);
 
 }
+// Enabled in 6.4
+#if 0
+TESTCASE("Bug40922",
+	 "Make sure that ndb_logevent_get_next returns when "
+         "called with a timeout"){
+  INITIALIZER(runTestBug40922);
+}
+#endif
 NDBT_TESTSUITE_END(testMgm);
 
 int main(int argc, const char** argv){

=== modified file 'storage/ndb/test/ndbapi/testNodeRestart.cpp'
--- a/storage/ndb/test/ndbapi/testNodeRestart.cpp	2008-12-08 12:35:55 +0000
+++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp	2009-01-30 10:41:42 +0000
@@ -281,7 +281,7 @@ int runRestarter(NDBT_Context* ctx, NDBT
     return NDBT_FAILED;
   }
   
-  loops *= restarter.getNumDbNodes();
+  loops *= (restarter.getNumDbNodes() > 4 ? 4 : restarter.getNumDbNodes());
   while(i<loops && result != NDBT_FAILED && !ctx->isTestStopped()){
 
     int id = lastId % restarter.getNumDbNodes();
@@ -2506,7 +2506,10 @@ runMNF(NDBT_Context* ctx, NDBT_Step* ste
     {
       for (int i = 0; i<cnt; i++)
       {
-        res.insertErrorInNode(nodes[i], 7180);
+        if (res.getNextMasterNodeId(master) == nodes[i])
+          res.insertErrorInNode(nodes[i], 7180);
+        else
+          res.insertErrorInNode(nodes[i], 7205);
       }
 
       int lcp = 7099;
@@ -3117,6 +3120,105 @@ runBug41295(NDBT_Context* ctx, NDBT_Step
   return NDBT_OK;
 }
 
+int
+runBug41469(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NdbRestarter res;
+
+  if (res.getNumDbNodes() < 4)
+  {
+    ctx->stopTest();
+    return NDBT_OK;
+  }
+
+  int loops = ctx->getNumLoops();
+
+  int val0[] = { 7216, 0 }; 
+  int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+  for (int i = 0; i<loops; i++)
+  {
+    int master = res.getMasterNodeId();
+    int next = res.getNextMasterNodeId(master);
+    
+    if (res.dumpStateOneNode(master, val2, 2))
+      return NDBT_FAILED;
+    
+    ndbout_c("stopping %u, err 7216 (next: %u)", master, next);
+    val0[1] = next;
+    if (res.dumpStateOneNode(master, val0, 2))
+      return NDBT_FAILED;
+    
+    res.waitNodesNoStart(&master, 1);
+    res.startNodes(&master, 1);
+    ndbout_c("waiting for cluster started");
+    if (res.waitClusterStarted())
+    {
+      return NDBT_FAILED;
+    }
+  }
+  ctx->stopTest();
+  return NDBT_OK;
+}
+
+int
+runBug42422(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NdbRestarter res;
+  
+  if (res.getNumDbNodes() < 4)
+  {
+    ctx->stopTest();
+    return NDBT_OK;
+  }
+  
+  int loops = ctx->getNumLoops();
+  while (--loops >= 0)
+  {
+    int master = res.getMasterNodeId();
+    ndbout_c("master: %u", master);
+    int nodeId = res.getRandomNodeSameNodeGroup(master, rand()); 
+    ndbout_c("target: %u", nodeId);
+    int node2 = res.getRandomNodeOtherNodeGroup(nodeId, rand());
+    ndbout_c("node 2: %u", node2);
+    
+    res.restartOneDbNode(nodeId,
+                         /** initial */ false, 
+                         /** nostart */ true,
+                         /** abort   */ true);
+    
+    res.waitNodesNoStart(&nodeId, 1);
+    
+    int dump[] = { 9000, 0 };
+    dump[1] = node2;
+    
+    if (res.dumpStateOneNode(nodeId, dump, 2))
+      return NDBT_FAILED;
+    
+    int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+    if (res.dumpStateOneNode(nodeId, val2, 2))
+      return NDBT_FAILED;
+    
+    res.insertErrorInNode(nodeId, 937);
+    ndbout_c("%u : starting %u", __LINE__, nodeId);
+    res.startNodes(&nodeId, 1);
+    NdbSleep_SecSleep(3);
+    ndbout_c("%u : waiting for %u to not get not-started", __LINE__, nodeId);
+    res.waitNodesNoStart(&nodeId, 1);
+    
+    ndbout_c("%u : starting %u", __LINE__, nodeId);
+    res.startNodes(&nodeId, 1);
+    
+    ndbout_c("%u : waiting for cluster started", __LINE__);
+    if (res.waitClusterStarted())
+    {
+      return NDBT_FAILED;
+    }
+  }
+
+  ctx->stopTest();
+  return NDBT_OK;
+}
+
 NDBT_TESTSUITE(testNodeRestart);
 TESTCASE("NoLoad", 
 	 "Test that one node at a time can be stopped and then restarted "\
@@ -3564,6 +3666,15 @@ TESTCASE("Bug41295", "")
   STEP(runBug41295);
   FINALIZER(runClearTable);
 }
+TESTCASE("Bug41469", ""){
+  INITIALIZER(runLoadTable);
+  STEP(runBug41469);
+  STEP(runScanUpdateUntilStopped);
+  FINALIZER(runClearTable);
+}
+TESTCASE("Bug42422", ""){
+  INITIALIZER(runBug42422);
+}
 NDBT_TESTSUITE_END(testNodeRestart);
 
 int main(int argc, const char** argv){

=== modified file 'storage/ndb/test/ndbapi/testScan.cpp'
--- a/storage/ndb/test/ndbapi/testScan.cpp	2008-04-28 14:17:28 +0000
+++ b/storage/ndb/test/ndbapi/testScan.cpp	2009-02-04 12:35:22 +0000
@@ -1235,6 +1235,96 @@ runBug24447(NDBT_Context* ctx, NDBT_Step
   return NDBT_OK;
 }
 
+int runBug42545(NDBT_Context* ctx, NDBT_Step* step){
+
+  int loops = ctx->getNumLoops();
+
+  Ndb* pNdb = GETNDB(step);
+  NdbRestarter res;
+
+  if (res.getNumDbNodes() < 2)
+  {
+    ctx->stopTest();
+    return NDBT_OK;
+  }
+
+  const NdbDictionary::Index * pIdx = 
+    GETNDB(step)->getDictionary()->getIndex(orderedPkIdxName, 
+					    ctx->getTab()->getName());
+  
+
+  int i = 0;
+  while (pIdx && i++ < loops && !ctx->isTestStopped()) 
+  {
+    g_info << i << ": ";
+    NdbTransaction* pTrans = pNdb->startTransaction();
+    int nodeId = pTrans->getConnectedNodeId();
+    
+    {
+      Uint32 cnt = 0;
+      Vector<NdbTransaction*> translist;
+      while (cnt < 3)
+      {
+        NdbTransaction* p2 = pNdb->startTransaction();
+        translist.push_back(p2);
+        if (p2->getConnectedNodeId() == (Uint32)nodeId)
+          cnt++;
+      }
+      
+      for (size_t t = 0; t < translist.size(); t++)
+        translist[t]->close();
+      translist.clear();
+    }
+
+    NdbIndexScanOperation* 
+      pOp = pTrans->getNdbIndexScanOperation(pIdx, ctx->getTab());
+    
+    int r0 = pOp->readTuples(NdbOperation::LM_CommittedRead,
+                             NdbScanOperation::SF_OrderBy);
+
+    ndbout << "Restart node " << nodeId << endl; 
+    res.restartOneDbNode(nodeId,
+                         /** initial */ false, 
+                         /** nostart */ true,
+                         /** abort   */ true);
+    
+    res.waitNodesNoStart(&nodeId, 1);
+    res.startNodes(&nodeId, 1);
+    res.waitNodesStarted(&nodeId, 1);
+
+    int r1 = pTrans->execute(NdbTransaction::NoCommit);
+
+    int r2;
+    while ((r2 = pOp->nextResult()) == 0);
+
+    ndbout_c("r0: %d r1: %d r2: %d", r0, r1, r2);
+
+    pTrans->close();
+  }
+  
+  return NDBT_OK;
+}
+
+int
+initBug42559(NDBT_Context* ctx, NDBT_Step* step){
+  
+  int dump[] = { 7017  }; // Max LCP speed
+  NdbRestarter res;
+  res.dumpStateAllNodes(dump, 1);
+
+  return NDBT_OK;
+}
+int
+finalizeBug42559(NDBT_Context* ctx, NDBT_Step* step){
+  
+  int dump[] = { 7017, 1  }; // Restore config value
+  NdbRestarter res;
+  res.dumpStateAllNodes(dump, 2);
+
+  return NDBT_OK;
+}
+
+
 NDBT_TESTSUITE(testScan);
 TESTCASE("ScanRead", 
 	 "Verify scan requirement: It should be possible "\
@@ -1725,6 +1815,24 @@ TESTCASE("Bug36124",
   STEP(runBug36124);
   FINALIZER(runClearTable);
 }
+TESTCASE("Bug42545", "")
+{
+  INITIALIZER(createOrderedPkIndex);
+  INITIALIZER(runLoadTable);
+  STEP(runBug42545);
+  FINALIZER(createOrderedPkIndex_Drop);
+  FINALIZER(runClearTable);
+}
+TESTCASE("Bug42559", "") 
+{
+  INITIALIZER(initBug42559);
+  INITIALIZER(createOrderedPkIndex);
+  INITIALIZER(runLoadTable);
+  STEPS(runScanReadIndex, 70);
+  FINALIZER(createOrderedPkIndex_Drop);
+  FINALIZER(finalizeBug42559);
+  FINALIZER(runClearTable);
+}
 NDBT_TESTSUITE_END(testScan);
 
 int main(int argc, const char** argv){
@@ -1734,3 +1842,4 @@ int main(int argc, const char** argv){
 }
 
 template class Vector<Attrib*>;
+template class Vector<NdbTransaction*>;

=== modified file 'storage/ndb/test/ndbapi/testUpgrade.cpp'
--- a/storage/ndb/test/ndbapi/testUpgrade.cpp	2008-02-21 13:57:42 +0000
+++ b/storage/ndb/test/ndbapi/testUpgrade.cpp	2008-12-12 09:40:06 +0000
@@ -45,7 +45,7 @@ int runUpgrade_NR1(NDBT_Context* ctx, ND
     g_err << "Cluster '" << clusters.column("name")
           << "@" << tmp_result.column("connectstring") << "'" << endl;
 
-    if (restarter.waitClusterStarted(1))
+    if (restarter.waitClusterStarted())
       return NDBT_FAILED;
 
     // Restart ndb_mgmd(s)
@@ -65,7 +65,7 @@ int runUpgrade_NR1(NDBT_Context* ctx, ND
     }
 
     ndbout << "Waiting for started"<< endl;
-    if (restarter.waitClusterStarted(1))
+    if (restarter.waitClusterStarted())
       return NDBT_FAILED;
     ndbout << "Started"<< endl;
 
@@ -126,7 +126,7 @@ int runUpgrade_NR2(NDBT_Context* ctx, ND
     g_err << "Cluster '" << clusters.column("name")
           << "@" << tmp_result.column("connectstring") << "'" << endl;
 
-    if(restarter.waitClusterStarted(1))
+    if(restarter.waitClusterStarted())
       return NDBT_FAILED;
 
     // Restart ndb_mgmd(s)
@@ -144,6 +144,8 @@ int runUpgrade_NR2(NDBT_Context* ctx, ND
         return NDBT_FAILED;
     }
 
+    NdbSleep_SecSleep(5); // TODO, handle arbitration
+
     // Restart one ndbd in each node group
     SqlResultSet ndbds;
     if (!atrt.getNdbds(clusterId, ndbds))
@@ -239,7 +241,7 @@ int runUpgrade_NR3(NDBT_Context* ctx, ND
     g_err << "Cluster '" << clusters.column("name")
           << "@" << tmp_result.column("connectstring") << "'" << endl;
 
-    if(restarter.waitClusterStarted(1))
+    if(restarter.waitClusterStarted())
       return NDBT_FAILED;
 
     // Restart ndb_mgmd(s)
@@ -257,6 +259,8 @@ int runUpgrade_NR3(NDBT_Context* ctx, ND
         return NDBT_FAILED;
     }
 
+    NdbSleep_SecSleep(5); // TODO, handle arbitration
+
     // Restart one ndbd in each node group
     SqlResultSet ndbds;
     if (!atrt.getNdbds(clusterId, ndbds))
@@ -338,14 +342,14 @@ int runCheckStarted(NDBT_Context* ctx, N
 
   // Check cluster is started
   NdbRestarter restarter;
-  if(restarter.waitClusterStarted(1) != 0){
+  if(restarter.waitClusterStarted() != 0){
     g_err << "All nodes was not started " << endl;
     return NDBT_FAILED;
   }
 
   // Check atrtclient is started
   AtrtClient atrt;
-  if(!atrt.waitConnected(60)){
+  if(!atrt.waitConnected()){
     g_err << "atrt server was not started " << endl;
     return NDBT_FAILED;
   }
@@ -357,7 +361,7 @@ int runCheckStarted(NDBT_Context* ctx, N
 
   while (procs.next())
   {
-    if (procs.columnAsInt("node_id") == -1){
+    if (procs.columnAsInt("node_id") == (unsigned)-1){
       ndbout << "Found one process with node_id -1, "
              << "use --fix-nodeid=1 to atrt to fix this" << endl;
       return NDBT_FAILED;
@@ -367,118 +371,21 @@ int runCheckStarted(NDBT_Context* ctx, N
   return NDBT_OK;
 }
 
-
-int runRestoreProcs(NDBT_Context* ctx, NDBT_Step* step){
-  AtrtClient atrt;
-  g_err << "Starting to reset..." << endl;
-
-  SqlResultSet clusters;
-  if (!atrt.getClusters(clusters))
-    return NDBT_FAILED;
-
-  while (clusters.next())
-  {
-    uint clusterId= clusters.columnAsInt("id");
-    SqlResultSet tmp_result;
-    if (!atrt.getConnectString(clusterId, tmp_result))
-      return NDBT_FAILED;
-
-    NdbRestarter restarter(tmp_result.column("connectstring"));
-    restarter.setReconnect(true); // Restarting mgmd
-    g_err << "Cluster '" << clusters.column("name")
-          << "@" << tmp_result.column("connectstring") << "'" << endl;
-
-    if(restarter.waitClusterStarted(1))
-      return NDBT_FAILED;
-
-    // Reset ndb_mgmd(s)
-    SqlResultSet mgmds;
-    if (!atrt.getMgmds(clusterId, mgmds))
-      return NDBT_FAILED;
-
-    while (mgmds.next())
-    {
-      ndbout << "Reset mgmd" << mgmds.columnAsInt("node_id") << endl;
-      if (!atrt.resetProc(mgmds.columnAsInt("id")))
-        return NDBT_FAILED;
-
-      if(restarter.waitConnected() != 0)
-        return NDBT_FAILED;
-    }
-
-    if(restarter.waitClusterStarted(1))
-      return NDBT_FAILED;
-
-    // Reset ndbd(s)
-    SqlResultSet ndbds;
-    if (!atrt.getNdbds(clusterId, ndbds))
-      return NDBT_FAILED;
-
-    while(ndbds.next())
-    {
-      int nodeId = ndbds.columnAsInt("node_id");
-      int processId = ndbds.columnAsInt("id");
-      ndbout << "Reset node " << nodeId << endl;
-
-      if (!atrt.resetProc(processId))
-        return NDBT_FAILED;
-
-    }
-
-    if (restarter.waitClusterNoStart())
-      return NDBT_FAILED;
-
-  }
-
-
-  // All nodes are in no start, start them up again
-  clusters.reset();
-  while (clusters.next())
-  {
-    uint clusterId= clusters.columnAsInt("id");
-    SqlResultSet tmp_result;
-    if (!atrt.getConnectString(clusterId, tmp_result))
-      return NDBT_FAILED;
-
-    NdbRestarter restarter(tmp_result.column("connectstring"));
-    g_err << "Cluster '" << clusters.column("name")
-          << "@" << tmp_result.column("connectstring") << "'" << endl;
-
-    if (restarter.waitClusterNoStart())
-      return NDBT_FAILED;
-
-    ndbout << "Starting and wait for started..." << endl;
-    if (restarter.startAll())
-      return NDBT_FAILED;
-
-    if (restarter.waitClusterStarted())
-      return NDBT_FAILED;
-  }
-
-  ctx->stopTest();
-  return NDBT_OK;
-}
-
-
-
 NDBT_TESTSUITE(testUpgrade);
 TESTCASE("Upgrade_NR1",
 	 "Test that one node at a time can be upgraded"){
   INITIALIZER(runCheckStarted);
   STEP(runUpgrade_NR1);
-  FINALIZER(runRestoreProcs);
 }
 TESTCASE("Upgrade_NR2",
 	 "Test that one node in each nodegroup can be upgradde simultaneously"){
   INITIALIZER(runCheckStarted);
   STEP(runUpgrade_NR2);
-  FINALIZER(runRestoreProcs);
 }
 TESTCASE("Upgrade_NR3",
 	 "Test that one node in each nodegroup can be upgrade simultaneously"){
   INITIALIZER(runCheckStarted);
   STEP(runUpgrade_NR3);
-  FINALIZER(runRestoreProcs);
 }
 NDBT_TESTSUITE_END(testUpgrade);
 

=== modified file 'storage/ndb/test/run-test/Makefile.am'
--- a/storage/ndb/test/run-test/Makefile.am	2009-01-07 10:58:33 +0000
+++ b/storage/ndb/test/run-test/Makefile.am	2009-02-11 12:11:20 +0000
@@ -23,7 +23,8 @@ include $(top_srcdir)/storage/ndb/config
 test_PROGRAMS = atrt
 test_DATA=daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt \
           conf-ndbmaster.cnf \
-          conf-dl145a.cnf test-tests.txt conf-test.cnf db.sql
+          conf-dl145a.cnf test-tests.txt conf-test.cnf db.sql \
+          conf-upgrade.cnf upgrade-tests.txt
 
 test_SCRIPTS=atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \
           atrt-clear-result.sh autotest-run.sh atrt-backtrace.sh

=== modified file 'storage/ndb/test/run-test/atrt-gather-result.sh'
--- a/storage/ndb/test/run-test/atrt-gather-result.sh	2007-02-16 20:09:38 +0000
+++ b/storage/ndb/test/run-test/atrt-gather-result.sh	2008-12-12 09:40:06 +0000
@@ -12,5 +12,18 @@ do
   shift
 done
 
-
-
+#
+# clean tables...not to make results too large
+#
+lst=$(find . -name '*.frm')
+if [ "$lst" ]
+then
+    for i in $lst
+    do
+	basename=$(echo $i | sed 's!\.frm!!')
+	if [ "$basename" ]
+	then
+	    rm -f $basename.*
+	fi
+    done
+fi

=== modified file 'storage/ndb/test/run-test/atrt.hpp'
--- a/storage/ndb/test/run-test/atrt.hpp	2008-12-17 18:40:14 +0000
+++ b/storage/ndb/test/run-test/atrt.hpp	2009-01-08 11:57:59 +0000
@@ -149,10 +149,17 @@ bool setup_hosts(atrt_config&);
 
 bool do_command(atrt_config& config);
 
-bool
-start_process(atrt_process & proc);
-bool
-stop_process(atrt_process & proc);
+bool start_process(atrt_process & proc);
+bool stop_process(atrt_process & proc);
+
+/**
+ * check configuration if any changes has been 
+ *   done for the duration of the latest running test
+ *   if so, return true, and reset those changes
+ *   (true, indicates that a restart is needed to actually
+ *    reset the running processes)
+ */
+bool reset_config(atrt_config&);
 
 NdbOut&
 operator<<(NdbOut& out, const atrt_process& proc);

=== modified file 'storage/ndb/test/run-test/autotest-boot.sh'
--- a/storage/ndb/test/run-test/autotest-boot.sh	2008-12-10 15:18:06 +0000
+++ b/storage/ndb/test/run-test/autotest-boot.sh	2008-12-16 17:12:00 +0000
@@ -166,9 +166,9 @@ fi
 
 if [ -z "$tag1" ]
 then
-    dst_place1=${build_dir}/clone-$clone1-$DATE.$$
+    dst_place1=${build_dir}/clone1-$clone1-$DATE.$$
 else
-    dst_place1=${build_dir}/clone-$tag1-$DATE.$$
+    dst_place1=${build_dir}/clone1-$tag1-$DATE.$$
     extra_args="$extra_args --clone1=$tag1"
     extra_clone1="-r$tag1"
 fi
@@ -226,7 +226,7 @@ if [ "$build" ]
 then
     rm -rf $dst_place0
 
-    if [ "$dst_place1" ]
+    if [ "$clone1" ]
     then
 	rm -rf $dst_place1
     fi

=== modified file 'storage/ndb/test/run-test/autotest-run.sh'
--- a/storage/ndb/test/run-test/autotest-run.sh	2008-12-10 15:18:06 +0000
+++ b/storage/ndb/test/run-test/autotest-run.sh	2008-12-12 11:09:03 +0000
@@ -213,8 +213,9 @@ choose_conf(){
 #########################################
 
 count_hosts(){
-    cnt=`grep "CHOOSE_host" $1 | awk '{for(i=1; i<=NF;i++) \
-    if(index($i, "CHOOSE_host") > 0) print $i;}' | sort | uniq | wc -l`
+    ch="CHOOSE_host"
+    cnt=$(for i in `grep $ch $1 | sed 's!,! !g'` ; do echo $i; done\
+          | grep $ch | sort | uniq | wc -l)
     echo $cnt
 }
 
@@ -247,18 +248,22 @@ cd $run_dir
 choose $conf $hosts > d.tmp.$$
 sed -e s,CHOOSE_dir,"$run_dir/run",g < d.tmp.$$ > my.cnf
 
+prefix="--prefix=$install_dir0"
+if [ "$install_dir1" ]
+then
+    prefix="$prefix --prefix1=$install_dir1"
+fi
+
+
 # Setup configuration
-$atrt Cdq my.cnf
+$atrt Cdq $prefix my.cnf
 
 # Start...
 args=""
 args="--report-file=report.txt"
 args="$args --log-file=log.txt"
 args="$args --testcase-file=$test_dir/$RUN-tests.txt"
-if [ "$install_dir1" ]
-then
-    args="$args --prefix=$install_dir0 --prefix1=$install_dir1"
-fi
+args="$args $prefix"
 $atrt $args my.cnf
 
 # Make tar-ball

=== modified file 'storage/ndb/test/run-test/command.cpp'
--- a/storage/ndb/test/run-test/command.cpp	2008-11-27 18:03:09 +0000
+++ b/storage/ndb/test/run-test/command.cpp	2008-12-12 08:48:37 +0000
@@ -83,10 +83,12 @@ do_change_version(atrt_config& config, S
   atrt_process& proc= *config.m_processes[process_id];
 
   // Save current proc state
-  assert(proc.m_save.m_saved == false);
-  proc.m_save.m_proc= proc.m_proc;
-  proc.m_save.m_saved= true;
-
+  if (proc.m_save.m_saved == false)
+  {
+    proc.m_save.m_proc= proc.m_proc;
+    proc.m_save.m_saved= true;
+  }
+  
   g_logger.info("stopping process...");
   if (!stop_process(proc))
     return false;
@@ -145,7 +147,7 @@ do_reset_proc(atrt_config& config, SqlRe
   {
     ndbout << "process has not changed" << endl;
   }
-
+  
   g_logger.info("starting process...");
   if (!start_process(proc))
     return false;

=== added file 'storage/ndb/test/run-test/conf-upgrade.cnf'
--- a/storage/ndb/test/run-test/conf-upgrade.cnf	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/test/run-test/conf-upgrade.cnf	2008-12-15 20:06:12 +0000
@@ -0,0 +1,32 @@
+[atrt]
+basedir = CHOOSE_dir
+baseport = 14000
+clusters = .4node
+mysqld = CHOOSE_host1
+fix-nodeid=1
+
+[ndb_mgmd]
+
+[mysqld]
+skip-innodb
+loose-skip-bdb
+socket=mysql.sock
+
+[client]
+protocol=tcp
+
+[cluster_config.4node]
+ndb_mgmd = CHOOSE_host1,CHOOSE_host1
+ndbd = CHOOSE_host2,CHOOSE_host3,CHOOSE_host2,CHOOSE_host3
+ndbapi= CHOOSE_host1,CHOOSE_host1,CHOOSE_host1
+
+NoOfReplicas = 2
+IndexMemory = 50M 
+DataMemory = 100M
+BackupMemory = 64M
+MaxNoOfConcurrentScans = 100
+MaxNoOfSavedMessages= 1000
+SendBufferMemory = 2M
+NoOfFragmentLogFiles = 4
+FragmentLogFileSize = 64M
+

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2008-12-09 18:59:54 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2009-02-04 13:08:05 +0000
@@ -309,6 +309,10 @@ max-time: 500
 cmd: testScan
 args: -n ScanRead488O -l 10 T6 D1 D2 
 
+max-time: 500
+cmd: testScan
+args: -n Bug42559 T6 D1 D2 
+
 max-time: 1000
 cmd: testScan
 args: -n ScanRead488T -l 10 T6 D1 D2 
@@ -990,6 +994,10 @@ max-time: 5000
 cmd: testNodeRestart
 args: -n GCP T1
 
+max-time: 1200
+cmd: testNodeRestart
+args: -n Bug41469 T1
+
 max-time: 180
 cmd: testIndex
 args: -n Bug28804 T1 T6
@@ -1159,7 +1167,7 @@ args: -l 100 -n Bug37158
 # EOF 2008-06-03
 max-time: 500
 cmd: test_event
-args -n bug37672 T1
+args: -n bug37672 T1
 
 #EOF 2008-07-04
 max-time: 500
@@ -1169,10 +1177,18 @@ args: 
 #EOF 2008-07-09
 max-time: 600
 cmd: test_event
-args -r 5000 -n Bug30780 T1
+args: -r 5000 -n Bug30780 T1
 
 #EOF 2008-08-11
 max-time: 1200
 cmd: testNodeRestart
-args -n Bug41295 T1
+args: -n Bug41295 T1
+
+max-time: 1200
+cmd: testNodeRestart
+args: -n Bug42422 -l 1 T1
+
+max-time: 300
+cmd: testScan
+args: -n Bug42545 -l 1 T1
 

=== modified file 'storage/ndb/test/run-test/db.cpp'
--- a/storage/ndb/test/run-test/db.cpp	2008-11-10 11:41:44 +0000
+++ b/storage/ndb/test/run-test/db.cpp	2008-12-19 06:40:28 +0000
@@ -132,14 +132,23 @@ connect_mysqld(atrt_process* proc)
 
   const char * port = find(proc, "--port=");
   const char * socket = find(proc, "--socket=");
-  assert(port);
+  if (port == 0 && socket == 0)
+  {
+    g_logger.error("Neither socket nor port specified...cant connect to mysql");
+    return false;
+  }
   
+  if (port)
+  {
+    mysql_protocol_type val = MYSQL_PROTOCOL_TCP;
+    mysql_options(&proc->m_mysql, MYSQL_OPT_PROTOCOL, &val);
+  }
   for (size_t i = 0; i<20; i++)
   {
     if (mysql_real_connect(&proc->m_mysql,
 			   proc->m_host->m_hostname.c_str(),
 			   "root", "", "test",
-			   atoi(port),
+			   port ? atoi(port) : 0,
 			   socket,
 			   0))
     {
@@ -152,8 +161,8 @@ connect_mysqld(atrt_process* proc)
   
   g_logger.error("Failed to connect to mysqld err: >%s< >%s:%u:%s<",
 		 mysql_error(&proc->m_mysql),
-		 proc->m_host->m_hostname.c_str(),atoi(port),
-		 socket);
+		 proc->m_host->m_hostname.c_str(), port ? atoi(port) : 0,
+		 socket ? socket : "<null>");
   return false;
 }
 
@@ -246,7 +255,7 @@ populate_options(MYSQL* mysql, MYSQL_STM
     
     if (mysql_stmt_execute(stmt))
     {
-      g_logger.error("Failed to execute: %s", mysql_error(mysql));
+      g_logger.error("0 Failed to execute: %s", mysql_error(mysql));
       return false;
     }
     kk++;
@@ -286,11 +295,11 @@ populate_db(atrt_config& config, atrt_pr
       
       if (mysql_stmt_execute(stmt))
       {
-	g_logger.error("Failed to execute: %s", mysql_error(&mysqld->m_mysql));
+	g_logger.error("1 Failed to execute: %s", mysql_error(&mysqld->m_mysql));
 	return false;
       }
-      mysql_stmt_close(stmt);
     }
+    mysql_stmt_close(stmt);
   }
 
   {
@@ -319,7 +328,7 @@ populate_db(atrt_config& config, atrt_pr
       
       if (mysql_stmt_execute(stmt))
       {
-	g_logger.error("Failed to execute: %s", mysql_error(&mysqld->m_mysql));
+	g_logger.error("2 Failed to execute: %s", mysql_error(&mysqld->m_mysql));
 	return false;
       }
     }
@@ -386,7 +395,7 @@ populate_db(atrt_config& config, atrt_pr
       
       if (mysql_stmt_execute(stmt))
       {
-	g_logger.error("Failed to execute: %s", mysql_error(&mysqld->m_mysql));
+	g_logger.error("3 Failed to execute: %s", mysql_error(&mysqld->m_mysql));
 	return false;
       }
 

=== modified file 'storage/ndb/test/run-test/files.cpp'
--- a/storage/ndb/test/run-test/files.cpp	2008-08-23 20:29:50 +0000
+++ b/storage/ndb/test/run-test/files.cpp	2008-12-11 13:47:52 +0000
@@ -116,6 +116,11 @@ setup_files(atrt_config& config, int set
   BaseString mycnf;
   mycnf.assfmt("%s/my.cnf", g_basedir);
   
+  if (!create_directory(g_basedir))
+  {
+    return false;
+  }
+
   if (mycnf != g_my_cnf)
   {
     struct stat sbuf;

=== modified file 'storage/ndb/test/run-test/main.cpp'
--- a/storage/ndb/test/run-test/main.cpp	2008-09-25 10:55:39 +0000
+++ b/storage/ndb/test/run-test/main.cpp	2009-01-08 11:57:59 +0000
@@ -254,15 +254,18 @@ main(int argc, char ** argv)
   /**
    * Main loop
    */
-  while(!feof(g_test_case_file)){
+  while(!feof(g_test_case_file))
+  {
     /**
      * Do we need to restart ndb
      */
-    if(restart){
+    if(restart)
+    {
+      restart = false;
       g_logger.info("(Re)starting server processes");
       if(!stop_processes(g_config, ~0))
 	goto end;
-
+      
       if (!setup_directories(g_config, 2))
 	goto end;
       
@@ -276,7 +279,7 @@ main(int argc, char ** argv)
       {
         g_logger.info("Failed to start server processes");
         g_logger.info("Gathering logs and saving them as test %u", test_no);
-
+        
         int tmp;
         if(!gather_result(g_config, &tmp))
           goto end;
@@ -327,31 +330,37 @@ main(int argc, char ** argv)
     
     const time_t start = time(0);
     time_t now = start;
-    do {
+    do 
+    {
       if(!update_status(g_config, atrt_process::AP_ALL))
 	goto end;
-
-      if(is_running(g_config, p_ndb) != 2){
+      
+      if(is_running(g_config, p_ndb) != 2)
+      {
 	result = ERR_NDB_FAILED;
 	break;
       }
-
-      if(is_running(g_config, p_servers) != 2){
+      
+      if(is_running(g_config, p_servers) != 2)
+      {
 	result = ERR_SERVERS_FAILED;
 	break;
       }
 
-      if(is_running(g_config, p_clients) == 0){
+      if(is_running(g_config, p_clients) == 0)
+      {
 	break;
       }
 
-      if (!do_command(g_config)){
+      if (!do_command(g_config))
+      {
         result = ERR_COMMAND_FAILED;
 	break;
       }
 
       now = time(0);
-      if(now  > (start + test_case.m_max_time)){
+      if(now  > (start + test_case.m_max_time))
+      {
 	result = ERR_MAX_TIME_ELAPSED;
 	break;
       }
@@ -371,18 +380,20 @@ main(int argc, char ** argv)
 		  test_no, 
 		  (result == 0 ? "OK" : "FAILED"), result);
 
-    if(g_report_file != 0){
+    if(g_report_file != 0)
+    {
       fprintf(g_report_file, "%s ; %d ; %d ; %ld\n",
 	      test_case.m_name.c_str(), test_no, result, elapsed);
       fflush(g_report_file);
     }    
 
-    if(g_mode == 0 && result){
+    if(g_mode == 0 && result)
+    {
       g_logger.info
 	("Encountered failed test in interactive mode - terminating");
       break;
     }
-
+    
     BaseString resdir;
     resdir.assfmt("result.%d", test_no);
     remove_dir(resdir.c_str(), true);
@@ -400,11 +411,15 @@ main(int argc, char ** argv)
     {
       remove_dir("result", true);
     }
+   
+    if (reset_config(g_config))
+    {
+      restart = true;
+    }
     
-    if(result != 0){
+    if(result != 0)
+    {
       restart = true;
-    } else {
-      restart = false;
     }
     test_no++;
   }
@@ -858,7 +873,8 @@ next:
 bool
 start_process(atrt_process & proc){
   if(proc.m_proc.m_id != -1){
-    g_logger.critical("starting already started process: %d", proc.m_index);
+    g_logger.critical("starting already started process: %u", 
+                      (unsigned)proc.m_index);
     return false;
   }
   
@@ -1126,13 +1142,23 @@ setup_test_case(atrt_config& config, con
     if(proc.m_type == atrt_process::AP_NDB_API || 
        proc.m_type == atrt_process::AP_CLIENT)
     {
-      proc.m_proc.m_path = "";
+      BaseString cmd;
       if (tc.m_command.c_str()[0] != '/')
       {
-	proc.m_proc.m_path.appfmt("%s/bin/", g_prefix);
+        cmd.appfmt("%s/bin/", g_prefix);
+      }
+      cmd.append(tc.m_command.c_str());
+
+      if (0) // valgrind
+      {
+        proc.m_proc.m_path = "/usr/bin/valgrind";
+        proc.m_proc.m_args.appfmt("%s %s", cmd.c_str(), tc.m_args.c_str());
+      }
+      else
+      {
+        proc.m_proc.m_path = cmd;
+        proc.m_proc.m_args.assign(tc.m_args);
       }
-      proc.m_proc.m_path.append(tc.m_command.c_str());
-      proc.m_proc.m_args.assign(tc.m_args);
       if(!tc.m_run_all)
         break;
     }
@@ -1208,28 +1234,38 @@ setup_hosts(atrt_config& config){
   return true;
 }
 
+static
+bool
+do_rsync(const char *dir, const char *dst)
+{
+  BaseString tmp = g_setup_progname;
+  tmp.appfmt(" %s %s/ %s", dst, dir, dir);
+  
+  g_logger.info("rsyncing %s to %s", dir, dst);
+  g_logger.debug("system(%s)", tmp.c_str());
+  const int r1 = system(tmp.c_str());
+  if(r1 != 0)
+  {
+    g_logger.critical("Failed to rsync %s to %s", dir, dst);
+    return false;
+  }
+  
+  return true;
+}
+
 bool
 deploy(atrt_config & config)
 {
   for (size_t i = 0; i<config.m_hosts.size(); i++)
   {
-    BaseString tmp = g_setup_progname;
-    tmp.appfmt(" %s %s/ %s",
-	       config.m_hosts[i]->m_hostname.c_str(),
-	       g_prefix,
-	       g_prefix);
-  
-    g_logger.info("rsyncing %s to %s", g_prefix,
-		  config.m_hosts[i]->m_hostname.c_str());
-    g_logger.debug("system(%s)", tmp.c_str());
-    const int r1 = system(tmp.c_str());
-    if(r1 != 0)
-    {
-      g_logger.critical("Failed to rsync %s to %s", 
-			g_prefix,
-			config.m_hosts[i]->m_hostname.c_str());
+    if (!do_rsync(g_basedir, config.m_hosts[i]->m_hostname.c_str()))
+      return false;
+
+    if (!do_rsync(g_prefix, config.m_hosts[i]->m_hostname.c_str()))
+      return false;
+    
+    if (g_prefix1 && !do_rsync(g_prefix1, config.m_hosts[i]->m_hostname.c_str()))
       return false;
-    }
   }
   
   return true;
@@ -1328,6 +1364,27 @@ require(bool x)
     abort();
 }
 
+bool
+reset_config(atrt_config & config)
+{
+  bool changed = false;
+  for(size_t i = 0; i<config.m_processes.size(); i++)
+  {
+    atrt_process & proc = *config.m_processes[i]; 
+    if (proc.m_save.m_saved)
+    {
+      if (!stop_process(proc))
+        return false;
+      
+      changed = true;
+      proc.m_save.m_saved = false;
+      proc.m_proc = proc.m_save.m_proc;
+      proc.m_proc.m_id = -1;
+    }
+  }
+  return changed;
+}
+
 template class Vector<Vector<SimpleCpcClient::Process> >;
 template class Vector<atrt_host*>;
 template class Vector<atrt_cluster*>;

=== modified file 'storage/ndb/test/run-test/setup.cpp'
--- a/storage/ndb/test/run-test/setup.cpp	2008-11-27 18:03:09 +0000
+++ b/storage/ndb/test/run-test/setup.cpp	2008-12-12 08:04:28 +0000
@@ -49,14 +49,14 @@ bool
 setup_config(atrt_config& config, const char* atrt_mysqld)
 {
   BaseString tmp(g_clusters);
-  Vector<BaseString> clusters;
-  tmp.split(clusters, ",");
-
+  
   if (atrt_mysqld)
   {
-    clusters.push_back(BaseString(".atrt"));
+    tmp.appfmt(",.atrt");
   }
-  
+  Vector<BaseString> clusters;
+  tmp.split(clusters, ",");
+
   bool fqpn = clusters.size() > 1 || g_fqpn;
   
   size_t j,k;
@@ -131,6 +131,7 @@ setup_config(atrt_config& config, const 
       proc_args[1].value = 0;
       proc_args[2].value = 0;      
       proc_args[3].value = 0;      
+      proc_args[4].value = atrt_mysqld;
     }
 
     /**
@@ -154,10 +155,11 @@ setup_config(atrt_config& config, const 
       /**
        * Load cluster options
        */
-      
-      argc = 1;
+      int argc = 1;
+      const char * argv[] = { "atrt", 0, 0 };
       argv[argc++] = buf.c_str();
       const char *groups[] = { "mysql_cluster", 0 };
+      char ** tmp = (char**)argv;
       ret = load_defaults(g_my_cnf, groups, &argc, &tmp);
       
       if (ret)
@@ -201,7 +203,7 @@ load_process(atrt_config& config, atrt_c
 {
   atrt_host * host_ptr = find(hostname, config.m_hosts);
   atrt_process *proc_ptr = new atrt_process;
-  
+
   config.m_processes.push_back(proc_ptr);
   host_ptr->m_processes.push_back(proc_ptr);
   cluster.m_processes.push_back(proc_ptr);
@@ -212,6 +214,7 @@ load_process(atrt_config& config, atrt_c
   proc.m_index = idx;
   proc.m_type = type;
   proc.m_host = host_ptr;
+  proc.m_save.m_saved = false;
   if (g_fix_nodeid)
     proc.m_nodeid= cluster.m_next_nodeid++;
   else

=== added file 'storage/ndb/test/run-test/upgrade-tests.txt'
--- a/storage/ndb/test/run-test/upgrade-tests.txt	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/test/run-test/upgrade-tests.txt	2008-12-12 08:56:22 +0000
@@ -0,0 +1,12 @@
+cmd: testUpgrade
+args: -n Upgrade_NR1 T1
+max-time: 600
+
+cmd: testUpgrade
+args: -n Upgrade_NR2 T1
+max-time: 600
+
+cmd: testUpgrade
+args: -n Upgrade_NR3 T1
+max-time: 600
+

=== modified file 'storage/ndb/test/src/DbUtil.cpp'
--- a/storage/ndb/test/src/DbUtil.cpp	2008-11-10 10:53:34 +0000
+++ b/storage/ndb/test/src/DbUtil.cpp	2008-12-12 08:04:28 +0000
@@ -358,7 +358,7 @@ DbUtil::runQuery(const char* sql,
   MYSQL_BIND *bind_param = new MYSQL_BIND[params];
   NdbAutoObjArrayPtr<MYSQL_BIND> _guard(bind_param);
 
-  bzero(bind_param, sizeof(bind_param));
+  bzero(bind_param, params * sizeof(MYSQL_BIND));
 
   for(uint i= 0; i < mysql_stmt_param_count(stmt); i++)
   {
@@ -429,7 +429,7 @@ DbUtil::runQuery(const char* sql,
     uint num_fields= mysql_num_fields(res);
     MYSQL_BIND *bind_result = new MYSQL_BIND[num_fields];
     NdbAutoObjArrayPtr<MYSQL_BIND> _guard1(bind_result);
-    bzero(bind_result, sizeof(bind_result));
+    bzero(bind_result, num_fields * sizeof(MYSQL_BIND));
 
     for (uint i= 0; i < num_fields; i++)
     {
@@ -437,6 +437,8 @@ DbUtil::runQuery(const char* sql,
 
       switch(fields[i].type){
       case MYSQL_TYPE_STRING:
+        buf_len = fields[i].length + 1;
+        break;
       case MYSQL_TYPE_VARCHAR:
       case MYSQL_TYPE_VAR_STRING:
         buf_len= fields[i].max_length + 1;
@@ -444,14 +446,18 @@ DbUtil::runQuery(const char* sql,
       case MYSQL_TYPE_LONGLONG:
         buf_len= sizeof(long long);
         break;
+      case MYSQL_TYPE_LONG:
+        buf_len = sizeof(long);
+        break;
       default:
         break;
       }
-
+      
       bind_result[i].buffer_type= fields[i].type;
       bind_result[i].buffer= malloc(buf_len);
       bind_result[i].buffer_length= buf_len;
-
+      bind_result[i].is_null = (my_bool*)malloc(sizeof(my_bool));
+      * bind_result[i].is_null = 0;
     }
 
     if (mysql_stmt_bind_result(stmt, bind_result)){
@@ -464,8 +470,11 @@ DbUtil::runQuery(const char* sql,
     {
       Properties curr(true);
       for (uint i= 0; i < num_fields; i++){
+        if (* bind_result[i].is_null)
+          continue;
         switch(fields[i].type){
         case MYSQL_TYPE_STRING:
+	  ((char*)bind_result[i].buffer)[fields[i].max_length] = 0;
         case MYSQL_TYPE_VARCHAR:
         case MYSQL_TYPE_VAR_STRING:
           curr.put(fields[i].name, (char*)bind_result[i].buffer);
@@ -479,7 +488,7 @@ DbUtil::runQuery(const char* sql,
         default:
           curr.put(fields[i].name, *(int*)bind_result[i].buffer);
           break;
-       }
+        }
       }
       rows.put("row", row++, &curr);
     }
@@ -487,8 +496,10 @@ DbUtil::runQuery(const char* sql,
     mysql_free_result(res);
 
     for (uint i= 0; i < num_fields; i++)
+    {
       free(bind_result[i].buffer);
-
+      free(bind_result[i].is_null);
+    }
   }
 
   // Save stats in result set

=== modified file 'storage/ndb/test/src/HugoTransactions.cpp'
--- a/storage/ndb/test/src/HugoTransactions.cpp	2008-11-17 09:26:25 +0000
+++ b/storage/ndb/test/src/HugoTransactions.cpp	2009-02-04 12:32:27 +0000
@@ -51,8 +51,8 @@ HugoTransactions::scanReadRecords(Ndb* p
   while (true){
 
     if (retryAttempt >= m_retryMax){
-      g_err << "ERROR: has retried this operation " << retryAttempt 
-	    << " times, failing!" << endl;
+      g_err << __LINE__ << " ERROR: has retried this operation " 
+            << retryAttempt << " times, failing!" << endl;
       return NDBT_FAILED;
     }
 
@@ -154,6 +154,18 @@ HugoTransactions::scanReadRecords(Ndb* p
 	  // Too many active scans, no limit on number of retry attempts
 	  break;
 	default:
+          if (err.classification == NdbError::TimeoutExpired)
+          {
+            if (retryAttempt >= (m_retryMax / 10) && 
+                (parallelism == 0 || parallelism > 1))
+            {
+              /**
+               * decrease parallelism
+               */
+              parallelism = 1;
+              ndbout_c("decrease parallelism");
+            }
+          }
 	  retryAttempt++;
 	}
 	continue;
@@ -195,8 +207,10 @@ HugoTransactions::scanReadRecords(Ndb* p
   while (true){
 
     if (retryAttempt >= m_retryMax){
-      g_err << "ERROR: has retried this operation " << retryAttempt 
-	    << " times, failing!" << endl;
+      g_err << __LINE__ << " ERROR: has retried this operation " 
+            << retryAttempt  << " times, failing!" << endl;
+      g_err << "lm: " << Uint32(lm) << " flags: H'" << hex << scan_flags
+            << endl;
       return NDBT_FAILED;
     }
 
@@ -298,6 +312,18 @@ HugoTransactions::scanReadRecords(Ndb* p
 	  // Too many active scans, no limit on number of retry attempts
 	  break;
 	default:
+          if (err.classification == NdbError::TimeoutExpired)
+          {
+            if (retryAttempt >= (m_retryMax / 10) && 
+                (parallelism == 0 || parallelism > 1))
+            {
+              /**
+               * decrease parallelism
+               */
+              parallelism = 1;
+              ndbout_c("decrease parallelism");
+            }
+          }
 	  retryAttempt++;
 	}
 	continue;

=== modified file 'storage/ndb/test/src/NDBT_Tables.cpp'
--- a/storage/ndb/test/src/NDBT_Tables.cpp	2008-11-17 09:26:25 +0000
+++ b/storage/ndb/test/src/NDBT_Tables.cpp	2008-12-20 19:48:44 +0000
@@ -868,11 +868,21 @@ NDBT_Tables::create_default_tablespace(N
   NdbDictionary::Dictionary* pDict = pNdb->getDictionary();
 
   int res;
+  Uint32 mb = 8;
+  {
+    char buf[256];
+    if (NdbEnv_GetEnv("UNDOBUFFER", buf, sizeof(buf)))
+    {
+      mb = atoi(buf);
+      ndbout_c("Using %umb dd-undo-buffer", mb);
+    }
+  }
+
   NdbDictionary::LogfileGroup lg = pDict->getLogfileGroup("DEFAULT-LG");
   if (strcmp(lg.getName(), "DEFAULT-LG") != 0)
   {
     lg.setName("DEFAULT-LG");
-    lg.setUndoBufferSize(8*1024*1024);
+    lg.setUndoBufferSize(mb*1024*1024);
     res = pDict->createLogfileGroup(lg);
     if(res != 0){
       g_err << "Failed to create logfilegroup:"
@@ -881,7 +891,7 @@ NDBT_Tables::create_default_tablespace(N
     }
   }
 
-  Uint32 mb = 96;
+  mb = 96;
   Uint32 files = 13;
 
   {

=== modified file 'storage/ndb/test/tools/log_listner.cpp'
--- a/storage/ndb/test/tools/log_listner.cpp	2007-04-10 08:27:02 +0000
+++ b/storage/ndb/test/tools/log_listner.cpp	2009-01-14 13:33:03 +0000
@@ -7,15 +7,11 @@ NDB_STD_OPTS_VARS;
 
 static struct my_option my_long_options[] =
 {
-  NDB_STD_OPTS("ndb_logevent_listen"),
+  NDB_STD_OPTS("eventlog"),
   { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
 };
 static void usage()
 {
-  char desc[] = 
-    "tabname\n"\
-    "This program list all properties of table(s) in NDB Cluster.\n"\
-    "  ex: desc T1 T2 T4\n";
   ndb_std_print_version();
   my_print_help(my_long_options);
   my_print_variables(my_long_options);
@@ -40,7 +36,7 @@ main(int argc, char** argv)
   load_defaults("my",load_default_groups,&argc,&argv);
   int ho_error;
 #ifndef DBUG_OFF
-  opt_debug= "d:t:O,/tmp/ndb_desc.trace";
+  opt_debug= "d:t:O,/tmp/eventlog.trace";
 #endif
   if ((ho_error=handle_options(&argc, &argv, my_long_options, 
 			       ndb_std_get_one_option)))

=== modified file 'storage/ndb/tools/waiter.cpp'
--- a/storage/ndb/tools/waiter.cpp	2008-08-12 18:56:42 +0000
+++ b/storage/ndb/tools/waiter.cpp	2009-01-08 11:57:59 +0000
@@ -25,12 +25,15 @@
 
 #include <NDBT.hpp>
 
+#include <kernel/NodeBitmask.hpp>
+
 static int
 waitClusterStatus(const char* _addr, ndb_mgm_node_status _status);
 
 enum ndb_waiter_options {
   OPT_WAIT_STATUS_NOT_STARTED = NDB_STD_OPTIONS_LAST,
   OPT_WAIT_STATUS_SINGLE_USER
+  ,OPT_NOWAIT_NODES
 };
 NDB_STD_OPTS_VARS;
 
@@ -38,6 +41,8 @@ static int _no_contact = 0;
 static int _not_started = 0;
 static int _single_user = 0;
 static int _timeout = 120;
+static const char* _nowait_nodes = 0;
+static NdbNodeBitmask nowait_nodes_bitmask;
 
 const char *load_default_groups[]= { "mysql_cluster",0 };
 
@@ -57,6 +62,10 @@ static struct my_option my_long_options[
   { "timeout", 't', "Timeout to wait in seconds",
     (uchar**) &_timeout, (uchar**) &_timeout, 0,
     GET_INT, REQUIRED_ARG, 120, 0, 0, 0, 0, 0 }, 
+  { "nowait-nodes", OPT_NOWAIT_NODES, 
+    "Nodes that will not be waited for",
+    (uchar**) &_nowait_nodes, (uchar**) &_nowait_nodes, 0,
+    GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
   { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
 };
 
@@ -107,6 +116,23 @@ int main(int argc, char** argv){
     wait_status= NDB_MGM_NODE_STATUS_STARTED;
   }
 
+  if (_nowait_nodes)
+  {
+    int res = nowait_nodes_bitmask.parseMask(_nowait_nodes);
+    if(res == -2 || (res > 0 && nowait_nodes_bitmask.get(0)))
+    {
+      ndbout_c("Invalid nodeid specified in nowait-nodes: %s", 
+               _nowait_nodes);
+      exit(-1);
+    }
+    else if (res < 0)
+    {
+      ndbout_c("Unable to parse nowait-nodes argument: %s",
+               _nowait_nodes);
+      exit(-1);
+    }
+  }
+
   if (waitClusterStatus(_hostName, wait_status) != 0)
     return NDBT_ProgramExit(NDBT_FAILED);
   return NDBT_ProgramExit(NDBT_OK);
@@ -148,7 +174,8 @@ getStatus(){
       node = &status->node_states[i];      
       switch(node->node_type){
       case NDB_MGM_NODE_TYPE_NDB:
-	ndbNodes.push_back(*node);
+        if (!nowait_nodes_bitmask.get(node->node_id))
+          ndbNodes.push_back(*node);
 	break;
       case NDB_MGM_NODE_TYPE_MGM:
         /* Don't care about MGM nodes */

Thread
bzr push into mysql-6.0-bugteam branch (davi:3040 to 3041) Davi Arnaut11 Feb