List: Commits
From: He Zhenxing
Date: February 12 2009 10:05am
Subject: bzr commit into mysql-6.0-rpl branch (zhenxing.he:2814) Bug#27004 Bug#28077
Bug#32662 Bug#33696 Bug#34526 Bug#40386 Bug#40922 Bug#41462 Bug#41469
Bu...
#At file:///media/sdb2/hezx/work/mysql/bzrwork/merge/6.0-rpl/

 2814 He Zhenxing	2009-02-12 [merge]
      Auto merge 6.0 -> 6.0-rpl
removed:
  mysql-test/suite/ndb/r/ndb_discover_db2.result
  mysql-test/suite/ndb/t/ndb_discover_db2-master.opt
  mysql-test/suite/ndb/t/ndb_discover_db2.test
  mysql-test/suite/ndb/t/ndb_partition_error2-master.opt
  mysql-test/suite/ndb/t/ndb_restore_partition-master.opt
  mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-master.opt
  mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-slave.opt
  mysql-test/suite/rpl_ndb_big/t/rpl_truncate_7ndb_2-master.opt
added:
  mysql-test/suite/ndb/r/ndb_dd_ddl_grant.result
  mysql-test/suite/ndb/t/ndb_dd_ddl_grant.test
  mysql-test/suite/ndb_binlog/my.cnf
  mysql-test/suite/ndb_team/my.cnf
  mysql-test/suite/rpl_ndb_big/my.cnf
  mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-master.opt
  mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-slave.opt
  storage/ndb/src/mgmapi/mgmapi_error.c
  storage/ndb/test/run-test/conf-upgrade.cnf
  storage/ndb/test/run-test/upgrade-tests.txt
modified:
  .bzrignore
  Makefile.am
  configure.in
  extra/perror.c
  mysql-test/Makefile.am
  mysql-test/lib/v1/mysql-test-run.pl
  mysql-test/lib/v1/ndb_config_1_node.ini
  mysql-test/lib/v1/ndb_config_2_node.ini
  mysql-test/mysql-test-run.pl
  mysql-test/r/partition_mgm.result
  mysql-test/std_data/ndb_config_config.ini
  mysql-test/suite/ndb/my.cnf
  mysql-test/suite/ndb/r/bug36547.result
  mysql-test/suite/ndb/r/ndb_basic.result
  mysql-test/suite/ndb/r/ndb_config.result
  mysql-test/suite/ndb/r/ndb_dbug_lock.result
  mysql-test/suite/ndb/r/ndb_dd_ddl.result
  mysql-test/suite/ndb/r/ndb_discover_db.result
  mysql-test/suite/ndb/r/ndb_read_multi_range.result
  mysql-test/suite/ndb/t/bug36547.test
  mysql-test/suite/ndb/t/ndb_dbug_lock.test
  mysql-test/suite/ndb/t/ndb_dd_ddl.test
  mysql-test/suite/ndb/t/ndb_dd_dump.test
  mysql-test/suite/ndb/t/ndb_discover_db.test
  mysql-test/suite/ndb/t/ndb_read_multi_range.test
  mysql-test/suite/ndb/t/ndb_restore_partition.test
  mysql-test/suite/ndb_binlog/r/ndb_binlog_basic.result
  mysql-test/suite/ndb_binlog/r/ndb_binlog_restore.result
  mysql-test/suite/ndb_binlog/t/ndb_binlog_basic.test
  mysql-test/suite/ndb_team/r/rpl_ndb_extraColMaster.result
  mysql-test/suite/ndb_team/t/rpl_ndb_dd_advance.test
  mysql-test/suite/parts/r/partition_auto_increment_ndb.result
  mysql-test/suite/rpl_ndb/my.cnf
  mysql-test/suite/rpl_ndb_big/r/rpl_ndb_2innodb.result
  mysql-test/suite/rpl_ndb_big/r/rpl_ndb_2myisam.result
  mysql-test/suite/rpl_ndb_big/r/rpl_ndb_sync.result
  mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2innodb-master.opt
  mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2innodb.test
  mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2myisam-master.opt
  mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2myisam.test
  mysql-test/suite/rpl_ndb_big/t/rpl_ndb_apply_status.test
  mysql-test/suite/rpl_ndb_big/t/rpl_truncate_7ndb_2.test
  mysql-test/t/partition_mgm.test
  scripts/make_binary_distribution.sh
  scripts/mysql_system_tables.sql
  sql/ha_ndbcluster.cc
  sql/ha_ndbcluster_binlog.cc
  sql/mysqld.cc
  sql/set_var.cc
  sql/slave.cc
  sql/sql_partition.cc
  storage/csv/ha_tina.cc
  storage/ndb/include/mgmapi/mgmapi.h
  storage/ndb/include/mgmapi/mgmapi_config_parameters.h
  storage/ndb/include/mgmapi/mgmapi_error.h
  storage/ndb/include/mgmapi/ndb_logevent.h
  storage/ndb/include/ndbapi/NdbScanOperation.hpp
  storage/ndb/include/util/Bitmask.hpp
  storage/ndb/src/common/portlib/NdbThread.c
  storage/ndb/src/common/util/Bitmask.cpp
  storage/ndb/src/kernel/blocks/ERROR_codes.txt
  storage/ndb/src/kernel/blocks/backup/Backup.cpp
  storage/ndb/src/kernel/blocks/backup/Backup.hpp
  storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
  storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
  storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
  storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
  storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
  storage/ndb/src/kernel/blocks/lgman.cpp
  storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
  storage/ndb/src/kernel/vm/Configuration.cpp
  storage/ndb/src/mgmapi/Makefile.am
  storage/ndb/src/mgmapi/ndb_logevent.cpp
  storage/ndb/src/mgmsrv/MgmtSrvr.cpp
  storage/ndb/src/mgmsrv/MgmtSrvr.hpp
  storage/ndb/src/mgmsrv/Services.cpp
  storage/ndb/src/mgmsrv/Services.hpp
  storage/ndb/src/ndbapi/ClusterMgr.cpp
  storage/ndb/src/ndbapi/ClusterMgr.hpp
  storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
  storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp
  storage/ndb/src/ndbapi/NdbScanOperation.cpp
  storage/ndb/src/ndbapi/Ndbif.cpp
  storage/ndb/src/ndbapi/TransporterFacade.hpp
  storage/ndb/src/ndbapi/ndberror.c
  storage/ndb/test/include/DbUtil.hpp
  storage/ndb/test/ndbapi/testBasic.cpp
  storage/ndb/test/ndbapi/testMgm.cpp
  storage/ndb/test/ndbapi/testNodeRestart.cpp
  storage/ndb/test/ndbapi/testScan.cpp
  storage/ndb/test/ndbapi/testUpgrade.cpp
  storage/ndb/test/run-test/Makefile.am
  storage/ndb/test/run-test/atrt-gather-result.sh
  storage/ndb/test/run-test/atrt.hpp
  storage/ndb/test/run-test/autotest-boot.sh
  storage/ndb/test/run-test/autotest-run.sh
  storage/ndb/test/run-test/command.cpp
  storage/ndb/test/run-test/daily-basic-tests.txt
  storage/ndb/test/run-test/db.cpp
  storage/ndb/test/run-test/files.cpp
  storage/ndb/test/run-test/main.cpp
  storage/ndb/test/run-test/setup.cpp
  storage/ndb/test/src/DbUtil.cpp
  storage/ndb/test/src/HugoTransactions.cpp
  storage/ndb/test/src/NDBT_Tables.cpp
  storage/ndb/test/tools/log_listner.cpp
  storage/ndb/tools/waiter.cpp

=== modified file '.bzrignore'
--- a/.bzrignore	2009-02-04 22:35:10 +0000
+++ b/.bzrignore	2009-02-05 12:49:39 +0000
@@ -760,6 +760,7 @@ mysql-test/dump.txt
 mysql-test/funcs_1-ps.log
 mysql-test/funcs_1.log
 mysql-test/funcs_1.tar
+mysql-test/gmon.out
 mysql-test/install_test_db
 mysql-test/lib/My/SafeProcess/my_safe_process
 mysql-test/lib/init_db.sql

=== modified file 'Makefile.am'
--- a/Makefile.am	2009-01-09 11:37:00 +0000
+++ b/Makefile.am	2009-02-02 15:58:48 +0000
@@ -134,6 +134,10 @@ smoke:
 	cd mysql-test ; \
 	    @PERL@ ./mysql-test-run.pl --do-test=s
 
+smoke:
+	cd mysql-test ; \
+	    @PERL@ ./mysql-test-run.pl --do-test=s
+
 test-full:	test test-nr test-ps
 
 test-force:

=== modified file 'configure.in'
--- a/configure.in	2009-02-03 09:16:53 +0000
+++ b/configure.in	2009-02-04 12:34:03 +0000
@@ -17,7 +17,7 @@ AM_CONFIG_HEADER([include/config.h:confi
 NDB_VERSION_MAJOR=6
 NDB_VERSION_MINOR=2
 NDB_VERSION_BUILD=17
-NDB_VERSION_STATUS="-GA"
+NDB_VERSION_STATUS="-alpha"
 
 PROTOCOL_VERSION=10
 DOT_FRM_VERSION=6

=== modified file 'extra/perror.c'
--- a/extra/perror.c	2008-11-27 13:36:48 +0000
+++ b/extra/perror.c	2009-01-08 11:57:59 +0000
@@ -26,6 +26,7 @@
 #include "../storage/ndb/src/ndbapi/ndberror.c"
 #include "../storage/ndb/src/kernel/error/ndbd_exit_codes.c"
 #include "../storage/ndb/include/mgmapi/mgmapi_error.h"
+#include "../storage/ndb/src/mgmapi/mgmapi_error.c"
 #endif
 
 static my_bool verbose, print_all_codes;

=== modified file 'mysql-test/Makefile.am'
--- a/mysql-test/Makefile.am	2009-02-01 19:21:44 +0000
+++ b/mysql-test/Makefile.am	2009-02-02 22:37:44 +0000
@@ -95,12 +95,14 @@ TEST_DIRS = t r include std_data std_dat
 	suite/funcs_2/t \
 	suite/jp suite/jp/t suite/jp/r suite/jp/std_data \
 	suite/manual/t suite/manual/r \
-	suite/ndb_team suite/ndb_team/t suite/ndb_team/r \
 	suite/rpl suite/rpl/data suite/rpl/include suite/rpl/r \
 	suite/rpl/t \
 	suite/stress/include suite/stress/t suite/stress/r \
 	suite/ndb suite/ndb/t suite/ndb/r \
+	suite/ndb_binlog suite/ndb_binlog/t suite/ndb_binlog/r \
+	suite/ndb_team suite/ndb_team/t suite/ndb_team/r \
 	suite/rpl_ndb suite/rpl_ndb/t suite/rpl_ndb/r \
+	suite/rpl_ndb_big suite/rpl_ndb_big/t suite/rpl_ndb_big/r \
 	suite/falcon suite/falcon/t suite/falcon/r \
 	suite/falcon_team suite/falcon_team/t suite/falcon_team/r \
 	suite/parts suite/parts/t suite/parts/r suite/parts/inc

=== modified file 'mysql-test/lib/v1/mysql-test-run.pl'
--- a/mysql-test/lib/v1/mysql-test-run.pl	2008-11-14 08:45:32 +0000
+++ b/mysql-test/lib/v1/mysql-test-run.pl	2009-02-01 21:05:19 +0000
@@ -135,7 +135,7 @@ our $default_vardir;
 
 our $opt_usage;
 our $opt_suites;
-our $opt_suites_default= "main,binlog,rpl,rpl_ndb,ndb"; # Default suites to run
+our $opt_suites_default= "ndb,ndb_binlog,rpl_ndb,main,binlog,rpl"; # Default suites to run
 our $opt_script_debug= 0;  # Script debugging, enable with --script-debug
 our $opt_verbose= 0;  # Verbose output, enable with --verbose
 
@@ -410,12 +410,13 @@ sub main () {
       # Check for any extra suites to enable based on the path name
       my %extra_suites=
 	(
-	 "mysql-5.1-new-ndb"              => "ndb_team",
-	 "mysql-5.1-new-ndb-merge"        => "ndb_team",
-	 "mysql-5.1-telco-6.2"            => "ndb_team",
-	 "mysql-5.1-telco-6.2-merge"      => "ndb_team",
-	 "mysql-5.1-telco-6.3"            => "ndb_team",
-	 "mysql-6.0-ndb"                  => "ndb_team",
+	 "bzr_mysql-5.1-ndb"                  => "ndb_team",
+	 "bzr_mysql-5.1-ndb-merge"            => "ndb_team",
+	 "bzr_mysql-5.1-telco-6.2"            => "ndb_team",
+	 "bzr_mysql-5.1-telco-6.2-merge"      => "ndb_team",
+	 "bzr_mysql-5.1-telco-6.3"            => "ndb_team",
+	 "bzr_mysql-5.1-telco-6.4"            => "ndb_team",
+	 "bzr_mysql-6.0-ndb"                  => "ndb_team,rpl_ndb_big",
 	);
 
       foreach my $dir ( reverse splitdir($glob_basedir) )
@@ -1577,16 +1578,22 @@ sub executable_setup_ndb () {
 				"$glob_basedir/storage/ndb",
 				"$glob_basedir/bin");
 
+  # Some might be found in sbin, not bin.
+  my $daemon_path= mtr_file_exists("$glob_basedir/ndb",
+				   "$glob_basedir/storage/ndb",
+				   "$glob_basedir/sbin",
+				   "$glob_basedir/bin");
+
   $exe_ndbd=
     mtr_exe_maybe_exists("$ndb_path/src/kernel/ndbd",
-			 "$ndb_path/ndbd",
+			 "$daemon_path/ndbd",
 			 "$glob_basedir/libexec/ndbd");
   $exe_ndb_mgm=
     mtr_exe_maybe_exists("$ndb_path/src/mgmclient/ndb_mgm",
 			 "$ndb_path/ndb_mgm");
   $exe_ndb_mgmd=
     mtr_exe_maybe_exists("$ndb_path/src/mgmsrv/ndb_mgmd",
-			 "$ndb_path/ndb_mgmd",
+			 "$daemon_path/ndb_mgmd",
 			 "$glob_basedir/libexec/ndb_mgmd");
   $exe_ndb_waiter=
     mtr_exe_maybe_exists("$ndb_path/tools/ndb_waiter",
@@ -2814,7 +2821,7 @@ sub ndbd_start ($$$) {
   mtr_add_arg($args, "$extra_args");
 
   my $nodeid= $cluster->{'ndbds'}->[$idx]->{'nodeid'};
-  my $path_ndbd_log= "$cluster->{'data_dir'}/ndb_${nodeid}.log";
+  my $path_ndbd_log= "$cluster->{'data_dir'}/ndb_${nodeid}_out.log";
   $pid= mtr_spawn($exe_ndbd, $args, "",
 		  $path_ndbd_log,
 		  $path_ndbd_log,
@@ -3973,9 +3980,12 @@ sub mysqld_arguments ($$$$) {
       mtr_add_arg($args, "%s--ndbcluster", $prefix);
       mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix,
 		  $cluster->{'connect_string'});
+      mtr_add_arg($args, "%s--ndb-wait-connected=20", $prefix);
+      mtr_add_arg($args, "%s--ndb-cluster-connection-pool=3", $prefix);
+      mtr_add_arg($args, "%s--slave-allow-batching", $prefix);
       if ( $mysql_version_id >= 50100 )
       {
-	mtr_add_arg($args, "%s--ndb-extra-logging", $prefix);
+	mtr_add_arg($args, "%s--ndb-log-orig", $prefix);
       }
     }
     else
@@ -4046,10 +4056,12 @@ sub mysqld_arguments ($$$$) {
       mtr_add_arg($args, "%s--ndbcluster", $prefix);
       mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix,
 		  $cluster->{'connect_string'});
-
+      mtr_add_arg($args, "%s--ndb-wait-connected=20", $prefix);
+      mtr_add_arg($args, "%s--ndb-cluster-connection-pool=3", $prefix);
+      mtr_add_arg($args, "%s--slave-allow-batching", $prefix);
       if ( $mysql_version_id >= 50100 )
       {
-	mtr_add_arg($args, "%s--ndb-extra-logging", $prefix);
+	mtr_add_arg($args, "%s--ndb-log-orig", $prefix);
       }
     }
     else
@@ -4310,6 +4322,7 @@ sub stop_all_servers () {
   {
     rm_ndbcluster_tables($mysqld->{'path_myddir'});
   }
+
 }
 
 
@@ -4619,22 +4632,6 @@ sub run_testcase_start_servers($) {
 	 $tinfo->{'master_num'} > 1 )
     {
       # Test needs cluster, start an extra mysqld connected to cluster
-
-      if ( $mysql_version_id >= 50100 )
-      {
-	# First wait for first mysql server to have created ndb system
-	# tables ok FIXME This is a workaround so that only one mysqld
-	# create the tables
-	if ( ! sleep_until_file_created(
-		  "$master->[0]->{'path_myddir'}/mysql/ndb_apply_status.ndb",
-					$master->[0]->{'start_timeout'},
-					$master->[0]->{'pid'}))
-	{
-
-	  $tinfo->{'comment'}= "Failed to create 'mysql/ndb_apply_status' table";
-	  return 1;
-	}
-      }
       mysqld_start($master->[1],$tinfo->{'master_opt'},[]);
     }
 

=== modified file 'mysql-test/lib/v1/ndb_config_1_node.ini'
--- a/mysql-test/lib/v1/ndb_config_1_node.ini	2008-11-14 08:45:32 +0000
+++ b/mysql-test/lib/v1/ndb_config_1_node.ini	2009-02-01 21:05:19 +0000
@@ -10,6 +10,7 @@ DataDir= CHOOSE_FILESYSTEM
 MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes
 MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes
 TimeBetweenGlobalCheckpoints= 500
+TimeBetweenEpochs=0
 NoOfFragmentLogFiles= 8
 FragmentLogFileSize= 6M
 DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory 

=== modified file 'mysql-test/lib/v1/ndb_config_2_node.ini'
--- a/mysql-test/lib/v1/ndb_config_2_node.ini	2008-11-14 08:45:32 +0000
+++ b/mysql-test/lib/v1/ndb_config_2_node.ini	2009-02-01 21:05:19 +0000
@@ -1,6 +1,6 @@
 [ndbd default]
 NoOfReplicas= 2
-MaxNoOfConcurrentTransactions= 64
+MaxNoOfConcurrentTransactions= 2048
 MaxNoOfConcurrentOperations= CHOOSE_MaxNoOfConcurrentOperations
 DataMemory= CHOOSE_DataMemory
 IndexMemory= CHOOSE_IndexMemory
@@ -9,7 +9,8 @@ TimeBetweenWatchDogCheck= 30000
 DataDir= CHOOSE_FILESYSTEM
 MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes
 MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes
-TimeBetweenGlobalCheckpoints= 500
+TimeBetweenGlobalCheckpoints= 3000
+TimeBetweenEpochs=100
 NoOfFragmentLogFiles= 4
 FragmentLogFileSize=12M
 DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory
@@ -53,3 +54,16 @@ PortNumber= CHOOSE_PORT_MGM
 [mysqld]
 
 [mysqld]
+NodeId: 63
+
+[mysqld]
+NodeId: 127
+
+[mysqld]
+NodeId: 192
+
+[mysqld]
+NodeId: 228
+
+[mysqld]
+NodeId: 255

=== modified file 'mysql-test/mysql-test-run.pl'
--- a/mysql-test/mysql-test-run.pl	2009-02-02 11:05:02 +0000
+++ b/mysql-test/mysql-test-run.pl	2009-02-03 06:49:48 +0000
@@ -259,17 +259,18 @@ sub main {
     # Check for any extra suites to enable based on the path name
     my %extra_suites=
       (
-       "mysql-5.1-new-ndb"              => "ndb_team",
-       "mysql-5.1-new-ndb-merge"        => "ndb_team",
-       "mysql-5.1-telco-6.2"            => "ndb_team",
-       "mysql-5.1-telco-6.2-merge"      => "ndb_team",
-       "mysql-5.1-telco-6.3"            => "ndb_team",
-       "mysql-6.0-ndb"                  => "ndb_team",
-       "mysql-6.0-falcon"               => "falcon_team",
-       "mysql-6.0-falcon-team"          => "falcon_team",
-       "mysql-6.0-falcon-wlad"          => "falcon_team",
-       "mysql-6.0-falcon-chris"         => "falcon_team",
-       "mysql-6.0-falcon-kevin"         => "falcon_team",
+       "bzr_mysql-5.1-ndb"              => "ndb_team",
+       "bzr_mysql-5.1-ndb-merge"        => "ndb_team",
+       "bzr_mysql-5.1-telco-6.2"        => "ndb_team",
+       "bzr_mysql-5.1-telco-6.2-merge"  => "ndb_team",
+       "bzr_mysql-5.1-telco-6.3"        => "ndb_team",
+       "bzr_mysql-5.1-telco-6.4"        => "ndb_team",
+       "bzr_mysql-6.0-ndb"              => "ndb_team,rpl_ndb_big,ndb_binlog",
+       "bzr_mysql-6.0-falcon"           => "falcon_team",
+       "bzr_mysql-6.0-falcon-team"      => "falcon_team",
+       "bzr_mysql-6.0-falcon-wlad"      => "falcon_team",
+       "bzr_mysql-6.0-falcon-chris"     => "falcon_team",
+       "bzr_mysql-6.0-falcon-kevin"     => "falcon_team",
       );
 
     foreach my $dir ( reverse splitdir($basedir) ) {

=== modified file 'mysql-test/r/partition_mgm.result'
--- a/mysql-test/r/partition_mgm.result	2008-12-10 08:06:58 +0000
+++ b/mysql-test/r/partition_mgm.result	2009-02-04 14:48:13 +0000
@@ -5,6 +5,11 @@ PARTITION BY HASH (a)
 PARTITIONS 1;
 INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
 ALTER TABLE t1 REORGANIZE PARTITION;
+ERROR HY000: REORGANIZE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs
+ALTER ONLINE TABLE t1 REORGANIZE PARTITION;
+ERROR HY000: REORGANIZE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs
+ALTER OFFLINE TABLE t1 REORGANIZE PARTITION;
+ERROR HY000: REORGANIZE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs
 DROP TABLE t1;
 create table t1 (a int)
 partition by range (a)
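
The three error lines above come from the new server-side check: REORGANIZE
PARTITION with no partition list is now rejected unless the table is
auto-partitioned. The matching partition_mgm.test change is not shown in this
hunk; a self-contained mysqltest sketch of what it presumably looks like (the
error symbol ER_REORG_NO_PARAM_ERROR is an assumption, matched to the message
text above):

CREATE TABLE t1 (a INT)
PARTITION BY HASH (a)
PARTITIONS 1;
INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
# Each variant should fail identically on a manually partitioned table
--error ER_REORG_NO_PARAM_ERROR
ALTER TABLE t1 REORGANIZE PARTITION;
--error ER_REORG_NO_PARAM_ERROR
ALTER ONLINE TABLE t1 REORGANIZE PARTITION;
--error ER_REORG_NO_PARAM_ERROR
ALTER OFFLINE TABLE t1 REORGANIZE PARTITION;
DROP TABLE t1;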

=== modified file 'mysql-test/std_data/ndb_config_config.ini'
--- a/mysql-test/std_data/ndb_config_config.ini	2008-04-25 10:34:28 +0000
+++ b/mysql-test/std_data/ndb_config_config.ini	2009-02-02 15:58:48 +0000
@@ -66,4 +66,3 @@ NodeId: 228
 
 [mysqld]
 NodeId: 255
-

=== modified file 'mysql-test/suite/ndb/my.cnf'
--- a/mysql-test/suite/ndb/my.cnf	2008-05-09 15:28:34 +0000
+++ b/mysql-test/suite/ndb/my.cnf	2009-02-02 15:58:48 +0000
@@ -6,7 +6,22 @@ NoOfReplicas=                  2
 ndbd=,
 ndb_mgmd=
 mysqld=,
-ndbapi=,,
+ndbapi=,,,,,,,,,,,
+
+[cluster_config.ndbapi.8.1]
+NodeId=63
+
+[cluster_config.ndbapi.9.1]
+NodeId=127
+
+[cluster_config.ndbapi.10.1]
+NodeId=192
+
+[cluster_config.ndbapi.11.1]
+NodeId=228
+
+[cluster_config.ndbapi.12.1]
+NodeId=255
 
 [mysqld]
 # Make all mysqlds use cluster
@@ -14,11 +29,9 @@ ndbcluster
 
 # Time to wait for NDB connection before
 # accepting connections client connections
-ndb-wait-connected=            20
-
-ndb-extra-logging
+ndb-wait-connected=20
 
-#ndb-cluster-connection-pool=  3
+ndb-cluster-connection-pool=3
 
 [ENV]
 NDB_CONNECTSTRING=             @mysql_cluster.1.ndb_connectstring
@@ -26,16 +39,3 @@ MASTER_MYPORT=                 @mysqld.1
 MASTER_MYPORT1=                @mysqld.2.1.port
 
 NDB_BACKUP_DIR=                @cluster_config.ndbd.1.1.BackupDataDir
-
-
-# Give the second mysqld hardcoded NodeId
-[cluster_config.mysqld.2.1]
-NodeId=192
-
-# Set hardccoded NodeId's alos on ndbapi nodes
-[cluster_config.ndbapi.2.1]
-NodeId=228
-
-[cluster_config.ndbapi.3.1]
-NodeId=255
-

=== modified file 'mysql-test/suite/ndb/r/bug36547.result'
--- a/mysql-test/suite/ndb/r/bug36547.result	2008-05-07 14:43:32 +0000
+++ b/mysql-test/suite/ndb/r/bug36547.result	2009-02-02 06:45:57 +0000
@@ -2,11 +2,11 @@ SET NDB_EXTRA_LOGGING=1;
 ERROR HY000: Variable 'ndb_extra_logging' is a GLOBAL variable and should be set with SET GLOBAL
 SET @SAVE_NDB_EXTRA_LOGGING= @@NDB_EXTRA_LOGGING;
 SET GLOBAL NDB_EXTRA_LOGGING=1;
-SHOW VARIABLES LIKE 'ndb_extra%';
+SHOW VARIABLES LIKE 'ndb_extra_logging';
 Variable_name	Value
 ndb_extra_logging	1
 SET GLOBAL NDB_EXTRA_LOGGING=0;
-SHOW VARIABLES LIKE 'ndb_extra%';
+SHOW VARIABLES LIKE 'ndb_extra_logging';
 Variable_name	Value
 ndb_extra_logging	0
-SET @GLOBAL.NDB_EXTRA_LOGGGING= @SAVE_NDB_EXTRA_LOGGING;
+SET @@GLOBAL.NDB_EXTRA_LOGGING= @SAVE_NDB_EXTRA_LOGGING;
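
The last line of this hunk fixes a silent no-op: with a single at-sign (and a
misspelled name), SET @GLOBAL.NDB_EXTRA_LOGGGING merely created a user
variable, so the saved value was never written back to the server. A minimal
sketch of the distinction, using this variable as an example (the comments are
annotations, not part of the original test):

SET @GLOBAL.ndb_extra_logging= 1;    # user variable named "GLOBAL.ndb_extra_logging"
SELECT @@GLOBAL.ndb_extra_logging;   # global system variable: unchanged by the line above
SET @@GLOBAL.ndb_extra_logging= 1;   # this one actually updates the server setting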

=== modified file 'mysql-test/suite/ndb/r/ndb_basic.result'
--- a/mysql-test/suite/ndb/r/ndb_basic.result	2008-10-15 12:14:27 +0000
+++ b/mysql-test/suite/ndb/r/ndb_basic.result	2009-01-23 11:03:00 +0000
@@ -24,6 +24,7 @@ ndb_force_send	#
 ndb_index_stat_cache_entries	#
 ndb_index_stat_enable	#
 ndb_index_stat_update_freq	#
+ndb_log_binlog_index	#
 ndb_report_thresh_binlog_epoch_slip	#
 ndb_report_thresh_binlog_mem_usage	#
 ndb_use_copying_alter_table	#

=== modified file 'mysql-test/suite/ndb/r/ndb_config.result'
--- a/mysql-test/suite/ndb/r/ndb_config.result	2008-04-25 10:34:28 +0000
+++ b/mysql-test/suite/ndb/r/ndb_config.result	2009-02-02 16:02:58 +0000
@@ -1,5 +1,5 @@
 == 1 ==
-ndbd,1,localhost ndbd,2,localhost ndb_mgmd,3,localhost mysqld,4,localhost mysqld,192,localhost mysqld,193,localhost mysqld,228,localhost mysqld,255,localhost
+ndbd,1,localhost ndbd,2,localhost ndb_mgmd,3,localhost mysqld,4,localhost mysqld,5,localhost mysqld,6,localhost mysqld,7,localhost mysqld,8,localhost mysqld,9,localhost mysqld,10,localhost mysqld,11,localhost mysqld,12,localhost mysqld,63,localhost mysqld,127,localhost mysqld,192,localhost mysqld,228,localhost mysqld,255,localhost
 == 2 ==
 1,localhost,20971520,1048576 2,localhost,20971520,1048576
 == 3 ==

=== modified file 'mysql-test/suite/ndb/r/ndb_dbug_lock.result'
--- a/mysql-test/suite/ndb/r/ndb_dbug_lock.result	2008-10-29 13:09:15 +0000
+++ b/mysql-test/suite/ndb/r/ndb_dbug_lock.result	2009-01-23 09:40:08 +0000
@@ -40,5 +40,5 @@ t1	CREATE TABLE `t1` (
   PRIMARY KEY (`a`)
 ) ENGINE=ndbcluster DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
 # Cleanup
-set session debug="-d,sleep_after_global_schema_lock";
+set session debug="-d,";
 drop table t1;

=== modified file 'mysql-test/suite/ndb/r/ndb_dd_ddl.result'
--- a/mysql-test/suite/ndb/r/ndb_dd_ddl.result	2008-10-17 08:37:23 +0000
+++ b/mysql-test/suite/ndb/r/ndb_dd_ddl.result	2009-01-28 15:06:33 +0000
@@ -236,80 +236,3 @@ engine ndb;
 ERROR HY000: Failed to drop TABLESPACE
 drop logfile group lg1
 engine ndb;
-
-# -----------------------------------------------------------------
-# End 5.1 test
-# -----------------------------------------------------------------
-
-# --
-# -- WL#4300: Define privileges for tablespaces.
-# --
-GRANT CREATE TABLESPACE ON *.* TO mysqltest_u1@localhost;
-
-DROP DATABASE IF EXISTS mysqltest2;
-CREATE DATABASE mysqltest2;
-GRANT ALL PRIVILEGES ON mysqltest2.* TO mysqltest_u2@localhost;
-
-# -- Connection: mysqltest_u1@localhost
-
-# -- Grants for mysqltest_u1@localhost:
-SHOW GRANTS;
-Grants for mysqltest_u1@localhost
-GRANT CREATE TABLESPACE ON *.* TO 'mysqltest_u1'@'localhost'
-
-# -- Check CREATE LOGFILE GROUP...
-CREATE LOGFILE GROUP lg1
-ADD UNDOFILE 'undofile.dat'
-INITIAL_SIZE 1M
-UNDO_BUFFER_SIZE = 1M
-ENGINE = NDB;
-
-# -- Check ALTER LOGFILE GROUP...
-ALTER LOGFILE GROUP lg1
-ADD UNDOFILE 'undofile02.dat'
-INITIAL_SIZE 1M
-ENGINE = NDB;
-
-# -- Check CREATE TABLESPACE...
-CREATE TABLESPACE ts1
-ADD DATAFILE 'datafile.dat'
-USE LOGFILE GROUP lg1
-INITIAL_SIZE 1M
-ENGINE = NDB;
-
-# -- Check ALTER TABLESPACE...
-ALTER TABLESPACE ts1
-DROP DATAFILE 'datafile.dat'
-INITIAL_SIZE 1M
-ENGINE = NDB;
-
-# -- Connection: mysqltest_u2@localhost
-
-# -- Grants for mysqltest_u2@localhost:
-SHOW GRANTS;
-Grants for mysqltest_u2@localhost
-GRANT USAGE ON *.* TO 'mysqltest_u2'@'localhost'
-GRANT ALL PRIVILEGES ON `mysqltest2`.* TO 'mysqltest_u2'@'localhost'
-CREATE TABLE t1(c INT) TABLESPACE ts1;
-DROP TABLE t1;
-
-# -- Connection: mysqltest_u1@localhost
-
-
-# -- Check DROP TABLESPACE...
-DROP TABLESPACE ts1 
-ENGINE = NDB;
-
-# -- Check DROP LOGFILE GROUP...
-DROP LOGFILE GROUP lg1 
-ENGINE = NDB;
-
-# -- Connection: root@localhost
-
-DROP USER mysqltest_u1@localhost;
-DROP USER mysqltest_u2@localhost;
-DROP DATABASE mysqltest2;
-
-# -----------------------------------------------------------------
-# End 6.0 test
-# -----------------------------------------------------------------

=== added file 'mysql-test/suite/ndb/r/ndb_dd_ddl_grant.result'
--- a/mysql-test/suite/ndb/r/ndb_dd_ddl_grant.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb/r/ndb_dd_ddl_grant.result	2009-01-28 15:06:33 +0000
@@ -0,0 +1,73 @@
+
+# --
+# -- WL#4300: Define privileges for tablespaces.
+# --
+GRANT CREATE TABLESPACE ON *.* TO mysqltest_u1@localhost;
+
+DROP DATABASE IF EXISTS mysqltest2;
+CREATE DATABASE mysqltest2;
+GRANT ALL PRIVILEGES ON mysqltest2.* TO mysqltest_u2@localhost;
+
+# -- Connection: mysqltest_u1@localhost
+
+# -- Grants for mysqltest_u1@localhost:
+SHOW GRANTS;
+Grants for mysqltest_u1@localhost
+GRANT CREATE TABLESPACE ON *.* TO 'mysqltest_u1'@'localhost'
+
+# -- Check CREATE LOGFILE GROUP...
+CREATE LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile.dat'
+INITIAL_SIZE 1M
+UNDO_BUFFER_SIZE = 1M
+ENGINE = NDB;
+
+# -- Check ALTER LOGFILE GROUP...
+ALTER LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile02.dat'
+INITIAL_SIZE 1M
+ENGINE = NDB;
+
+# -- Check CREATE TABLESPACE...
+CREATE TABLESPACE ts1
+ADD DATAFILE 'datafile.dat'
+USE LOGFILE GROUP lg1
+INITIAL_SIZE 1M
+ENGINE = NDB;
+
+# -- Check ALTER TABLESPACE...
+ALTER TABLESPACE ts1
+DROP DATAFILE 'datafile.dat'
+INITIAL_SIZE 1M
+ENGINE = NDB;
+
+# -- Connection: mysqltest_u2@localhost
+
+# -- Grants for mysqltest_u2@localhost:
+SHOW GRANTS;
+Grants for mysqltest_u2@localhost
+GRANT USAGE ON *.* TO 'mysqltest_u2'@'localhost'
+GRANT ALL PRIVILEGES ON `mysqltest2`.* TO 'mysqltest_u2'@'localhost'
+CREATE TABLE t1(c INT) TABLESPACE ts1;
+DROP TABLE t1;
+
+# -- Connection: mysqltest_u1@localhost
+
+
+# -- Check DROP TABLESPACE...
+DROP TABLESPACE ts1 
+ENGINE = NDB;
+
+# -- Check DROP LOGFILE GROUP...
+DROP LOGFILE GROUP lg1 
+ENGINE = NDB;
+
+# -- Connection: root@localhost
+
+DROP USER mysqltest_u1@localhost;
+DROP USER mysqltest_u2@localhost;
+DROP DATABASE mysqltest2;
+
+# -----------------------------------------------------------------
+# End 6.0 test
+# -----------------------------------------------------------------

=== modified file 'mysql-test/suite/ndb/r/ndb_discover_db.result'
--- a/mysql-test/suite/ndb/r/ndb_discover_db.result	2008-09-30 09:14:44 +0000
+++ b/mysql-test/suite/ndb/r/ndb_discover_db.result	2009-02-02 15:58:48 +0000
@@ -5,3 +5,31 @@ create table discover_db.t1 (a int key, 
 create database discover_db_2;
 alter database discover_db_2 character set binary;
 create table discover_db_2.t1 (a int key, b int) engine ndb;
+show create database discover_db;
+Database	Create Database
+discover_db	CREATE DATABASE `discover_db` /*!40100 DEFAULT CHARACTER SET latin1 */
+show create database discover_db_2;
+Database	Create Database
+discover_db_2	CREATE DATABASE `discover_db_2` /*!40100 DEFAULT CHARACTER SET binary */
+reset master;
+insert into discover_db.t1 values (1,1);
+show binlog events from <binlog_start>;
+Log_name	Pos	Event_type	Server_id	End_log_pos	Info
+mysqld-bin.000001	#	Query	1	#	BEGIN
+mysqld-bin.000001	#	Table_map	1	#	table_id: # (discover_db.t1)
+mysqld-bin.000001	#	Table_map	1	#	table_id: # (mysql.ndb_apply_status)
+mysqld-bin.000001	#	Write_rows	1	#	table_id: #
+mysqld-bin.000001	#	Write_rows	1	#	table_id: # flags: STMT_END_F
+mysqld-bin.000001	#	Query	1	#	COMMIT
+reset master;
+insert into discover_db_2.t1 values (1,1);
+show binlog events from <binlog_start>;
+Log_name	Pos	Event_type	Server_id	End_log_pos	Info
+mysqld-bin.000001	#	Query	1	#	BEGIN
+mysqld-bin.000001	#	Table_map	1	#	table_id: # (discover_db_2.t1)
+mysqld-bin.000001	#	Table_map	1	#	table_id: # (mysql.ndb_apply_status)
+mysqld-bin.000001	#	Write_rows	1	#	table_id: #
+mysqld-bin.000001	#	Write_rows	1	#	table_id: # flags: STMT_END_F
+mysqld-bin.000001	#	Query	1	#	COMMIT
+drop database discover_db;
+drop database discover_db_2;

=== removed file 'mysql-test/suite/ndb/r/ndb_discover_db2.result'
--- a/mysql-test/suite/ndb/r/ndb_discover_db2.result	2008-09-30 09:14:44 +0000
+++ b/mysql-test/suite/ndb/r/ndb_discover_db2.result	1970-01-01 00:00:00 +0000
@@ -1,28 +0,0 @@
-show create database discover_db;
-Database	Create Database
-discover_db	CREATE DATABASE `discover_db` /*!40100 DEFAULT CHARACTER SET latin1 */
-show create database discover_db_2;
-Database	Create Database
-discover_db_2	CREATE DATABASE `discover_db_2` /*!40100 DEFAULT CHARACTER SET binary */
-reset master;
-insert into discover_db.t1 values (1,1);
-show binlog events from <binlog_start>;
-Log_name	Pos	Event_type	Server_id	End_log_pos	Info
-master-bin1.000001	#	Query	102	#	BEGIN
-master-bin1.000001	#	Table_map	102	#	table_id: # (discover_db.t1)
-master-bin1.000001	#	Table_map	102	#	table_id: # (mysql.ndb_apply_status)
-master-bin1.000001	#	Write_rows	102	#	table_id: #
-master-bin1.000001	#	Write_rows	102	#	table_id: # flags: STMT_END_F
-master-bin1.000001	#	Query	102	#	COMMIT
-reset master;
-insert into discover_db_2.t1 values (1,1);
-show binlog events from <binlog_start>;
-Log_name	Pos	Event_type	Server_id	End_log_pos	Info
-master-bin1.000001	#	Query	102	#	BEGIN
-master-bin1.000001	#	Table_map	102	#	table_id: # (discover_db_2.t1)
-master-bin1.000001	#	Table_map	102	#	table_id: # (mysql.ndb_apply_status)
-master-bin1.000001	#	Write_rows	102	#	table_id: #
-master-bin1.000001	#	Write_rows	102	#	table_id: # flags: STMT_END_F
-master-bin1.000001	#	Query	102	#	COMMIT
-drop database discover_db;
-drop database discover_db_2;

=== modified file 'mysql-test/suite/ndb/r/ndb_read_multi_range.result'
--- a/mysql-test/suite/ndb/r/ndb_read_multi_range.result	2008-12-24 10:48:24 +0000
+++ b/mysql-test/suite/ndb/r/ndb_read_multi_range.result	2009-02-02 15:58:48 +0000
@@ -520,7 +520,7 @@ select * from t2 order by id;
 id
 3
 drop trigger kaboom;
-drop table t1, t2;
+drop table t1;
 create table t1 (
 a int not null primary key,
 b int
@@ -636,3 +636,59 @@ i	i	9
 m	m	13
 v	v	22
 drop table t1;
+create table t1 (
+a int not null primary key,
+b int
+) engine = ndb;
+insert into t1 values (7,2),(8,3),(10,4);
+update t1 set b = 5 where a in (7,8) or a >= 10;
+select * from t1 order by a;
+a	b
+7	5
+8	5
+10	5
+delete from t1 where a in (7,8) or a >= 10;
+select * from t1 order by a;
+a	b
+drop table t1;
+create table t1 (a int primary key, b int, key b_idx (b)) engine ndb;
+insert into t1 values(1,1), (2,2), (3,3), (4,4), (5,5);
+select one.a 
+from t1 one left join t1 two 
+on (two.b = one.b) 
+where one.a in (3, 4) 
+order by a;
+a
+3
+4
+drop table t1;
+create table t1 (a varchar(1536) not null,
+b varchar(1536) not null,
+c int, primary key (a,b)) engine=ndb;
+insert into t1 values ('a', 'a', 1), ('b', 'b', 2), ('c', 'c', 3),
+('d', 'd', 4), ('e', 'e', 5), ('f', 'f', 6),
+('g', 'g', 7), ('h', 'h', 8), ('i', 'i', 9),
+('j', 'j', 10), ('k', 'k', 11), ('l', 'l', 12),
+('m', 'm', 13), ('n', 'n', 14), ('o', 'o', 15),
+('p', 'p', 16), ('q', 'q', 17), ('r', 'r', 18),
+('s', 's', 19), ('t', 't', 20), ('u', 'u', 21),
+('v', 'v', 22), ('w', 'w', 23), ('x', 'x', 24);
+select * from t1
+where (a >= 'aa' and b >= 'x' and a <= 'c' and b <= 'c')
+or (a = 'd')
+or (a = 'e')
+or (a = 'f')
+or (a > 'g' and a < 'ii')
+or (a >= 'j' and b >= 'x' and a <= 'k' and b <= 'k')
+or (a = 'm' and b = 'm')
+or (a = 'v')
+order by a asc, b asc;
+a	b	c
+d	d	4
+e	e	5
+f	f	6
+h	h	8
+i	i	9
+m	m	13
+v	v	22
+drop table t1, t2;

=== modified file 'mysql-test/suite/ndb/t/bug36547.test'
--- a/mysql-test/suite/ndb/t/bug36547.test	2008-05-07 14:43:32 +0000
+++ b/mysql-test/suite/ndb/t/bug36547.test	2009-02-02 06:45:57 +0000
@@ -7,7 +7,8 @@ SET NDB_EXTRA_LOGGING=1;
 
 SET @SAVE_NDB_EXTRA_LOGGING= @@NDB_EXTRA_LOGGING;
 SET GLOBAL NDB_EXTRA_LOGGING=1;
-SHOW VARIABLES LIKE 'ndb_extra%';
+SHOW VARIABLES LIKE 'ndb_extra_logging';
 SET GLOBAL NDB_EXTRA_LOGGING=0;
-SHOW VARIABLES LIKE 'ndb_extra%';
-SET @GLOBAL.NDB_EXTRA_LOGGGING= @SAVE_NDB_EXTRA_LOGGING;
+SHOW VARIABLES LIKE 'ndb_extra_logging';
+SET @@GLOBAL.NDB_EXTRA_LOGGING= @SAVE_NDB_EXTRA_LOGGING;
+

=== modified file 'mysql-test/suite/ndb/t/ndb_dbug_lock.test'
--- a/mysql-test/suite/ndb/t/ndb_dbug_lock.test	2008-11-06 18:54:30 +0000
+++ b/mysql-test/suite/ndb/t/ndb_dbug_lock.test	2009-01-23 09:40:08 +0000
@@ -67,5 +67,5 @@ show create table t1;
 
 --echo # Cleanup
 --connection default
-set session debug="-d,sleep_after_global_schema_lock";
+set session debug="-d,";
 drop table t1;

=== modified file 'mysql-test/suite/ndb/t/ndb_dd_ddl.test'
--- a/mysql-test/suite/ndb/t/ndb_dd_ddl.test	2008-10-17 08:37:23 +0000
+++ b/mysql-test/suite/ndb/t/ndb_dd_ddl.test	2009-01-28 15:06:33 +0000
@@ -367,105 +367,4 @@ engine ndb;
 --exec rm $MYSQLTEST_VARDIR/tmp/t1.frm
 
 
---echo
---echo # -----------------------------------------------------------------
---echo # End 5.1 test
---echo # -----------------------------------------------------------------
-
---echo
---echo # --
---echo # -- WL#4300: Define privileges for tablespaces.
---echo # --
-
-GRANT CREATE TABLESPACE ON *.* TO mysqltest_u1@localhost;
-
---echo
-
---disable_warnings
-DROP DATABASE IF EXISTS mysqltest2;
---enable_warnings
-
-CREATE DATABASE mysqltest2;
-
-GRANT ALL PRIVILEGES ON mysqltest2.* TO mysqltest_u2@localhost;
-
---echo
---echo # -- Connection: mysqltest_u1@localhost
---echo
---connect(con1, localhost, mysqltest_u1,,)
-
---echo # -- Grants for mysqltest_u1@localhost:
-SHOW GRANTS;
-
---echo
---echo # -- Check CREATE LOGFILE GROUP...
-CREATE LOGFILE GROUP lg1
-ADD UNDOFILE 'undofile.dat'
-INITIAL_SIZE 1M
-UNDO_BUFFER_SIZE = 1M
-ENGINE = NDB;
-
---echo
---echo # -- Check ALTER LOGFILE GROUP...
-ALTER LOGFILE GROUP lg1
-ADD UNDOFILE 'undofile02.dat'
-INITIAL_SIZE 1M
-ENGINE = NDB;
-
---echo
---echo # -- Check CREATE TABLESPACE...
-CREATE TABLESPACE ts1
-ADD DATAFILE 'datafile.dat'
-USE LOGFILE GROUP lg1
-INITIAL_SIZE 1M
-ENGINE = NDB;
-
---echo
---echo # -- Check ALTER TABLESPACE...
-ALTER TABLESPACE ts1
-DROP DATAFILE 'datafile.dat'
-INITIAL_SIZE 1M
-ENGINE = NDB;
-
---echo
---echo # -- Connection: mysqltest_u2@localhost
---echo
---connect(con2, localhost, mysqltest_u2,,mysqltest2)
-
---echo # -- Grants for mysqltest_u2@localhost:
-SHOW GRANTS;
-
-CREATE TABLE t1(c INT) TABLESPACE ts1;
-
-DROP TABLE t1;
-
---echo
---echo # -- Connection: mysqltest_u1@localhost
---echo
---connection con1
-
---echo
---echo # -- Check DROP TABLESPACE...
-DROP TABLESPACE ts1 
-ENGINE = NDB;
-
---echo
---echo # -- Check DROP LOGFILE GROUP...
-DROP LOGFILE GROUP lg1 
-ENGINE = NDB;
-
---echo
---echo # -- Connection: root@localhost
---echo
---connection default
---disconnect con1
-
-DROP USER mysqltest_u1@localhost;
-DROP USER mysqltest_u2@localhost;
-
-DROP DATABASE mysqltest2;
-
---echo
---echo # -----------------------------------------------------------------
---echo # End 6.0 test
---echo # -----------------------------------------------------------------
+# End 5.1 test

=== added file 'mysql-test/suite/ndb/t/ndb_dd_ddl_grant.test'
--- a/mysql-test/suite/ndb/t/ndb_dd_ddl_grant.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb/t/ndb_dd_ddl_grant.test	2009-01-28 15:06:33 +0000
@@ -0,0 +1,101 @@
+-- source include/have_ndb.inc
+# grants are not in embedded
+-- source include/not_embedded.inc
+
+--echo
+--echo # --
+--echo # -- WL#4300: Define privileges for tablespaces.
+--echo # --
+
+GRANT CREATE TABLESPACE ON *.* TO mysqltest_u1@localhost;
+
+--echo
+
+--disable_warnings
+DROP DATABASE IF EXISTS mysqltest2;
+--enable_warnings
+
+CREATE DATABASE mysqltest2;
+
+GRANT ALL PRIVILEGES ON mysqltest2.* TO mysqltest_u2@localhost;
+
+--echo
+--echo # -- Connection: mysqltest_u1@localhost
+--echo
+--connect(con1, localhost, mysqltest_u1,,)
+
+--echo # -- Grants for mysqltest_u1@localhost:
+SHOW GRANTS;
+
+--echo
+--echo # -- Check CREATE LOGFILE GROUP...
+CREATE LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile.dat'
+INITIAL_SIZE 1M
+UNDO_BUFFER_SIZE = 1M
+ENGINE = NDB;
+
+--echo
+--echo # -- Check ALTER LOGFILE GROUP...
+ALTER LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile02.dat'
+INITIAL_SIZE 1M
+ENGINE = NDB;
+
+--echo
+--echo # -- Check CREATE TABLESPACE...
+CREATE TABLESPACE ts1
+ADD DATAFILE 'datafile.dat'
+USE LOGFILE GROUP lg1
+INITIAL_SIZE 1M
+ENGINE = NDB;
+
+--echo
+--echo # -- Check ALTER TABLESPACE...
+ALTER TABLESPACE ts1
+DROP DATAFILE 'datafile.dat'
+INITIAL_SIZE 1M
+ENGINE = NDB;
+
+--echo
+--echo # -- Connection: mysqltest_u2@localhost
+--echo
+--connect(con2, localhost, mysqltest_u2,,mysqltest2)
+
+--echo # -- Grants for mysqltest_u2@localhost:
+SHOW GRANTS;
+
+CREATE TABLE t1(c INT) TABLESPACE ts1;
+
+DROP TABLE t1;
+
+--echo
+--echo # -- Connection: mysqltest_u1@localhost
+--echo
+--connection con1
+
+--echo
+--echo # -- Check DROP TABLESPACE...
+DROP TABLESPACE ts1 
+ENGINE = NDB;
+
+--echo
+--echo # -- Check DROP LOGFILE GROUP...
+DROP LOGFILE GROUP lg1 
+ENGINE = NDB;
+
+--echo
+--echo # -- Connection: root@localhost
+--echo
+--connection default
+--disconnect con1
+
+DROP USER mysqltest_u1@localhost;
+DROP USER mysqltest_u2@localhost;
+
+DROP DATABASE mysqltest2;
+
+--echo
+--echo # -----------------------------------------------------------------
+--echo # End 6.0 test
+--echo # -----------------------------------------------------------------

=== modified file 'mysql-test/suite/ndb/t/ndb_dd_dump.test'
--- a/mysql-test/suite/ndb/t/ndb_dd_dump.test	2008-12-24 10:48:24 +0000
+++ b/mysql-test/suite/ndb/t/ndb_dd_dump.test	2009-02-02 15:58:48 +0000
@@ -257,10 +257,10 @@ CREATE TABLE test.t (
 #'TRUNCATE test.t' failed: 1205: Lock wait timeout exceeded; try restarting 
 #transaction. TABLESPACE ts STORAGE DISK ENGINE=NDB;
 
-let $MYSQLD_DATADIR= `select @@datadir`;
  SELECT count(*) FROM test.t;
  LOAD DATA INFILE 't_backup' INTO TABLE test.t;
- --remove_file $MYSQLD_DATADIR/test/t_backup
+--let $MYSQLD_DATADIR= `SELECT @@datadir`
+--remove_file  $MYSQLD_DATADIR/test/t_backup
 
  SELECT * FROM test.t order by a;
 

=== modified file 'mysql-test/suite/ndb/t/ndb_discover_db.test'
--- a/mysql-test/suite/ndb/t/ndb_discover_db.test	2008-12-24 10:48:24 +0000
+++ b/mysql-test/suite/ndb/t/ndb_discover_db.test	2009-02-02 15:58:48 +0000
@@ -12,6 +12,27 @@ drop database if exists discover_db_2;
 # The discovery happens in ndb_discover_db2.test
 #
 
+#
+# Shutdown server 1
+#
+
+-- connection server1
+# Write file to make mysql-test-run.pl expect the "crash", but don't start
+# it until it's told to.
+--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.1.expect
+wait
+EOF
+# Send shutdown to the connected server and give
+# it 30 seconds to die before zapping it.
+shutdown_server 30;
+# Check server is gone.
+--source include/wait_until_disconnected.inc
+
+#
+# Create databases while server1 is down
+#
+-- connection server2
+
 # check that created database is discovered
 create database discover_db;
 create table discover_db.t1 (a int key, b int) engine ndb;
@@ -21,15 +42,41 @@ create database discover_db_2;
 alter database discover_db_2 character set binary;
 create table discover_db_2.t1 (a int key, b int) engine ndb;
 
-let $MYSQLD_DATADIR= `select @@datadir`;
+#
+# Startup server1
+#
+
+-- connection server1
+# Write file to make mysql-test-run.pl start up the server again.
+--append_file $MYSQLTEST_VARDIR/tmp/mysqld.1.1.expect
+restart
+EOF
+# Turn on reconnect.
+--enable_reconnect
+# Call script that will poll the server waiting for it to be back online again.
+--source include/wait_until_connected_again.inc
+# Turn off reconnect again.
+--disable_reconnect
+#
+--disable_query_log
+--source include/ndb_not_readonly.inc
+--enable_query_log
+
+#
+# Now check that databases have been discovered
+#
 
--- remove_file $MYSQLD_DATADIR/discover_db/t1.frm
--- remove_file $MYSQLD_DATADIR/discover_db/t1.ndb
--- remove_file $MYSQLD_DATADIR/discover_db/db.opt
--- rmdir $MYSQLD_DATADIR/discover_db
-
--- remove_file $MYSQLD_DATADIR/discover_db_2/t1.frm
--- remove_file $MYSQLD_DATADIR/discover_db_2/t1.ndb
--- remove_file $MYSQLD_DATADIR/discover_db_2/db.opt
--- rmdir $MYSQLD_DATADIR/discover_db_2
+show create database discover_db;
+show create database discover_db_2;
+reset master;
+insert into discover_db.t1 values (1,1);
+--source include/show_binlog_events2.inc
+reset master;
+insert into discover_db_2.t1 values (1,1);
+--source include/show_binlog_events2.inc
 
+#
+# Cleanup
+#
+drop database discover_db;
+drop database discover_db_2;

=== removed file 'mysql-test/suite/ndb/t/ndb_discover_db2-master.opt'
--- a/mysql-test/suite/ndb/t/ndb_discover_db2-master.opt	2008-09-30 09:14:44 +0000
+++ b/mysql-test/suite/ndb/t/ndb_discover_db2-master.opt	1970-01-01 00:00:00 +0000
@@ -1 +0,0 @@
---skip-external-locking

=== removed file 'mysql-test/suite/ndb/t/ndb_discover_db2.test'
--- a/mysql-test/suite/ndb/t/ndb_discover_db2.test	2008-09-30 09:14:44 +0000
+++ b/mysql-test/suite/ndb/t/ndb_discover_db2.test	1970-01-01 00:00:00 +0000
@@ -1,21 +0,0 @@
--- source include/have_multi_ndb.inc
--- source include/have_binlog_format_mixed_or_row.inc
-
-#
-# When this test started there no database on disk for server2
-# Check that table has been discovered correctly, and that the
-# binlog is updated correctly
-#
-
--- connection server2
-show create database discover_db;
-show create database discover_db_2;
-reset master;
-insert into discover_db.t1 values (1,1);
---source include/show_binlog_events2.inc
-reset master;
-insert into discover_db_2.t1 values (1,1);
---source include/show_binlog_events2.inc
-
-drop database discover_db;
-drop database discover_db_2;

=== removed file 'mysql-test/suite/ndb/t/ndb_partition_error2-master.opt'
--- a/mysql-test/suite/ndb/t/ndb_partition_error2-master.opt	2007-06-27 12:28:02 +0000
+++ b/mysql-test/suite/ndb/t/ndb_partition_error2-master.opt	1970-01-01 00:00:00 +0000
@@ -1 +0,0 @@
---ndbcluster

=== modified file 'mysql-test/suite/ndb/t/ndb_read_multi_range.test'
--- a/mysql-test/suite/ndb/t/ndb_read_multi_range.test	2008-12-24 10:48:24 +0000
+++ b/mysql-test/suite/ndb/t/ndb_read_multi_range.test	2009-02-02 15:58:48 +0000
@@ -386,7 +386,7 @@ delete from t1 where id in (1,2);
 select * from t2 order by id;
 
 drop trigger kaboom;
-drop table t1, t2;
+drop table t1;
 
 #bug#31874
 
@@ -484,3 +484,54 @@ select * from t1
     or (a = 'v')
     order by a asc, b asc;
 drop table t1;
+
+#bug#31874
+
+create table t1 (
+  a int not null primary key,
+  b int
+) engine = ndb;
+insert into t1 values (7,2),(8,3),(10,4);
+
+update t1 set b = 5 where a in (7,8) or a >= 10;
+select * from t1 order by a;
+delete from t1 where a in (7,8) or a >= 10;
+select * from t1 order by a;
+
+drop table t1;
+
+#bug#35137 - self join + mrr
+
+create table t1 (a int primary key, b int, key b_idx (b)) engine ndb;
+insert into t1 values(1,1), (2,2), (3,3), (4,4), (5,5);
+
+select one.a 
+from t1 one left join t1 two 
+on (two.b = one.b) 
+where one.a in (3, 4) 
+order by a;
+
+drop table t1;
+
+create table t1 (a varchar(1536) not null,
+                 b varchar(1536) not null,
+                 c int, primary key (a,b)) engine=ndb;
+insert into t1 values ('a', 'a', 1), ('b', 'b', 2), ('c', 'c', 3),
+                      ('d', 'd', 4), ('e', 'e', 5), ('f', 'f', 6),
+                      ('g', 'g', 7), ('h', 'h', 8), ('i', 'i', 9),
+                      ('j', 'j', 10), ('k', 'k', 11), ('l', 'l', 12),
+                      ('m', 'm', 13), ('n', 'n', 14), ('o', 'o', 15),
+                      ('p', 'p', 16), ('q', 'q', 17), ('r', 'r', 18),
+                      ('s', 's', 19), ('t', 't', 20), ('u', 'u', 21),
+                      ('v', 'v', 22), ('w', 'w', 23), ('x', 'x', 24);
+select * from t1
+ where (a >= 'aa' and b >= 'x' and a <= 'c' and b <= 'c')
+    or (a = 'd')
+    or (a = 'e')
+    or (a = 'f')
+    or (a > 'g' and a < 'ii')
+    or (a >= 'j' and b >= 'x' and a <= 'k' and b <= 'k')
+    or (a = 'm' and b = 'm')
+    or (a = 'v')
+    order by a asc, b asc;
+drop table t1, t2;

=== removed file 'mysql-test/suite/ndb/t/ndb_restore_partition-master.opt'
--- a/mysql-test/suite/ndb/t/ndb_restore_partition-master.opt	2007-06-27 12:28:02 +0000
+++ b/mysql-test/suite/ndb/t/ndb_restore_partition-master.opt	1970-01-01 00:00:00 +0000
@@ -1 +0,0 @@
---new

=== modified file 'mysql-test/suite/ndb/t/ndb_restore_partition.test'
--- a/mysql-test/suite/ndb/t/ndb_restore_partition.test	2007-07-04 20:38:53 +0000
+++ b/mysql-test/suite/ndb/t/ndb_restore_partition.test	2009-02-03 13:28:13 +0000
@@ -2,6 +2,10 @@
 -- source include/ndb_default_cluster.inc
 -- source include/not_embedded.inc
 
+--disable_query_log
+set new=on;
+--enable_query_log
+
 --disable_warnings
 use test;
 drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;

=== added file 'mysql-test/suite/ndb_binlog/my.cnf'
--- a/mysql-test/suite/ndb_binlog/my.cnf	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_binlog/my.cnf	2009-02-02 10:35:33 +0000
@@ -0,0 +1,23 @@
+!include include/default_mysqld.cnf
+!include include/default_ndbd.cnf
+
+[cluster_config.1]
+NoOfReplicas=                  2
+ndbd=,
+ndb_mgmd=
+mysqld=,
+ndbapi=,,,,,,,,,,,
+
+[mysqld]
+# Make all mysqlds use cluster
+ndbcluster
+ndb-wait-connected=20
+ndb-cluster-connection-pool=3
+
+[ENV]
+NDB_CONNECTSTRING=             @mysql_cluster.1.ndb_connectstring
+MASTER_MYPORT=                 @mysqld.1.1.port
+MASTER_MYPORT1=                @mysqld.2.1.port
+
+NDB_BACKUP_DIR=                @cluster_config.ndbd.1.1.BackupDataDir
+

=== modified file 'mysql-test/suite/ndb_binlog/r/ndb_binlog_basic.result'
--- a/mysql-test/suite/ndb_binlog/r/ndb_binlog_basic.result	2008-03-12 13:13:49 +0000
+++ b/mysql-test/suite/ndb_binlog/r/ndb_binlog_basic.result	2009-01-29 12:44:41 +0000
@@ -4,6 +4,21 @@ create database mysqltest;
 use mysqltest;
 drop database mysqltest;
 use test;
+show create table mysql.ndb_binlog_index;
+Table	Create Table
+ndb_binlog_index	CREATE TABLE `ndb_binlog_index` (
+  `Position` bigint(20) unsigned NOT NULL,
+  `File` varchar(255) NOT NULL,
+  `epoch` bigint(20) unsigned NOT NULL,
+  `inserts` int(10) unsigned NOT NULL,
+  `updates` int(10) unsigned NOT NULL,
+  `deletes` int(10) unsigned NOT NULL,
+  `schemaops` int(10) unsigned NOT NULL,
+  `orig_server_id` int(10) unsigned NOT NULL,
+  `orig_epoch` bigint(20) unsigned NOT NULL,
+  `gci` int(10) unsigned NOT NULL,
+  PRIMARY KEY (`epoch`,`orig_server_id`,`orig_epoch`)
+) ENGINE=MARIA DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
 reset master;
 create table t1 (a int primary key) engine=ndb;
 insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
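
The two orig_* columns and the widened primary key visible above are what
ndb-log-orig adds, so a row in ndb_binlog_index can be traced back to the
server and epoch that originally produced it in a multi-way replication setup.
For reference, the usual consumer of this table is a failover/cutover script;
a typical lookup, sketched with a hypothetical @restore_epoch (e.g. read from
mysql.ndb_apply_status), is:

SELECT @file:=SUBSTRING_INDEX(File, '/', -1), @pos:=Position
FROM mysql.ndb_binlog_index
WHERE epoch > @restore_epoch
ORDER BY epoch ASC LIMIT 1;
# @file/@pos can then seed CHANGE MASTER TO MASTER_LOG_FILE=..., MASTER_LOG_POS=...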

=== modified file 'mysql-test/suite/ndb_binlog/r/ndb_binlog_restore.result'
--- a/mysql-test/suite/ndb_binlog/r/ndb_binlog_restore.result	2008-06-18 15:03:43 +0000
+++ b/mysql-test/suite/ndb_binlog/r/ndb_binlog_restore.result	2009-02-01 21:05:19 +0000
@@ -4,8 +4,6 @@ drop table if exists t1;
 #
 create table t1 (a int key, b int) engine ndb;
 insert into t1 values (1,1);
-@the_backup_id:=backup_id
-<the_backup_id>
 #
 # extra table to be used to ensure data has arrived to binlog
 create table t2 (a int key, b int) engine ndb;
@@ -43,12 +41,12 @@ set SQL_LOG_BIN=0;
 insert into t2 values (2,2);
 show binlog events from <binlog_start>;
 Log_name	Pos	Event_type	Server_id	End_log_pos	Info
-master-bin.000001	#	Query	1	#	BEGIN
-master-bin.000001	#	Table_map	1	#	table_id: # (test.t1)
-master-bin.000001	#	Table_map	1	#	table_id: # (mysql.ndb_apply_status)
-master-bin.000001	#	Write_rows	1	#	table_id: #
-master-bin.000001	#	Write_rows	1	#	table_id: # flags: STMT_END_F
-master-bin.000001	#	Query	1	#	COMMIT
+mysqld-bin.000001	#	Query	1	#	BEGIN
+mysqld-bin.000001	#	Table_map	1	#	table_id: # (test.t1)
+mysqld-bin.000001	#	Table_map	1	#	table_id: # (mysql.ndb_apply_status)
+mysqld-bin.000001	#	Write_rows	1	#	table_id: #
+mysqld-bin.000001	#	Write_rows	1	#	table_id: # flags: STMT_END_F
+mysqld-bin.000001	#	Query	1	#	COMMIT
 drop table t1, t2;
 #
 # Now more complex using "BANK schema" including restore of log

=== modified file 'mysql-test/suite/ndb_binlog/t/ndb_binlog_basic.test'
--- a/mysql-test/suite/ndb_binlog/t/ndb_binlog_basic.test	2008-02-25 13:50:20 +0000
+++ b/mysql-test/suite/ndb_binlog/t/ndb_binlog_basic.test	2009-01-29 12:44:41 +0000
@@ -10,6 +10,9 @@ drop database mysqltest;
 use test;
 --enable_warnings
 
+# check type and schema for ndb_binlog_index
+show create table mysql.ndb_binlog_index;
+
 #
 # basic insert, update, delete test, alter, rename, drop
 # check that ndb_binlog_index gets the right info

=== added file 'mysql-test/suite/ndb_team/my.cnf'
--- a/mysql-test/suite/ndb_team/my.cnf	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_team/my.cnf	2009-02-02 15:58:48 +0000
@@ -0,0 +1,89 @@
+!include include/default_mysqld.cnf
+!include include/default_ndbd.cnf
+
+[cluster_config.1]
+NoOfReplicas=                  2
+ndbd=,
+ndb_mgmd=
+mysqld=,
+ndbapi=,,,,,,,,,
+
+[cluster_config.slave]
+NoOfReplicas=                  1
+MaxNoOfConcurrentTransactions= 64
+TimeBetweenGlobalCheckpoints= 500
+TimeBetweenEpochs= 0
+NoOfFragmentLogFiles= 8
+FragmentLogFileSize= 6M
+ndbd=
+ndb_mgmd=
+mysqld=
+ndbapi=,,,,
+
+[mysqld]
+# Make all mysqlds use cluster
+ndbcluster
+ndb-wait-connected=20
+ndb-cluster-connection-pool=3
+slave-allow-batching
+ndb-log-orig
+# Turn on bin logging
+log-bin=                       master-bin
+
+# Time to wait for NDB connection before
+# accepting connections client connections
+ndb-wait-connected=            20
+
+ndb-extra-logging
+
+[mysqld.1.1]
+
+[mysqld.1.1]
+
+[mysqld.1.slave]
+
+# Append <testname>-slave.opt file to the list of argument used when
+# starting the mysqld
+#!use-slave-opt
+
+log-bin=                      slave-bin
+relay-log=                    slave-relay-bin
+
+init-rpl-role=                slave
+log-slave-updates
+master-retry-count=           10
+
+# Values reported by slave when it connect to master
+# and shows up in SHOW SLAVE STATUS;
+report-host=                  127.0.0.1
+report-port=                  @mysqld.1.slave.port
+report-user=                  root
+
+loose-skip-innodb
+skip-slave-start
+
+# Directory where slaves find the dumps generated by "load data"
+# on the server. The path need to have constant length otherwise
+# test results will vary, thus a relative path is used.
+slave-load-tmpdir=            ../../../tmp
+
+rpl-recovery-rank=            @mysqld.1.slave.server-id
+
+# Use batching when applying the binlog on slave
+slave-allow-batching
+
+# Write additional info in mysql.ndb_binlog_index
+# to allow multi way replication
+ndb-log-orig
+
+
+[ENV]
+NDB_CONNECTSTRING=            @mysql_cluster.1.ndb_connectstring
+MASTER_MYPORT=                @mysqld.1.1.port
+MASTER_MYPORT1=               @mysqld.2.1.port
+
+NDB_CONNECTSTRING_SLAVE=      @mysql_cluster.slave.ndb_connectstring
+SLAVE_MYPORT=                 @mysqld.1.slave.port
+SLAVE_MYSOCK=                 @mysqld.1.slave.socket
+
+NDB_BACKUP_DIR=               @cluster_config.ndbd.1.1.BackupDataDir

=== modified file 'mysql-test/suite/ndb_team/r/rpl_ndb_extraColMaster.result'
--- a/mysql-test/suite/ndb_team/r/rpl_ndb_extraColMaster.result	2008-02-13 19:52:52 +0000
+++ b/mysql-test/suite/ndb_team/r/rpl_ndb_extraColMaster.result	2009-02-02 15:58:48 +0000
@@ -133,6 +133,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	0
 Last_SQL_Error	
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 
 
 ***** Testing Altering table def scenario *****
@@ -507,6 +509,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	0
 Last_SQL_Error	
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 
 ****************************************
 * columns in master at middle of table *
@@ -581,6 +585,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1535
 Last_SQL_Error	Table definition on master and slave does not match: Column 2 type mismatch - received type 5, test.t10 has type 254
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2;
 START SLAVE;
 
@@ -656,6 +662,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1535
 Last_SQL_Error	Table definition on master and slave does not match: Column 2 type mismatch - received type 252, test.t11 has type 15
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2;
 START SLAVE;
 
@@ -807,6 +815,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1091
 Last_SQL_Error	Error 'Can't DROP 'c7'; check that column/key exists' on query. Default database: 'test'. Query: 'ALTER TABLE t14 DROP COLUMN c7'
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 STOP SLAVE;
 RESET SLAVE;
 
@@ -893,6 +903,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1054
 Last_SQL_Error	Error 'Unknown column 'c7' in 't15'' on query. Default database: 'test'. Query: 'ALTER TABLE t15 ADD COLUMN c2 DECIMAL(8,2) AFTER c7'
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 STOP SLAVE;
 RESET SLAVE;
 
@@ -979,6 +991,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1072
 Last_SQL_Error	Error 'Key column 'c6' doesn't exist in table' on query. Default database: 'test'. Query: 'CREATE INDEX part_of_c6 ON t16 (c6)'
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 STOP SLAVE;
 RESET SLAVE;
 
@@ -1272,6 +1286,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	0
 Last_SQL_Error	
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 
 
 ***** Testing Altering table def scenario *****
@@ -1646,6 +1662,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	0
 Last_SQL_Error	
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 
 ****************************************
 * columns in master at middle of table *
@@ -1720,6 +1738,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1535
 Last_SQL_Error	Table definition on master and slave does not match: Column 2 type mismatch - received type 5, test.t10 has type 254
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2;
 START SLAVE;
 
@@ -1795,6 +1815,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1535
 Last_SQL_Error	Table definition on master and slave does not match: Column 2 type mismatch - received type 252, test.t11 has type 15
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 SET GLOBAL SQL_SLAVE_SKIP_COUNTER=2;
 START SLAVE;
 
@@ -1946,6 +1968,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1091
 Last_SQL_Error	Error 'Can't DROP 'c7'; check that column/key exists' on query. Default database: 'test'. Query: 'ALTER TABLE t14 DROP COLUMN c7'
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 STOP SLAVE;
 RESET SLAVE;
 
@@ -2032,6 +2056,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1054
 Last_SQL_Error	Error 'Unknown column 'c7' in 't15'' on query. Default database: 'test'. Query: 'ALTER TABLE t15 ADD COLUMN c2 DECIMAL(8,2) AFTER c7'
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 STOP SLAVE;
 RESET SLAVE;
 
@@ -2118,6 +2144,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	1072
 Last_SQL_Error	Error 'Key column 'c6' doesn't exist in table' on query. Default database: 'test'. Query: 'CREATE INDEX part_of_c6 ON t16 (c6)'
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 STOP SLAVE;
 RESET SLAVE;
 

=== modified file 'mysql-test/suite/ndb_team/t/rpl_ndb_dd_advance.test'
--- a/mysql-test/suite/ndb_team/t/rpl_ndb_dd_advance.test	2008-12-24 10:48:24 +0000
+++ b/mysql-test/suite/ndb_team/t/rpl_ndb_dd_advance.test	2009-02-02 16:02:58 +0000
@@ -287,25 +287,7 @@ while ($j)
 
 SELECT COUNT(*) FROM history;
 
-#RESET MASTER;
---exec $NDB_MGM --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -e "start backup" >> $NDB_TOOLS_OUTPUT
-
---exec $NDB_TOOLS_DIR/ndb_select_all --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -d sys --delimiter=',' SYSTAB_0 | grep 520093696 > $MYSQLTEST_VARDIR/tmp.dat
-
-CREATE TEMPORARY TABLE IF NOT EXISTS mysql.backup_info (id INT, backup_id INT) ENGINE = HEAP;
-
-DELETE FROM mysql.backup_info;
-
-LOAD DATA INFILE '../tmp.dat' INTO TABLE mysql.backup_info FIELDS TERMINATED BY ',';
---remove_file $MYSQLTEST_VARDIR/tmp.dat
---replace_column 1 <the_backup_id>
-
-SELECT @the_backup_id:=backup_id FROM mysql.backup_info;
-
-let the_backup_id=`select @the_backup_id`;
-
-DROP TABLE IF EXISTS mysql.backup_info;
-#RESET MASTER;
+--source include/ndb_backup.inc
 
 --echo ************ Restore the slave ************************
 connection slave;
@@ -375,9 +357,9 @@ SELECT COUNT(*) FROM history;
 
 --echo *** DUMP MASTER & SLAVE FOR COMPARE ********
 
---exec $MYSQL_DUMP  --no-tablespaces --compact --order-by-primary --skip-extended-insert tpcb account teller branch history > $MYSQLTEST_VARDIR/tmp/RPL_DD_ADV_M.sql
+--exec $MYSQL_DUMP --no-tablespaces --compact --order-by-primary --skip-extended-insert tpcb account teller branch history > $MYSQLTEST_VARDIR/tmp/RPL_DD_ADV_M.sql
 
---exec $MYSQL_DUMP_SLAVE  --no-tablespaces --compact --order-by-primary --skip-extended-insert tpcb account teller branch history > $MYSQLTEST_VARDIR/tmp/RPL_DD_ADV_S.sql
+--exec $MYSQL_DUMP_SLAVE --no-tablespaces --compact --order-by-primary --skip-extended-insert tpcb account teller branch history > $MYSQLTEST_VARDIR/tmp/RPL_DD_ADV_S.sql
 
 --echo *************** TEST 2 CLEANUP SECTION ********************
 connection master;

=== modified file 'mysql-test/suite/parts/r/partition_auto_increment_ndb.result'
--- a/mysql-test/suite/parts/r/partition_auto_increment_ndb.result	2008-11-05 20:13:54 +0000
+++ b/mysql-test/suite/parts/r/partition_auto_increment_ndb.result	2009-02-03 14:17:59 +0000
@@ -122,7 +122,7 @@ INSERT INTO t1 VALUES (NULL);
 DELETE FROM t1 WHERE c1 >= 100;
 OPTIMIZE TABLE t1;
 Table	Op	Msg_type	Msg_text
-test.t1	optimize	note	The storage engine for the table doesn't support optimize
+test.t1	optimize	status	OK
 SHOW CREATE TABLE t1;
 Table	Create Table
 t1	CREATE TABLE `t1` (
@@ -389,7 +389,7 @@ INSERT INTO t1 VALUES (NULL);
 DELETE FROM t1 WHERE c1 >= 100;
 OPTIMIZE TABLE t1;
 Table	Op	Msg_type	Msg_text
-test.t1	optimize	note	The storage engine for the table doesn't support optimize
+test.t1	optimize	status	OK
 SHOW CREATE TABLE t1;
 Table	Create Table
 t1	CREATE TABLE `t1` (

=== modified file 'mysql-test/suite/rpl_ndb/my.cnf'
--- a/mysql-test/suite/rpl_ndb/my.cnf	2008-05-09 15:28:34 +0000
+++ b/mysql-test/suite/rpl_ndb/my.cnf	2009-02-02 15:58:48 +0000
@@ -6,6 +6,7 @@ NoOfReplicas=                  2
 ndbd=,
 ndb_mgmd=
 mysqld=,
+ndbapi=,,,,,,,,,
 
 [cluster_config.slave]
 NoOfReplicas=                  1
@@ -17,10 +18,15 @@ FragmentLogFileSize= 6M
 ndbd=
 ndb_mgmd=
 mysqld=
+ndbapi=,,,,
 
 [mysqld]
 # Make all mysqlds use cluster
 ndbcluster
+ndb-wait-connected=20
+ndb-cluster-connection-pool=3
+slave-allow-batching
+ndb-log-orig
 # Turn on bin logging
 log-bin=                       master-bin
 

=== added file 'mysql-test/suite/rpl_ndb_big/my.cnf'
--- a/mysql-test/suite/rpl_ndb_big/my.cnf	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb_big/my.cnf	2009-02-02 22:37:44 +0000
@@ -0,0 +1,89 @@
+!include include/default_mysqld.cnf
+!include include/default_ndbd.cnf
+
+[cluster_config.1]
+NoOfReplicas=                  2
+ndbd=,
+ndb_mgmd=
+mysqld=,
+ndbapi=,,,,,,,,,
+
+[cluster_config.slave]
+NoOfReplicas=                  1
+MaxNoOfConcurrentTransactions= 64
+TimeBetweenGlobalCheckpoints= 500
+TimeBetweenEpochs= 0
+NoOfFragmentLogFiles= 8
+FragmentLogFileSize= 6M
+ndbd=
+ndb_mgmd=
+mysqld=
+ndbapi=,,,,
+
+[mysqld]
+# Make all mysqlds use cluster
+ndbcluster
+ndb-wait-connected=20
+ndb-cluster-connection-pool=3
+slave-allow-batching
+ndb-log-orig
+# Turn on bin logging
+log-bin=                       master-bin
+
+# Time to wait for NDB connection before
+# accepting client connections
+ndb-wait-connected=            20
+
+ndb-extra-logging
+
+[mysqld.1.1]
+
+[mysqld.2.1]
+
+[mysqld.1.slave]
+
+# Append <testname>-slave.opt file to the list of argument used when
+# starting the mysqld
+#!use-slave-opt
+
+log-bin=                      slave-bin
+relay-log=                    slave-relay-bin
+
+init-rpl-role=                slave
+log-slave-updates
+master-retry-count=           10
+
+# Values reported by the slave when it connects to the master;
+# they show up in SHOW SLAVE STATUS
+report-host=                  127.0.0.1
+report-port=                  @mysqld.1.slave.port
+report-user=                  root
+
+loose-skip-innodb
+skip-slave-start
+
+# Directory where slaves find the dumps generated by "load data"
+# on the server. The path needs to have a constant length, otherwise
+# test results will vary, thus a relative path is used.
+slave-load-tmpdir=            ../../../tmp
+
+rpl-recovery-rank=            @mysqld.1.slave.server-id
+
+# Use batching when applying the binlog on slave
+slave-allow-batching
+
+# Write additional info in mysql.ndb_binlog_index
+# to allow multi way replication
+ndb-log-orig
+
+
+[ENV]
+NDB_CONNECTSTRING=            @mysql_cluster.1.ndb_connectstring
+MASTER_MYPORT=                @mysqld.1.1.port
+MASTER_MYPORT1=               @mysqld.2.1.port
+
+NDB_CONNECTSTRING_SLAVE=      @mysql_cluster.slave.ndb_connectstring
+SLAVE_MYPORT=                 @mysqld.1.slave.port
+SLAVE_MYSOCK=                 @mysqld.1.slave.socket
+
+NDB_BACKUP_DIR=               @cluster_config.ndbd.1.1.BackupDataDir

=== modified file 'mysql-test/suite/rpl_ndb_big/r/rpl_ndb_2innodb.result'
--- a/mysql-test/suite/rpl_ndb_big/r/rpl_ndb_2innodb.result	2008-11-04 11:30:00 +0000
+++ b/mysql-test/suite/rpl_ndb_big/r/rpl_ndb_2innodb.result	2009-02-04 12:35:46 +0000
@@ -924,4 +924,5 @@ DELETE FROM t1;
 --- End test 5 key partition testing ---
 --- Do Cleanup ---
 DROP TABLE IF EXISTS t1;
+set @@global.slave_exec_mode= 'STRICT';
 drop table mysql.ndb_apply_status;

=== modified file 'mysql-test/suite/rpl_ndb_big/r/rpl_ndb_2myisam.result'
--- a/mysql-test/suite/rpl_ndb_big/r/rpl_ndb_2myisam.result	2008-11-04 11:30:00 +0000
+++ b/mysql-test/suite/rpl_ndb_big/r/rpl_ndb_2myisam.result	2009-02-04 12:35:46 +0000
@@ -924,4 +924,5 @@ DELETE FROM t1;
 --- End test 5 key partition testing ---
 --- Do Cleanup ---
 DROP TABLE IF EXISTS t1;
+set @@global.slave_exec_mode= 'STRICT';
 drop table mysql.ndb_apply_status;

=== modified file 'mysql-test/suite/rpl_ndb_big/r/rpl_ndb_sync.result'
--- a/mysql-test/suite/rpl_ndb_big/r/rpl_ndb_sync.result	2008-10-29 08:45:14 +0000
+++ b/mysql-test/suite/rpl_ndb_big/r/rpl_ndb_sync.result	2009-02-03 06:41:56 +0000
@@ -106,6 +106,8 @@ Last_IO_Errno	<Last_IO_Errno>
 Last_IO_Error	<Last_IO_Error>
 Last_SQL_Errno	0
 Last_SQL_Error	
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;
 hex(c1)	hex(c2)	c3
 1	1	row1

=== modified file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2innodb-master.opt'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2innodb-master.opt	2008-09-11 08:01:28 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2innodb-master.opt	2009-02-04 12:35:46 +0000
@@ -1 +1 @@
---new --default-storage-engine=ndbcluster --ndb_log_updated_only=0 
+--new --ndb-log-updated-only=0 

=== modified file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2innodb.test'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2innodb.test	2008-10-29 08:45:14 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2innodb.test	2009-02-04 12:35:46 +0000
@@ -29,7 +29,14 @@ CREATE TABLE mysql.ndb_apply_status
                    end_pos BIGINT UNSIGNED NOT NULL,
                    PRIMARY KEY USING HASH (server_id)) ENGINE=INNODB;
 
+-- connection master
+--disable_query_log
+set new=on;
+set storage_engine=ndbcluster;
+--enable_query_log
+
 --source extra/rpl_tests/rpl_ndb_2multi_eng.test
 
 --connection slave
+set @@global.slave_exec_mode= 'STRICT';
 drop table mysql.ndb_apply_status;

=== modified file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2myisam-master.opt'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2myisam-master.opt	2008-09-11 08:01:28 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2myisam-master.opt	2009-02-04 12:35:46 +0000
@@ -1 +1 @@
---new --default-storage-engine=ndbcluster --ndb_log_updated_only=0
+--ndb_log_updated_only=0

=== modified file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2myisam.test'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2myisam.test	2008-10-29 08:45:14 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_2myisam.test	2009-02-04 12:35:46 +0000
@@ -28,7 +28,14 @@ CREATE TABLE mysql.ndb_apply_status
                    end_pos BIGINT UNSIGNED NOT NULL,
                    PRIMARY KEY USING HASH (server_id)) ENGINE=MYISAM;
 
+-- connection master
+--disable_query_log
+set new=on;
+set storage_engine=ndbcluster;
+--enable_query_log
+
 --source extra/rpl_tests/rpl_ndb_2multi_eng.test
 
 --connection slave
+set @@global.slave_exec_mode= 'STRICT';
 drop table mysql.ndb_apply_status;

=== modified file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_apply_status.test'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_apply_status.test	2008-12-13 11:02:16 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_apply_status.test	2009-02-03 14:01:50 +0000
@@ -16,7 +16,7 @@ select * from mysql.ndb_apply_status;
 
 
 -- source include/have_ndb.inc
--- source include/have_binlog_format_row.inc
+-- source include/have_binlog_format_mixed_or_row.inc
 -- source include/ndb_master-slave.inc
 
 #

=== added file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-master.opt'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-master.opt	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-master.opt	2009-02-04 14:48:13 +0000
@@ -0,0 +1 @@
+--new=true

=== removed file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-master.opt'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-master.opt	2008-09-11 08:01:28 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-master.opt	1970-01-01 00:00:00 +0000
@@ -1 +0,0 @@
---new=true

=== added file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-slave.opt'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-slave.opt	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-slave.opt	2009-02-04 14:48:13 +0000
@@ -0,0 +1 @@
+--new=true

=== removed file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-slave.opt'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-slave.opt	2008-09-11 08:01:28 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_dd_partitions-slave.opt	1970-01-01 00:00:00 +0000
@@ -1 +0,0 @@
---new=true

=== removed file 'mysql-test/suite/rpl_ndb_big/t/rpl_truncate_7ndb_2-master.opt'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_truncate_7ndb_2-master.opt	2008-11-22 15:24:06 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_truncate_7ndb_2-master.opt	1970-01-01 00:00:00 +0000
@@ -1 +0,0 @@
---binlog-format=mixed

=== modified file 'mysql-test/suite/rpl_ndb_big/t/rpl_truncate_7ndb_2.test'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_truncate_7ndb_2.test	2008-12-24 10:48:24 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_truncate_7ndb_2.test	2009-02-03 14:01:50 +0000
@@ -6,6 +6,5 @@
 # Change Author:  pcrews
 # Change:  Moved test to rpl_ndb suite, updated location of --source .test file
 
---source include/have_binlog_format_mixed.inc
---source include/big_test.inc
+--source include/have_binlog_format_mixed_or_row.inc
 --source suite/rpl_ndb_big/t/rpl_truncate_7ndb.test

=== modified file 'mysql-test/t/partition_mgm.test'
--- a/mysql-test/t/partition_mgm.test	2009-01-26 16:32:29 +0000
+++ b/mysql-test/t/partition_mgm.test	2009-02-04 13:08:05 +0000
@@ -11,7 +11,12 @@ ENGINE MYISAM
 PARTITION BY HASH (a)
 PARTITIONS 1;
 INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
+--error ER_REORG_NO_PARAM_ERROR
 ALTER TABLE t1 REORGANIZE PARTITION;
+--error ER_REORG_NO_PARAM_ERROR
+ALTER ONLINE TABLE t1 REORGANIZE PARTITION;
+--error ER_REORG_NO_PARAM_ERROR
+ALTER OFFLINE TABLE t1 REORGANIZE PARTITION;
 DROP TABLE t1;
 
 #

=== modified file 'scripts/make_binary_distribution.sh'
--- a/scripts/make_binary_distribution.sh	2009-01-31 15:53:35 +0000
+++ b/scripts/make_binary_distribution.sh	2009-02-02 15:58:48 +0000
@@ -60,13 +60,16 @@ STRIP=1				# Option ignored
 SILENT=0
 PLATFORM=""
 TMP=/tmp
+NEW_NAME=""			# Final top directory and TAR package name
 SUFFIX=""
+SHORT_PRODUCT_TAG=""		# If don't want server suffix in package name
 NDBCLUSTER=""			# Option ignored
 
 for arg do
   case "$arg" in
     --tmp=*)    TMP=`echo "$arg" | sed -e "s;--tmp=;;"` ;;
     --suffix=*) SUFFIX=`echo "$arg" | sed -e "s;--suffix=;;"` ;;
+    --short-product-tag=*) SHORT_PRODUCT_TAG=`echo "$arg" | sed -e "s;--short-product-tag=;;"` ;;
     --no-strip) STRIP=0 ;;
     --machine=*) machine=`echo "$arg" | sed -e "s;--machine=;;"` ;;
     --platform=*) PLATFORM=`echo "$arg" | sed -e "s;--platform=;;"` ;;
@@ -113,7 +116,11 @@ case $PLATFORM in
 esac
 
 # Change the distribution to a long descriptive name
-NEW_NAME=mysql@MYSQL_SERVER_SUFFIX@-@VERSION@-$PLATFORM$SUFFIX
+if [ x"$SHORT_PRODUCT_TAG" != x"" ] ; then
+  NEW_NAME=mysql-$SHORT_PRODUCT_TAG-@VERSION@-$PLATFORM$SUFFIX
+else
+  NEW_NAME=mysql@MYSQL_SERVER_SUFFIX@-@VERSION@-$PLATFORM$SUFFIX
+fi
 
 # ----------------------------------------------------------------------
 # Define BASE, and remove the old BASE directory if any

=== modified file 'scripts/mysql_system_tables.sql'
--- a/scripts/mysql_system_tables.sql	2008-12-24 10:48:24 +0000
+++ b/scripts/mysql_system_tables.sql	2009-02-02 12:28:30 +0000
@@ -86,5 +86,5 @@ CREATE TABLE IF NOT EXISTS backup_histor
 
 CREATE TABLE IF NOT EXISTS backup_progress ( backup_id BIGINT UNSIGNED NOT NULL COMMENT 'Key for backup_history table entries', object CHAR (30) NOT NULL DEFAULT '' COMMENT 'The object being operated on', start_time datetime NOT NULL DEFAULT 0 COMMENT 'The date/time of start of operation', stop_time datetime NOT NULL DEFAULT 0 COMMENT 'The date/time of end of operation', total_bytes BIGINT NOT NULL DEFAULT 0 COMMENT 'The size of the object in bytes', progress BIGINT UNSIGNED NOT NULL DEFAULT 0 COMMENT 'The number of bytes processed', error_num INT NOT NULL DEFAULT 0 COMMENT 'The error from this run 0 == none', notes CHAR(100) NOT NULL DEFAULT '' COMMENT 'Commentary from the backup engine') ENGINE=CSV DEFAULT CHARACTER SET utf8;
 
-CREATE TABLE IF NOT EXISTS ndb_binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts INT UNSIGNED NOT NULL, updates INT UNSIGNED NOT NULL, deletes INT UNSIGNED NOT NULL, schemaops INT UNSIGNED NOT NULL, orig_server_id INT UNSIGNED NOT NULL, orig_epoch BIGINT UNSIGNED NOT NULL, gci INT UNSIGNED NOT NULL, PRIMARY KEY(epoch, orig_server_id, orig_epoch)) ENGINE=MYISAM;
+CREATE TABLE IF NOT EXISTS ndb_binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts INT UNSIGNED NOT NULL, updates INT UNSIGNED NOT NULL, deletes INT UNSIGNED NOT NULL, schemaops INT UNSIGNED NOT NULL, orig_server_id INT UNSIGNED NOT NULL, orig_epoch BIGINT UNSIGNED NOT NULL, gci INT UNSIGNED NOT NULL, PRIMARY KEY(epoch, orig_server_id, orig_epoch)) ENGINE=MARIA;
 

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2009-01-31 16:21:19 +0000
+++ b/sql/ha_ndbcluster.cc	2009-02-09 13:34:12 +0000
@@ -213,7 +213,10 @@ static int update_status_variables(st_nd
                             ns->connected_port);
   }
   ns->number_of_replicas= 0;
-  ns->number_of_ready_data_nodes= c->get_no_ready();
+  {
+    int n= c->get_no_ready();
+    ns->number_of_ready_data_nodes= n > 0 ?  n : 0;
+  }
   ns->number_of_data_nodes= c->no_db_nodes();
   ns->connect_count= c->get_connect_count();
   return 0;
@@ -2599,7 +2602,7 @@ int ha_ndbcluster::ordered_index_scan(co
   if (lm == NdbOperation::LM_Read)
     options.scan_flags|= NdbScanOperation::SF_KeyInfo;
   if (sorted)
-    options.scan_flags|= NdbScanOperation::SF_OrderBy;
+    options.scan_flags|= NdbScanOperation::SF_OrderByFull;
   if (descending)
     options.scan_flags|= NdbScanOperation::SF_Descending;
   const NdbRecord *key_rec= m_index[active_index].ndb_record_key;
@@ -9276,18 +9279,18 @@ multi_range_row(uchar *p)
 }
 
 /* Get and put upper layer custom char *, use memcpy() for unaligned access. */
-static char *
-multi_range_get_custom(HANDLER_BUFFER *buffer, int range_no)
+static void
+multi_range_get_custom(HANDLER_BUFFER *buffer, int range_no, char **pcustom)
 {
   DBUG_ASSERT(range_no < MRR_MAX_RANGES);
-  return ((char **)(buffer->buffer))[range_no];
+  memcpy(pcustom, (char **)(buffer->buffer) + range_no, sizeof(*pcustom));
 }
 
 static void
 multi_range_put_custom(HANDLER_BUFFER *buffer, int range_no, char *custom)
 {
   DBUG_ASSERT(range_no < MRR_MAX_RANGES);
-  ((char **)(buffer->buffer))[range_no]= custom;
+  memcpy((char **)(buffer->buffer) + range_no, &custom, sizeof(custom));
 }
 
 /*
@@ -9347,12 +9350,10 @@ ha_ndbcluster::multi_range_read_info_con
   KEY* key_info= table->key_info + keyno;
   ulong reclength= table_share->reclength;
   uint entry_size= multi_range_max_entry(key_type, reclength);
-  ulong total_bufsize;
+  ulong total_bufsize= 0;
   uint save_bufsize= *bufsz;
   DBUG_ENTER("ha_ndbcluster::multi_range_read_info_const");
 
-  total_bufsize= multi_range_fixed_size(n_ranges_arg);
-
   seq_it= seq->init(seq_init_param, n_ranges, *flags);
   while (!seq->next(seq_it, &range))
   {
@@ -9383,6 +9384,9 @@ ha_ndbcluster::multi_range_read_info_con
                             reclength);
   }
 
+  /* n_ranges_arg may not be accurate, so use the actual number of ranges counted above */
+  total_bufsize+= multi_range_fixed_size(n_ranges);
+
   if (total_rows != HA_POS_ERROR)
   {
     if (uses_blob_value(table->read_set) ||
@@ -9694,7 +9698,7 @@ int ha_ndbcluster::multi_range_start_ret
         if (lm == NdbOperation::LM_Read)
           options.scan_flags|= NdbScanOperation::SF_KeyInfo;
         if (mrr_is_output_sorted)
-          options.scan_flags|= NdbScanOperation::SF_OrderBy;
+          options.scan_flags|= NdbScanOperation::SF_OrderByFull;
 
         options.parallel=parallelism;
 
@@ -9884,8 +9888,8 @@ int ha_ndbcluster::multi_range_read_next
           m_active_cursor= NULL;
 
           /* Return the record. */
-          *range_info= multi_range_get_custom(multi_range_buffer,
-                                              expected_range_no);
+          multi_range_get_custom(multi_range_buffer,
+                                 expected_range_no, range_info);
           memcpy(table->record[0], multi_range_row(row_buf),
                  table_share->reclength);
           DBUG_RETURN(0);
@@ -9896,8 +9900,8 @@ int ha_ndbcluster::multi_range_read_next
             int res;
             if ((res= read_multi_range_fetch_next()) != 0)
             {
-              *range_info= multi_range_get_custom(multi_range_buffer,
-                                                  expected_range_no);
+              multi_range_get_custom(multi_range_buffer,
+                                     expected_range_no, range_info);
               first_running_range++;
               m_multi_range_result_ptr=
                 multi_range_next_entry(m_multi_range_result_ptr,
@@ -9928,8 +9932,8 @@ int ha_ndbcluster::multi_range_read_next
             */
             if (!mrr_is_output_sorted || expected_range_no == current_range_no)
             {
-              *range_info= multi_range_get_custom(multi_range_buffer,
-                                                  current_range_no);
+              multi_range_get_custom(multi_range_buffer,
+                                     current_range_no, range_info);
               /* Copy out data from the new row. */
               unpack_record(table->record[0], m_next_row);
               /*
@@ -11139,20 +11143,20 @@ int ha_ndbcluster::alter_table_phase1(TH
          goto err;
        }
        /*
-	 If the user has not specified the field format
-	 make it dynamic to enable on-line add attribute
+         If the user has not specified the field format
+         make it dynamic to enable on-line add attribute
        */
        if (field->column_format() == COLUMN_FORMAT_TYPE_DEFAULT &&
            create_info->row_type == ROW_TYPE_DEFAULT &&
            col.getDynamic())
        {
-	 push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+         push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                              ER_ILLEGAL_HA_CREATE_OPTION,
-		             "Converted FIXED field to DYNAMIC "
-			     "to enable on-line ADD COLUMN",
+                             "Converted FIXED field to DYNAMIC "
+                             "to enable on-line ADD COLUMN",
                              field->field_name);
-	}
-        new_tab->addColumn(col);
+       }
+       new_tab->addColumn(col);
      }
   }
 
@@ -11232,8 +11236,11 @@ int ha_ndbcluster::alter_table_phase2(TH
 
 #ifdef HAVE_NDB_BINLOG
   if (!ndbcluster_has_global_schema_lock(get_thd_ndb(thd)))
-    DBUG_RETURN(ndbcluster_no_global_schema_lock_abort
-                (thd, "ha_ndbcluster::alter_table_phase2"));
+  {
+    error= ndbcluster_no_global_schema_lock_abort
+      (thd, "ha_ndbcluster::alter_table_phase2");
+    goto err;
+  }
 #endif
 
   if ((*alter_flags & dropping).is_set())
@@ -11253,11 +11260,11 @@ int ha_ndbcluster::alter_table_phase2(TH
  err:
   if (error)
   {
-    set_ndb_share_state(m_share, NSS_INITIAL);
     /* ndb_share reference schema free */
     DBUG_PRINT("NDB_SHARE", ("%s binlog schema free  use_count: %u",
                              m_share->key, m_share->use_count));
   }
+  set_ndb_share_state(m_share, NSS_INITIAL);
   free_share(&m_share); // Decrease ref_count
   delete alter_data;
   DBUG_RETURN(error);

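The memcpy() rewrite of multi_range_get_custom()/multi_range_put_custom() above avoids dereferencing the HANDLER_BUFFER storage as a char* array, since that storage is not guaranteed to be suitably aligned. A minimal stand-alone sketch of the same pattern (buffer size and names here are illustrative, not the server's):

#include <string.h>
#include <stdio.h>

/* Store and fetch a char* at a possibly unaligned position with
   memcpy(); a direct ((char **)buf)[slot] access would be undefined
   behaviour on strict-alignment platforms. */
static void put_ptr(unsigned char *buf, int slot, char *p)
{
  memcpy(buf + slot * sizeof(char *), &p, sizeof(p));
}

static char *get_ptr(const unsigned char *buf, int slot)
{
  char *p;
  memcpy(&p, buf + slot * sizeof(char *), sizeof(p));
  return p;
}

int main()
{
  unsigned char buffer[4 * sizeof(char *)];
  char payload[] = "range-0 custom data";
  put_ptr(buffer, 0, payload);
  printf("%s\n", get_ptr(buffer, 0)); /* prints the payload */
  return 0;
}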
=== modified file 'sql/ha_ndbcluster_binlog.cc'
--- a/sql/ha_ndbcluster_binlog.cc	2009-01-31 16:21:19 +0000
+++ b/sql/ha_ndbcluster_binlog.cc	2009-02-05 12:49:39 +0000
@@ -37,12 +37,15 @@
 #endif
 
 extern my_bool opt_ndb_log_orig;
+extern my_bool opt_ndb_log_bin;
 
 extern my_bool opt_ndb_log_update_as_write;
 extern my_bool opt_ndb_log_updated_only;
 
 extern my_bool ndbcluster_silent;
 
+extern my_bool ndb_log_binlog_index;
+
 /*
   defines for cluster replication table names
 */
@@ -3068,21 +3071,23 @@ ndb_add_ndb_binlog_index(THD *thd, ndb_b
   */
   do
   {
+    ulonglong epoch= 0, orig_epoch= 0;
+    uint orig_server_id= 0;
     empty_record(ndb_binlog_index);
 
     ndb_binlog_index->field[0]->store(first->master_log_pos);
     ndb_binlog_index->field[1]->store(first->master_log_file,
                                       strlen(first->master_log_file),
                                       &my_charset_bin);
-    ndb_binlog_index->field[2]->store(first->epoch);
+    ndb_binlog_index->field[2]->store(epoch= first->epoch);
     if (ndb_binlog_index->s->fields > 7)
     {
       ndb_binlog_index->field[3]->store(row->n_inserts);
       ndb_binlog_index->field[4]->store(row->n_updates);
       ndb_binlog_index->field[5]->store(row->n_deletes);
       ndb_binlog_index->field[6]->store(row->n_schemaops);
-      ndb_binlog_index->field[7]->store(row->orig_server_id);
-      ndb_binlog_index->field[8]->store(row->orig_epoch);
+      ndb_binlog_index->field[7]->store(orig_server_id= row->orig_server_id);
+      ndb_binlog_index->field[8]->store(orig_epoch= row->orig_epoch);
       ndb_binlog_index->field[9]->store(first->gci);
       row= row->next;
     }
@@ -3103,7 +3108,17 @@ ndb_add_ndb_binlog_index(THD *thd, ndb_b
 
     if ((error= ndb_binlog_index->file->ha_write_row(ndb_binlog_index->record[0])))
     {
-      sql_print_error("NDB Binlog: Writing row to ndb_binlog_index: %d", error);
+      char tmp[128];
+      if (ndb_binlog_index->s->fields > 7)
+        my_snprintf(tmp, sizeof(tmp), "%u/%u,%u,%u/%u",
+                    uint(epoch >> 32), uint(epoch),
+                    orig_server_id,
+                    uint(orig_epoch >> 32), uint(orig_epoch));
+
+      else
+        my_snprintf(tmp, sizeof(tmp), "%u/%u", uint(epoch >> 32), uint(epoch));
+      sql_print_error("NDB Binlog: Writing row (%s) to ndb_binlog_index: %d",
+                      tmp, error);
       error= -1;
       goto add_ndb_binlog_index_err;
     }
@@ -4667,7 +4682,6 @@ pthread_handler_t ndb_binlog_thread_func
   Ndb *i_ndb= 0;
   Ndb *s_ndb= 0;
   Thd_ndb *thd_ndb=0;
-  int ndb_update_ndb_binlog_index= 1;
   injector *inj= injector::instance();
   uint incident_id= 0;
 
@@ -4764,7 +4778,7 @@ pthread_handler_t ndb_binlog_thread_func
   injector_ndb= i_ndb;
   schema_ndb= s_ndb;
 
-  if (opt_bin_log)
+  if (opt_bin_log && opt_ndb_log_bin)
   {
     ndb_binlog_running= TRUE;
   }
@@ -5093,6 +5107,44 @@ restart:
     {
       DBUG_PRINT("info", ("pollEvents res: %d", res));
       THD_SET_PROC_INFO(thd, "Processing events");
+      uchar apply_status_buf[512];
+      TABLE *apply_status_table= NULL;
+      if (ndb_apply_status_share)
+      {
+        /*
+          We construct the buffer to write the apply status binlog
+          event here, as the table->record[0] buffer is referenced
+          by the apply status event operation, and will be filled
+          with data at the nextEvent call if the first event should
+          happen to be from the apply status table
+        */
+        Ndb_event_data *event_data= ndb_apply_status_share->event_data;
+        if (!event_data)
+        {
+          DBUG_ASSERT(ndb_apply_status_share->op);
+          event_data= 
+            (Ndb_event_data *) ndb_apply_status_share->op->getCustomData();
+          DBUG_ASSERT(event_data);
+        }
+        apply_status_table= event_data->table;
+
+        /* 
+           Initialize apply_status_table->record[0]
+        */
+        empty_record(apply_status_table);
+
+        apply_status_table->field[0]->store((longlong)::server_id);
+        /*
+          gci is added later, just before writing to binlog as gci
+          is unknown here
+        */
+        apply_status_table->field[2]->store("", 0, &my_charset_bin);
+        apply_status_table->field[3]->store((longlong)0);
+        apply_status_table->field[4]->store((longlong)0);
+        DBUG_ASSERT(sizeof(apply_status_buf) >= apply_status_table->s->reclength);
+        memcpy(apply_status_buf, apply_status_table->record[0],
+               apply_status_table->s->reclength);
+      }
       NdbEventOperation *pOp= i_ndb->nextEvent();
       ndb_binlog_index_row _row;
       while (pOp != NULL)
@@ -5197,44 +5249,30 @@ restart:
         }
         if (trans.good())
         {
-          if (ndb_apply_status_share)
+          if (apply_status_table)
           {
-            Ndb_event_data *event_data= 0;
-            if (ndb_apply_status_share->event_data)
-            {
-              event_data= ndb_apply_status_share->event_data;
-            }
-            else if (ndb_apply_status_share->op)
-            {
-              event_data= 
-                (Ndb_event_data *) ndb_apply_status_share->op->getCustomData();
-            }
-            DBUG_ASSERT(event_data);
-            TABLE *table= event_data->table;
-
 #ifndef DBUG_OFF
-            const LEX_STRING& name= table->s->table_name;
+            const LEX_STRING& name= apply_status_table->s->table_name;
             DBUG_PRINT("info", ("use_table: %.*s",
                                 (int) name.length, name.str));
 #endif
-            injector::transaction::table tbl(table, TRUE);
+            injector::transaction::table tbl(apply_status_table, TRUE);
             IF_DBUG(int ret=) trans.use_table(::server_id, tbl);
             DBUG_ASSERT(ret == 0);
 
-	    /* 
-	       Intialize table->record[0] 
-	    */
-	    empty_record(table);
-
-            table->field[0]->store((longlong)::server_id);
-            table->field[1]->store((longlong)gci);
-            table->field[2]->store("", 0, &my_charset_bin);
-            table->field[3]->store((longlong)0);
-            table->field[4]->store((longlong)0);
+            /* add the gci to the record */
+            Field *field= apply_status_table->field[1];
+            my_ptrdiff_t row_offset=
+              (my_ptrdiff_t) (apply_status_buf - apply_status_table->record[0]);
+            field->move_field_offset(row_offset);
+            field->store((longlong)gci);
+            field->move_field_offset(-row_offset);
+
             trans.write_row(::server_id,
-                            injector::transaction::table(table, TRUE),
-                            &table->s->all_set, table->s->fields,
-                            table->record[0]);
+                            injector::transaction::table(apply_status_table, TRUE),
+                            &apply_status_table->s->all_set,
+                            apply_status_table->s->fields,
+                            apply_status_buf);
           }
           else
           {
@@ -5360,7 +5398,7 @@ restart:
           rows->master_log_pos= start.file_pos();
 
           DBUG_PRINT("info", ("COMMIT gci: %lu", (ulong) gci));
-          if (ndb_update_ndb_binlog_index)
+          if (ndb_log_binlog_index)
           {
             ndb_add_ndb_binlog_index(thd, rows);
           }

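The improved ndb_binlog_index error message above prints the 64-bit epoch as its two 32-bit halves, "hi/lo". A small sketch of that formatting, using a made-up epoch value:

#include <stdio.h>

int main()
{
  /* Hypothetical epoch: high half 7, low half 3. */
  unsigned long long epoch = (7ULL << 32) | 3ULL;
  char tmp[64];
  snprintf(tmp, sizeof(tmp), "%u/%u",
           (unsigned)(epoch >> 32), (unsigned)(epoch & 0xffffffffULL));
  printf("epoch %s\n", tmp); /* prints "epoch 7/3" */
  return 0;
}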
=== modified file 'sql/mysqld.cc'
--- a/sql/mysqld.cc	2009-01-29 21:17:59 +0000
+++ b/sql/mysqld.cc	2009-02-04 12:34:03 +0000
@@ -488,9 +488,11 @@ ulong ndb_extra_logging;
 #ifdef HAVE_NDB_BINLOG
 ulong ndb_report_thresh_binlog_epoch_slip;
 ulong ndb_report_thresh_binlog_mem_usage;
+my_bool ndb_log_binlog_index;
 my_bool opt_ndb_log_update_as_write;
 my_bool opt_ndb_log_updated_only;
 my_bool opt_ndb_log_orig;
+my_bool opt_ndb_log_bin;
 #endif
 
 extern const char *ndb_distribution_names[];
@@ -5793,7 +5795,7 @@ enum options_mysqld
   OPT_NDB_REPORT_THRESH_BINLOG_MEM_USAGE,
   OPT_NDB_USE_COPYING_ALTER_TABLE,
   OPT_NDB_LOG_UPDATE_AS_WRITE, OPT_NDB_LOG_UPDATED_ONLY,
-  OPT_NDB_LOG_ORIG,
+  OPT_NDB_LOG_ORIG, OPT_NDB_LOG_BIN, OPT_NDB_LOG_BINLOG_INDEX,
   OPT_SKIP_SAFEMALLOC, OPT_MUTEX_DEADLOCK_DETECTOR,
   OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_COMPLETION_TYPE,
   OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS,
@@ -6418,6 +6420,16 @@ thread is in the master's binlogs.",
    (uchar**) &opt_ndb_log_orig,
    (uchar**) &opt_ndb_log_orig,
    0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0},
+  {"ndb-log-bin", OPT_NDB_LOG_BIN,
+   "Log ndb tables in the binary log. Option only has meaning if "
+   "the binary log has been turned on for the server.",
+   (uchar**) &opt_ndb_log_bin, (uchar**) &opt_ndb_log_bin,
+   0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
+  {"ndb-log-binlog-index", OPT_NDB_LOG_BINLOG_INDEX,
+   "Insert mapping between epochs and binlog positions into the "
+   "ndb_binlog_index table.",
+   (uchar**) &ndb_log_binlog_index, (uchar**) &ndb_log_binlog_index,
+   0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
 #endif
   {"ndb-use-exact-count", OPT_NDB_USE_EXACT_COUNT,
    "Use exact records count during query planning and for fast "

=== modified file 'sql/set_var.cc'
--- a/sql/set_var.cc	2009-02-04 10:49:16 +0000
+++ b/sql/set_var.cc	2009-02-05 12:49:39 +0000
@@ -73,6 +73,7 @@ extern ulong ndb_extra_logging;
 #ifdef HAVE_NDB_BINLOG
 extern ulong ndb_report_thresh_binlog_epoch_slip;
 extern ulong ndb_report_thresh_binlog_mem_usage;
+extern my_bool ndb_log_binlog_index;
 #endif
 
 extern CHARSET_INFO *character_set_filesystem;
@@ -710,6 +711,8 @@ sys_ndb_report_thresh_binlog_epoch_slip(
 static sys_var_long_ptr
 sys_ndb_report_thresh_binlog_mem_usage(&vars, "ndb_report_thresh_binlog_mem_usage",
                                        &ndb_report_thresh_binlog_mem_usage);
+static sys_var_bool_ptr
+sys_ndb_log_binlog_index(&vars, "ndb_log_binlog_index", &ndb_log_binlog_index);
 #endif
 static sys_var_thd_bool
 sys_ndb_use_exact_count(&vars, "ndb_use_exact_count", &SV::ndb_use_exact_count);

=== modified file 'sql/slave.cc'
--- a/sql/slave.cc	2009-01-26 16:03:39 +0000
+++ b/sql/slave.cc	2009-02-04 12:35:46 +0000
@@ -2782,11 +2782,15 @@ Slave SQL thread aborted. Can't execute 
 
   /* Read queries from the IO/THREAD until this thread is killed */
 
+  thd->variables.new_mode= global_system_variables.new_mode;
+
   while (!sql_slave_killed(thd,rli))
   {
     thd_proc_info(thd, "Reading event from the relay log");
     DBUG_ASSERT(rli->sql_thd == thd);
     THD_CHECK_SENTRY(thd);
+
+    sql_print_information("new_mode %u", thd->variables.new_mode);
     if (exec_relay_log_event(thd,rli))
     {
       DBUG_PRINT("info", ("exec_relay_log_event() failed"));

=== modified file 'sql/sql_partition.cc'
--- a/sql/sql_partition.cc	2009-01-09 13:25:38 +0000
+++ b/sql/sql_partition.cc	2009-02-04 13:08:05 +0000
@@ -4202,43 +4202,25 @@ uint prep_alter_part_table(THD *thd, TAB
     }
     if (alter_info->flags & ALTER_TABLE_REORG)
     {
-      uint new_part_no, curr_part_no;
+      DBUG_ASSERT(table->s->db_type()->partition_flags);
+      /* 'ALTER TABLE t REORG PARTITION' only allowed with auto partition */
       if (tab_part_info->part_type != HASH_PARTITION ||
-          tab_part_info->use_default_no_partitions)
+          !tab_part_info->use_default_no_partitions ||
+          (table->s->db_type()->partition_flags &&
+           !(table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION)))
       {
         my_error(ER_REORG_NO_PARAM_ERROR, MYF(0));
         DBUG_RETURN(TRUE);
       }
-      new_part_no= table->file->get_default_no_partitions(create_info);
-      curr_part_no= tab_part_info->no_parts;
-      if (new_part_no == curr_part_no)
-      {
-        /*
-          No change is needed, we will have the same number of partitions
-          after the change as before. Thus we can reply ok immediately
-          without any changes at all.
-        */
-        *fast_alter_partition= TRUE;
-        DBUG_RETURN(FALSE);
-      }
-      else if (new_part_no > curr_part_no)
-      {
-        /*
-          We will add more partitions, we use the ADD PARTITION without
-          setting the flag for no default number of partitions
-        */
-        alter_info->flags|= ALTER_ADD_PARTITION;
-        thd->work_part_info->no_parts= new_part_no - curr_part_no;
-      }
-      else
-      {
-        /*
-          We will remove hash partitions, we use the COALESCE PARTITION
-          without setting the flag for no default number of partitions
-        */
-        alter_info->flags|= ALTER_COALESCE_PARTITION;
-        alter_info->no_parts= curr_part_no - new_part_no;
-      }
+      DBUG_ASSERT(!alt_part_info ||
+                  alt_part_info->part_type == NOT_A_PARTITION);
+      /*
+        This is really a table operation, handled by native engines.
+        NDB can handle this fast/online. Skip the partitioning path.
+      */
+      if (alt_part_info)
+        thd->work_part_info= NULL;
+      DBUG_RETURN(FALSE);
     }
     if (table->s->db_type()->alter_partition_flags &&
         (!(flags= table->s->db_type()->alter_partition_flags())))

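The rewritten ALTER_TABLE_REORG branch above rejects a parameter-less REORGANIZE PARTITION unless the engine auto-partitions (HA_USE_AUTO_PARTITION, i.e. NDB). A toy version of that capability test; the flag value and helper are stand-ins, not the server's definitions:

#include <stdio.h>

static const unsigned HA_USE_AUTO_PARTITION = 1u << 0; /* assumed bit value */

/* Mirrors the condition added above: hash partitioning with a default
   partition count, on an engine that auto-partitions. */
static bool reorg_without_params_ok(bool hash_part, bool default_no_parts,
                                    unsigned engine_partition_flags)
{
  return hash_part && default_no_parts &&
         (engine_partition_flags & HA_USE_AUTO_PARTITION) != 0;
}

int main()
{
  printf("ndb-like engine: %d\n",
         reorg_without_params_ok(true, true, HA_USE_AUTO_PARTITION));
  printf("myisam-like engine: %d\n",
         reorg_without_params_ok(true, true, 0)); /* -> ER_REORG_NO_PARAM_ERROR */
  return 0;
}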
=== modified file 'storage/csv/ha_tina.cc'
--- a/storage/csv/ha_tina.cc	2009-01-31 16:21:19 +0000
+++ b/storage/csv/ha_tina.cc	2009-02-05 12:49:39 +0000
@@ -1686,10 +1686,10 @@ int ha_tina::check(THD* thd, HA_CHECK_OP
 bool ha_tina::check_if_incompatible_data(HA_CREATE_INFO *info,
 					   uint table_changes)
 {
-  if (table_changes == IS_EQUAL_NO)  
+  if (table_changes == IS_EQUAL_NO)
     return COMPATIBLE_DATA_NO;
   else
-    return COMPATIBLE_DATA_YES;    
+    return COMPATIBLE_DATA_YES;
 }
 
 struct st_mysql_storage_engine csv_storage_engine=

=== modified file 'storage/ndb/include/mgmapi/mgmapi.h'
--- a/storage/ndb/include/mgmapi/mgmapi.h	2008-11-20 16:41:06 +0000
+++ b/storage/ndb/include/mgmapi/mgmapi.h	2008-12-18 09:16:45 +0000
@@ -1191,23 +1191,23 @@ extern "C" {
      NDB_MGM_CLUSTERLOG_ALERT = 6,
      NDB_MGM_CLUSTERLOG_ALL = 7
   };
-  inline
+  static inline
   int ndb_mgm_filter_clusterlog(NdbMgmHandle h,
 				enum ndb_mgm_clusterlog_level s,
 				int e, struct ndb_mgm_reply* r)
   { return ndb_mgm_set_clusterlog_severity_filter(h,(enum ndb_mgm_event_severity)s,
 						  e,r); }
-  inline
+  static inline
   const unsigned int * ndb_mgm_get_logfilter(NdbMgmHandle h)
   { return ndb_mgm_get_clusterlog_severity_filter_old(h); }
 
-  inline
+  static inline
   int ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle h, int n,
 				      enum ndb_mgm_event_category c,
 				      int l, struct ndb_mgm_reply* r)
   { return ndb_mgm_set_clusterlog_loglevel(h,n,c,l,r); }
 
-  inline
+  static inline
   const unsigned int * ndb_mgm_get_loglevel_clusterlog(NdbMgmHandle h)
   { return ndb_mgm_get_clusterlog_loglevel_old(h); }
 

=== modified file 'storage/ndb/include/mgmapi/mgmapi_config_parameters.h'
--- a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h	2008-05-29 13:09:49 +0000
+++ b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h	2008-12-18 09:16:45 +0000
@@ -191,9 +191,9 @@
 #define CFG_SCI_SEND_LIMIT            554
 #define CFG_SCI_BUFFER_MEM            555
 
-#define CFG_602                       602 // Removed: was OSE
-#define CFG_603                       603 // Removed: was OSE
-#define CFG_604                       604 // Removed: was OSE
+#define CFG_602                       602 /* Removed: was OSE */
+#define CFG_603                       603 /* Removed: was OSE */
+#define CFG_604                       604 /* Removed: was OSE */
 
 /**
  * API Config variables
@@ -220,6 +220,6 @@
 #define CONNECTION_TYPE_TCP           0
 #define CONNECTION_TYPE_SHM           1
 #define CONNECTION_TYPE_SCI           2
-#define CONNECTION_TYPE_OSE           3 // Removed.
+#define CONNECTION_TYPE_OSE           3 /* Removed. */
 
 #endif

=== modified file 'storage/ndb/include/mgmapi/mgmapi_error.h'
--- a/storage/ndb/include/mgmapi/mgmapi_error.h	2008-03-28 08:03:06 +0000
+++ b/storage/ndb/include/mgmapi/mgmapi_error.h	2008-12-18 09:16:45 +0000
@@ -81,40 +81,8 @@ extern "C" {
     enum ndb_mgm_error  code;
     const char *        msg;
   };
-  const struct Ndb_Mgm_Error_Msg ndb_mgm_error_msgs[] = {
-    { NDB_MGM_NO_ERROR, "No error" },
-
-    /* Request for service errors */
-    { NDB_MGM_ILLEGAL_CONNECT_STRING, "Illegal connect string" },
-    { NDB_MGM_ILLEGAL_SERVER_HANDLE, "Illegal server handle" },
-    { NDB_MGM_ILLEGAL_SERVER_REPLY, "Illegal reply from server" },
-    { NDB_MGM_ILLEGAL_NUMBER_OF_NODES, "Illegal number of nodes" },
-    { NDB_MGM_ILLEGAL_NODE_STATUS, "Illegal node status" },
-    { NDB_MGM_OUT_OF_MEMORY, "Out of memory" },
-    { NDB_MGM_SERVER_NOT_CONNECTED, "Management server not connected" },
-    { NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, "Could not connect to socket" },
-
-    /* Service errors - Start/Stop Node or System */
-    { NDB_MGM_START_FAILED, "Start failed" },
-    { NDB_MGM_STOP_FAILED, "Stop failed" },
-    { NDB_MGM_RESTART_FAILED, "Restart failed" },
-
-    /* Service errors - Backup */
-    { NDB_MGM_COULD_NOT_START_BACKUP, "Could not start backup" },
-    { NDB_MGM_COULD_NOT_ABORT_BACKUP, "Could not abort backup" },
-
-    /* Service errors - Single User Mode */
-    { NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE,
-      "Could not enter single user mode" },
-    { NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE,
-      "Could not exit single user mode" },
-
-    /* Usage errors */
-    { NDB_MGM_USAGE_ERROR,
-      "Usage error" }
-  };
-  const int ndb_mgm_noOfErrorMsgs =
-  sizeof(ndb_mgm_error_msgs)/sizeof(struct Ndb_Mgm_Error_Msg);
+  extern const struct Ndb_Mgm_Error_Msg ndb_mgm_error_msgs[];
+  extern const int ndb_mgm_noOfErrorMsgs;
 #endif
 
 #ifdef __cplusplus

=== modified file 'storage/ndb/include/mgmapi/ndb_logevent.h'
--- a/storage/ndb/include/mgmapi/ndb_logevent.h	2008-02-11 14:07:49 +0000
+++ b/storage/ndb/include/mgmapi/ndb_logevent.h	2008-12-18 09:16:45 +0000
@@ -393,6 +393,7 @@ extern "C" {
       } NDBStartCompleted;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
+        unsigned _todo;
       } STTORRYRecieved;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
@@ -437,6 +438,7 @@ extern "C" {
       } NDBStopForced;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
+        unsigned _todo;
       } NDBStopAborted;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
@@ -470,9 +472,11 @@ extern "C" {
       /* NODERESTART */
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
+        unsigned _todo;
       } NR_CopyDict;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
+        unsigned _todo;
       } NR_CopyDistr;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
@@ -518,12 +522,15 @@ extern "C" {
       } ArbitResult;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
+        unsigned _todo;
       } GCP_TakeoverStarted;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
+        unsigned _todo;
       } GCP_TakeoverCompleted;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
+        unsigned _todo;
       } LCP_TakeoverStarted;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
@@ -604,6 +611,7 @@ extern "C" {
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
 	/* TODO */
+        unsigned _todo;
       } WarningEvent;
 
       /* INFO */
@@ -618,6 +626,7 @@ extern "C" {
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {
 	/* TODO */
+        unsigned _todo;
       } InfoEvent;
       /** Log event specific data for for corresponding NDB_LE_ log event */
       struct {

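The unsigned _todo members added throughout this header give otherwise-empty struct branches a member: the header is also consumed as C (note the extern "C" wrapper), and ISO C does not permit a struct with no members, so compilers accept it only as an extension. A two-line illustration:

struct with_member { unsigned _todo; };  /* valid in both C and C++  */
/* struct empty { }; */                  /* fine in C++, a constraint
                                            violation in ISO C       */

int main(void)
{
  struct with_member m = { 0u };
  return (int)m._todo;
}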
=== modified file 'storage/ndb/include/ndbapi/NdbScanOperation.hpp'
--- a/storage/ndb/include/ndbapi/NdbScanOperation.hpp	2008-11-08 20:40:15 +0000
+++ b/storage/ndb/include/ndbapi/NdbScanOperation.hpp	2009-02-09 13:28:30 +0000
@@ -54,6 +54,12 @@ public:
       each fragment, to get a single sorted result set.
     */
     SF_OrderBy = (1 << 24),
+    /**
+     * Same as order by, except that it will automatically 
+     *   add all key columns into the read-mask
+     */
+    SF_OrderByFull = (16 << 24),
+
     /* Index scan in descending order, instead of default ascending. */
     SF_Descending = (2 << 24),
     /*

=== modified file 'storage/ndb/include/util/Bitmask.hpp'
--- a/storage/ndb/include/util/Bitmask.hpp	2008-01-23 09:34:09 +0000
+++ b/storage/ndb/include/util/Bitmask.hpp	2008-12-18 08:41:41 +0000
@@ -159,9 +159,27 @@ public:
 		       unsigned pos, unsigned len, const Uint32 src[]);
   
   /**
+   * copyField - Copy bitfield from one position and length
+   * to another position and length.
+   * Undefined for overlapping bitfields
+   */
+  static void copyField(Uint32 dst[], unsigned destPos,
+                        const Uint32 src[], unsigned srcPos, unsigned len);
+  
+  /**
    * getText - Return as hex-digits (only for debug routines).
    */
   static char* getText(unsigned size, const Uint32 data[], char* buf);
+
+  /**
+   * Parse string with numbers format
+   *   1,2,3-5
+   * @return -1 if unparsable characters are found,
+   *         -2 if str has a number larger than the bitmask size,
+   *         otherwise the number of bits set
+   */
+  static int parseMask(unsigned size, Uint32 data[], const char * str);
+
 private:
   static void getFieldImpl(const Uint32 data[], unsigned, unsigned, Uint32 []);
   static void setFieldImpl(Uint32 data[], unsigned, unsigned, const Uint32 []);
@@ -593,6 +611,9 @@ public:
    */
   static char* getText(const Uint32 data[], char* buf);
   char* getText(char* buf) const;
+
+  static int parseMask(Uint32 data[], const char * src);
+  int parseMask(const char * src);
 };
 
 template <unsigned size>
@@ -909,6 +930,21 @@ BitmaskPOD<size>::overlaps(BitmaskPOD<si
 }
 
 template <unsigned size>
+int
+BitmaskPOD<size>::parseMask(Uint32 data[], const char* buf)
+{
+  return BitmaskImpl::parseMask(size, data, buf);
+}
+
+template <unsigned size>
+inline
+int
+BitmaskPOD<size>::parseMask(const char* buf)
+{
+  return BitmaskPOD<size>::parseMask(rep.data, buf);
+}
+
+template <unsigned size>
 class Bitmask : public BitmaskPOD<size> {
 public:
   Bitmask() { this->clear();}
@@ -979,4 +1015,58 @@ BitmaskImpl::setField(unsigned size, Uin
   setFieldImpl(dst+1, used & 31, len-used, src+(used >> 5));
 }
 
+/* Three-way min utility for copyField below */
+inline unsigned minLength(unsigned a, unsigned b, unsigned c)
+{
+  return (a < b ? 
+          (a < c ? a : c) : 
+          (b < c ? b : c ));
+}
+
+inline void
+BitmaskImpl::copyField(Uint32 _dst[], unsigned dstPos,
+                       const Uint32 _src[], unsigned srcPos, unsigned len)
+{
+  /* Algorithm
+   * While (len > 0)
+   *  - Find the longest bit length we can copy from one 32-bit word
+   *    to another (which is the minimum of remaining length, 
+   *    space in current src word and space in current dest word)
+   *  - Extract that many bits from src, and shift them to the correct
+   *    position to insert into dest
+   *  - Mask out the to-be-written words from dest (and any irrelevant 
+   *    words in src) and or them together
+   *  - Move onto next chunk
+   */
+  while (len > 0)
+  {
+    const Uint32* src= _src + (srcPos >> 5);
+    Uint32* dst= _dst + (dstPos >> 5);
+    unsigned srcOffset= srcPos & 31;
+    unsigned dstOffset= dstPos & 31;
+    unsigned srcBitsInWord= 32 - srcOffset; 
+    unsigned dstBitsInWord= 32 - dstOffset;
+    
+    /* How many bits can we copy at once? */
+    unsigned bits= minLength(dstBitsInWord, srcBitsInWord, len);
+    
+    /* Create mask for affected bits in dest */
+    Uint32 destMask= (~(Uint32)0 >> (32-bits) << dstOffset);
+    
+    /* Grab source data and shift to dest offset */
+    Uint32 data= ((*src) >> srcOffset) << dstOffset;
+    
+    /* Mask out affected bits in dest and irrelevant bits in source
+     * and combine
+     */
+    *dst= (*dst  & ~destMask) | (data & destMask);
+    
+    srcPos+= bits;
+    dstPos+= bits;
+    len-= bits;
+  }
+  
+  return;
+}
+
 #endif

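The core of the new BitmaskImpl::copyField() above is the single-word splice: build a mask covering the destination bits, shift the source field into position, and combine. One iteration of that step, runnable on its own (values chosen for illustration):

#include <stdio.h>
#include <stdint.h>

int main()
{
  uint32_t src = 0x000000A0u;  /* field value 0xA at bit offset 4 */
  uint32_t dst = 0x00000000u;
  unsigned srcOffset = 4, dstOffset = 16, bits = 4;

  /* Mask covering the destination bits to be written (bits is 1..32). */
  uint32_t destMask = (~(uint32_t)0 >> (32 - bits)) << dstOffset;
  /* Source field shifted to the destination offset. */
  uint32_t data = (src >> srcOffset) << dstOffset;

  dst = (dst & ~destMask) | (data & destMask);
  printf("0x%08X\n", dst);     /* prints 0x000A0000 */
  return 0;
}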
=== modified file 'storage/ndb/src/common/portlib/NdbThread.c'
--- a/storage/ndb/src/common/portlib/NdbThread.c	2006-12-23 19:20:40 +0000
+++ b/storage/ndb/src/common/portlib/NdbThread.c	2008-12-12 11:05:58 +0000
@@ -39,11 +39,10 @@ struct NdbThread 
 #ifdef NDB_SHM_TRANSPORTER
 void NdbThread_set_shm_sigmask(my_bool block)
 {
-  DBUG_ENTER("NdbThread_set_shm_sigmask");
   if (g_ndb_shm_signum)
   {
     sigset_t mask;
-    DBUG_PRINT("info",("Block signum %d",g_ndb_shm_signum));
+    // DBUG_PRINT("info",("Block signum %d",g_ndb_shm_signum));
     sigemptyset(&mask);
     sigaddset(&mask, g_ndb_shm_signum);
     if (block)
@@ -51,7 +50,7 @@ void NdbThread_set_shm_sigmask(my_bool b
     else
       pthread_sigmask(SIG_UNBLOCK, &mask, 0);
   }
-  DBUG_VOID_RETURN;
+  return;
 }
 #endif
 

=== modified file 'storage/ndb/src/common/util/Bitmask.cpp'
--- a/storage/ndb/src/common/util/Bitmask.cpp	2008-01-23 14:04:43 +0000
+++ b/storage/ndb/src/common/util/Bitmask.cpp	2008-12-18 08:41:41 +0000
@@ -115,3 +115,62 @@ BitmaskImpl::setFieldImpl(Uint32 dst[],
  * storage/ndb/test/ndbapi/testBitfield.cpp
  * to get coverage from automated testing
  */
+
+int
+BitmaskImpl::parseMask(unsigned size, Uint32 data[], const char * src)
+{
+  int cnt = 0;
+  BaseString tmp(src);
+  Vector<BaseString> list;
+  tmp.split(list, ",");
+  for (unsigned i = 0; i<list.size(); i++)
+  {
+    list[i].trim();
+    if (list[i].empty())
+      continue;
+    unsigned num = 0;
+    char * delim = (char*)strchr(list[i].c_str(), '-');
+    unsigned first = 0;
+    unsigned last = 0;
+    if (delim == 0)
+    {
+      int res = sscanf(list[i].c_str(), "%u", &first);
+      if (res != 1)
+      {
+        return -1;
+      }
+      last = first;
+    }
+    else
+    {
+      * delim = 0;
+      delim++;
+      int res0 = sscanf(list[i].c_str(), "%u", &first);
+      if (res0 != 1)
+      {
+        return -1;
+      }
+      int res1 = sscanf(delim, "%u", &last);
+      if (res1 != 1)
+      {
+        return -1;
+      }
+      if (first > last)
+      {
+        unsigned tmp = first;
+        first = last;
+        last = tmp;
+      }
+    }
+    
+    for (unsigned j = first; j<(last+1); j++)
+    {
+      if (j >= (size << 5))
+        return -2;
+
+      cnt++;
+      BitmaskImpl::set(size, data, j);
+    }
+  }
+  return cnt;
+}

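parseMask() accepts a comma-separated list of numbers and ranges, and the code above normalises reversed ranges (so "5-3" means "3-5"). A minimal stand-alone parser for a single token mirroring that logic; the real function additionally trims tokens and sets the corresponding bits in the mask words:

#include <stdio.h>
#include <string.h>

static int parse_item(const char *tok, unsigned *first, unsigned *last)
{
  const char *dash = strchr(tok, '-');
  if (dash == NULL)
  {
    if (sscanf(tok, "%u", first) != 1)
      return -1;                     /* unparsable, as in parseMask() */
    *last = *first;
    return 0;
  }
  if (sscanf(tok, "%u", first) != 1 || sscanf(dash + 1, "%u", last) != 1)
    return -1;
  if (*first > *last)                /* "5-3" -> 3..5 */
  {
    unsigned tmp = *first;
    *first = *last;
    *last = tmp;
  }
  return 0;
}

int main()
{
  unsigned f, l;
  if (parse_item("5-3", &f, &l) == 0)
    printf("range %u..%u\n", f, l);  /* prints "range 3..5" */
  return 0;
}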
=== modified file 'storage/ndb/src/kernel/blocks/ERROR_codes.txt'
--- a/storage/ndb/src/kernel/blocks/ERROR_codes.txt	2008-12-08 12:35:55 +0000
+++ b/storage/ndb/src/kernel/blocks/ERROR_codes.txt	2009-01-29 10:56:52 +0000
@@ -1,4 +1,4 @@
-Next QMGR 937
+Next QMGR 938
 Next NDBCNTR 1002
 Next NDBFS 2000
 Next DBACC 3002

=== modified file 'storage/ndb/src/kernel/blocks/backup/Backup.cpp'
--- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp	2008-09-30 06:55:35 +0000
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp	2009-01-27 14:32:31 +0000
@@ -1076,20 +1076,24 @@ Backup::execBACKUP_REQ(Signal* signal)
   const Uint32 dataLen32 = req->backupDataLen; // In 32 bit words
   const Uint32 flags = signal->getLength() > 2 ? req->flags : 2;
 
-  if(getOwnNodeId() != getMasterNodeId()) {
+  if (getOwnNodeId() != getMasterNodeId())
+  {
     jam();
-    sendBackupRef(senderRef, flags, signal, senderData, BackupRef::IAmNotMaster);
+    sendBackupRef(senderRef, flags, signal, senderData,
+                  BackupRef::IAmNotMaster);
     return;
   }//if
 
   if (c_defaults.m_diskless)
   {
+    jam();
     sendBackupRef(senderRef, flags, signal, senderData, 
 		  BackupRef::CannotBackupDiskless);
     return;
   }
   
-  if(dataLen32 != 0) {
+  if (dataLen32 != 0)
+  {
     jam();
     sendBackupRef(senderRef, flags, signal, senderData, 
 		  BackupRef::BackupDefinitionNotImplemented);
@@ -1104,9 +1108,11 @@ Backup::execBACKUP_REQ(Signal* signal)
    */
   BackupRecordPtr ptr;
   c_backups.seize(ptr);
-  if(ptr.i == RNIL) {
+  if (ptr.i == RNIL)
+  {
     jam();
-    sendBackupRef(senderRef, flags, signal, senderData, BackupRef::OutOfBackupRecord);
+    sendBackupRef(senderRef, flags, signal, senderData,
+                  BackupRef::OutOfBackupRecord);
     return;
   }//if
 
@@ -1125,34 +1131,71 @@ Backup::execBACKUP_REQ(Signal* signal)
   ptr.p->backupDataLen = 0;
   ptr.p->masterData.errorCode = 0;
 
+  ptr.p->masterData.sequence.retriesLeft = 3;
+  sendUtilSequenceReq(signal, ptr);
+}
+
+void
+Backup::sendUtilSequenceReq(Signal* signal, BackupRecordPtr ptr, Uint32 delay)
+{
+  jam();
+
   UtilSequenceReq * utilReq = (UtilSequenceReq*)signal->getDataPtrSend();
-    
   ptr.p->masterData.gsn = GSN_UTIL_SEQUENCE_REQ;
   utilReq->senderData  = ptr.i;
   utilReq->sequenceId  = NDB_BACKUP_SEQUENCE;
   utilReq->requestType = UtilSequenceReq::NextVal;
-  sendSignal(DBUTIL_REF, GSN_UTIL_SEQUENCE_REQ, 
-	     signal, UtilSequenceReq::SignalLength, JBB);
+
+  if (delay == 0)
+  {
+    jam();
+    sendSignal(DBUTIL_REF, GSN_UTIL_SEQUENCE_REQ,
+               signal, UtilSequenceReq::SignalLength, JBB);
+  }
+  else
+  {
+    jam();
+    sendSignalWithDelay(DBUTIL_REF, GSN_UTIL_SEQUENCE_REQ,
+                        signal, delay, UtilSequenceReq::SignalLength);
+  }
 }
 
 void
 Backup::execUTIL_SEQUENCE_REF(Signal* signal)
 {
-  BackupRecordPtr ptr LINT_SET_PTR;
   jamEntry();
+  BackupRecordPtr ptr LINT_SET_PTR;
   UtilSequenceRef * utilRef = (UtilSequenceRef*)signal->getDataPtr();
   ptr.i = utilRef->senderData;
   c_backupPool.getPtr(ptr);
   ndbrequire(ptr.p->masterData.gsn == GSN_UTIL_SEQUENCE_REQ);
+
+  if (utilRef->errorCode == UtilSequenceRef::TCError)
+  {
+    jam();
+    if (ptr.p->masterData.sequence.retriesLeft > 0)
+    {
+      jam();
+      infoEvent("BACKUP: retrying sequence on error %u",
+                utilRef->TCErrorCode);
+      ptr.p->masterData.sequence.retriesLeft--;
+      sendUtilSequenceReq(signal, ptr, 300);
+      return;
+    }
+  }
+  warningEvent("BACKUP: aborting due to sequence error (%u, %u)",
+               utilRef->errorCode,
+               utilRef->TCErrorCode);
+
   sendBackupRef(signal, ptr, BackupRef::SequenceFailure);
 }//execUTIL_SEQUENCE_REF()
 
-
 void
 Backup::sendBackupRef(Signal* signal, BackupRecordPtr ptr, Uint32 errorCode)
 {
   jam();
-  sendBackupRef(ptr.p->clientRef, ptr.p->flags, signal, ptr.p->clientData, errorCode);
+  sendBackupRef(ptr.p->clientRef, ptr.p->flags, signal,
+                ptr.p->clientData, errorCode);
   cleanup(signal, ptr);
 }
 
@@ -1163,6 +1206,7 @@ Backup::sendBackupRef(BlockReference sen
   jam();
   if (SEND_BACKUP_STARTED_FLAG(flags))
   {
+    jam();
     BackupRef* ref = (BackupRef*)signal->getDataPtrSend();
     ref->senderData = senderData;
     ref->errorCode = errorCode;
@@ -1170,7 +1214,9 @@ Backup::sendBackupRef(BlockReference sen
     sendSignal(senderRef, GSN_BACKUP_REF, signal, BackupRef::SignalLength, JBB);
   }
 
-  if(errorCode != BackupRef::IAmNotMaster){
+  if (errorCode != BackupRef::IAmNotMaster)
+  {
+    jam();
     signal->theData[0] = NDB_LE_BackupFailedToStart;
     signal->theData[1] = senderRef;
     signal->theData[2] = errorCode;
@@ -2320,7 +2366,7 @@ Backup::stopBackupReply(Signal* signal, 
 
   sendAbortBackupOrd(signal, ptr, AbortBackupOrd::BackupComplete);
   
-  if(!ptr.p->checkError())
+  if(!ptr.p->checkError() &&  ptr.p->masterData.errorCode == 0)
   {
     if (SEND_BACKUP_COMPLETED_FLAG(ptr.p->flags))
     {
@@ -4908,6 +4954,7 @@ Backup::execABORT_BACKUP_ORD(Signal* sig
   default:
 #endif
     ptr.p->setErrorCode(requestType);
+    ptr.p->masterData.errorCode = requestType;
     ok= true;
   }
   ndbrequire(ok);

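The Backup.cpp change above gives the master a small retry budget for sequence fetches: a transient TC error re-sends UTIL_SEQUENCE_REQ after a 300 ms delay, up to three times, before the backup is failed. The shape of that pattern reduced to plain C++, with the failure simulated:

#include <stdio.h>

struct Request { int retriesLeft; };

/* Simulated transient failure: the first two attempts fail. */
static bool transient_failure(int attempt) { return attempt < 2; }

int main()
{
  Request req = { 3 };               /* retriesLeft = 3, as in the patch */
  for (int attempt = 0; ; attempt++)
  {
    if (!transient_failure(attempt))
    {
      printf("succeeded on attempt %d\n", attempt);
      break;
    }
    if (req.retriesLeft-- == 0)
    {
      printf("giving up\n");         /* maps to sendBackupRef(...) */
      break;
    }
    printf("retrying, %d left (after a delay in the real code)\n",
           req.retriesLeft);
  }
  return 0;
}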
=== modified file 'storage/ndb/src/kernel/blocks/backup/Backup.hpp'
--- a/storage/ndb/src/kernel/blocks/backup/Backup.hpp	2008-04-07 10:15:36 +0000
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.hpp	2009-01-27 16:11:49 +0000
@@ -501,6 +501,9 @@ public:
       SignalCounter sendCounter;
       Uint32 errorCode;
       union {
+        struct {
+          Uint32 retriesLeft;
+        } sequence;
 	struct {
 	  Uint32 startBackup;
 	} waitGCP;
@@ -667,6 +670,8 @@ public:
   void abort_scan(Signal*, BackupRecordPtr ptr);
   void removeBackup(Signal*, BackupRecordPtr ptr);
 
+  void sendUtilSequenceReq(Signal*, BackupRecordPtr ptr, Uint32 delay = 0);
+
   /*
     For periodic backup status reporting and explicit backup status reporting
   */

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2008-12-08 12:35:55 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2009-02-04 12:35:22 +0000
@@ -5328,6 +5328,12 @@ void Dbdih::startGcpMasterTakeOver(Signa
   signal->theData[0] = NDB_LE_GCP_TakeoverStarted;
   sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
 
+  /**
+   * save own value...
+   *   to be able to check values returned in MASTER_GCPCONF
+   */
+  m_gcp_save.m_master.m_new_gci = m_gcp_save.m_gci;
+
   setLocalNodefailHandling(signal, oldMasterId, NF_GCP_TAKE_OVER);
 }//Dbdih::handleNewMaster()
 
@@ -5628,6 +5634,17 @@ void Dbdih::execMASTER_GCPCONF(Signal* s
   ndbassert(ok); // Unhandled case...
 
   ok = false;
+  /**
+   * GCI should differ with atmost one
+   */
+  ndbrequire(saveGCI == m_gcp_save.m_gci ||
+             saveGCI == m_gcp_save.m_gci + 1 ||
+             saveGCI + 1 == m_gcp_save.m_gci);
+  if (saveGCI > m_gcp_save.m_master.m_new_gci)
+  {
+    jam();
+    m_gcp_save.m_master.m_new_gci = saveGCI;
+  }
   switch(saveState){
   case MasterGCPConf::GCP_SAVE_IDLE:
     jam();
@@ -5742,7 +5759,6 @@ void Dbdih::MASTER_GCPhandling(Signal* s
   else
   {
     ok = false;
-    m_gcp_save.m_master.m_new_gci = m_gcp_save.m_gci;
     switch(m_gcp_save.m_master.m_state){
     case GcpSave::GCP_SAVE_IDLE:
       jam();
@@ -8510,6 +8526,21 @@ void Dbdih::execGCP_NODEFINISH(Signal* s
     sendSignalWithDelay(CMVMI_REF, GSN_NDB_TAMPER, signal, 1000, 1);
     return;
   }
+  else if (ERROR_INSERTED(7216))
+  {
+    infoEvent("GCP_SAVE all/%u", c_error_insert_extra);
+    NodeRecordPtr nodePtr;
+    nodePtr.i = c_error_insert_extra;
+    ptrAss(nodePtr, nodeRecord);
+
+    removeAlive(nodePtr);
+    sendLoopMacro(GCP_SAVEREQ, sendGCP_SAVEREQ);
+    insertAlive(nodePtr);
+    signal->theData[0] = 9999;
+    sendSignalWithDelay(CMVMI_REF, GSN_NDB_TAMPER, signal, 1000, 1);
+    c_GCP_SAVEREQ_Counter.setWaitingFor(c_error_insert_extra);
+    return;
+  }
 #endif
   
   sendLoopMacro(GCP_SAVEREQ, sendGCP_SAVEREQ);
@@ -15489,8 +15520,21 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal
   }//if
   if (signal->theData[0] == DumpStateOrd::DihMinTimeBetweenLCP) {
     // Set time between LCP to min value
-    g_eventLogger->info("Set time between LCP to min value");
-    c_lcpState.clcpDelay = 0; // TimeBetweenLocalCheckpoints.min
+    if (signal->getLength() == 2)
+    {
+      Uint32 tmp;
+      const ndb_mgm_configuration_iterator * p = 
+	m_ctx.m_config.getOwnConfigIterator();
+      ndbrequire(p != 0);
+      ndb_mgm_get_int_parameter(p, CFG_DB_LCP_INTERVAL, &tmp);
+      g_eventLogger->info("Reset time between LCP to %u", tmp);
+      c_lcpState.clcpDelay = tmp;
+    }
+    else
+    {
+      g_eventLogger->info("Set time between LCP to min value");
+      c_lcpState.clcpDelay = 0; // TimeBetweenLocalCheckpoints.min
+    }
     return;
   }
   if (signal->theData[0] == DumpStateOrd::DihMaxTimeBetweenLCP) {
@@ -15605,6 +15649,12 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal
     SET_ERROR_INSERT_VALUE2(7214, signal->theData[1]);
     return;
   }
+
+  DECLARE_DUMP0(DBDIH, 7216, "Set error 7216 with extra arg")
+  {
+    SET_ERROR_INSERT_VALUE2(7216, signal->theData[1]);
+    return;
+  }
 }//Dbdih::execDUMP_STATE_ORD()
 
 void

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2008-12-08 12:35:55 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2009-02-02 21:21:34 +0000
@@ -704,10 +704,12 @@ public:
      *       fragment operations on the fragment. 
      *       A maximum of four concurrently active is allowed.
      */
-    typedef Bitmask<4> ScanNumberMask;
+
+    typedef Bitmask<8> ScanNumberMask; // Max 255 KeyInfo20::ScanNo
     ScanNumberMask m_scanNumberMask;
     DLList<ScanRecord>::Head m_activeScans;
     DLFifoList<ScanRecord>::Head m_queuedScans;
+    DLFifoList<ScanRecord>::Head m_queuedTupScans;
 
     Uint16 srLqhLognode[4];
     /**

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2008-12-08 12:35:55 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2009-02-02 21:21:34 +0000
@@ -9832,6 +9832,7 @@ Uint32 Dblqh::initScanrec(const ScanFrag
   Uint32 tupScan = ScanFragReq::getTupScanFlag(reqinfo);
   const Uint32 attrLen = ScanFragReq::getAttrLen(reqinfo);
   const Uint32 scanPrio = ScanFragReq::getScanPrio(reqinfo);
+  const Uint32 accScan = (rangeScan == 0) && (tupScan == 0);
 
   scanptr.p->scanKeyinfoFlag = keyinfo;
   scanptr.p->scanLockHold = scanLockHold;
@@ -9847,12 +9848,7 @@ Uint32 Dblqh::initScanrec(const ScanFrag
   scanptr.p->m_max_batch_size_rows = max_rows;
   scanptr.p->m_max_batch_size_bytes = max_bytes;
 
-#if 0
-  if (! rangeScan)
-    tupScan = 1;
-#endif
-
-  if (! rangeScan && ! tupScan)
+  if (accScan)
     scanptr.p->scanBlockref = tcConnectptr.p->tcAccBlockref;
   else if (! tupScan)
     scanptr.p->scanBlockref = tcConnectptr.p->tcTuxBlockref;
@@ -9895,13 +9891,27 @@ Uint32 Dblqh::initScanrec(const ScanFrag
    * !idx uses 1 - (MAX_PARALLEL_SCANS_PER_FRAG - 1)  =  1-11
    *  idx uses from MAX_PARALLEL_SCANS_PER_FRAG - MAX = 12-42)
    */
-  tupScan = 0; // Make sure that close tup scan does not start acc scan incorrectly
-  Uint32 start = (rangeScan || tupScan) ? MAX_PARALLEL_SCANS_PER_FRAG : 1 ;
-  Uint32 stop = (rangeScan || tupScan) ? MAX_PARALLEL_INDEX_SCANS_PER_FRAG : 
-    MAX_PARALLEL_SCANS_PER_FRAG - 1;
-  stop += start;
+  Uint32 start, stop;
+  if (accScan)
+  {
+    start = 1;
+    stop = MAX_PARALLEL_SCANS_PER_FRAG - 1;
+  }
+  else if (rangeScan)
+  {
+    start = MAX_PARALLEL_SCANS_PER_FRAG;
+    stop = start + MAX_PARALLEL_INDEX_SCANS_PER_FRAG - 1;
+  }
+  else
+  {
+    ndbassert(tupScan);
+    start = MAX_PARALLEL_SCANS_PER_FRAG + MAX_PARALLEL_INDEX_SCANS_PER_FRAG;
+    stop = start + MAX_PARALLEL_INDEX_SCANS_PER_FRAG - 1;
+  }
+  ndbrequire((start < 32 * tFragPtr.p->m_scanNumberMask.Size) &&
+             (stop < 32 * tFragPtr.p->m_scanNumberMask.Size));
   Uint32 free = tFragPtr.p->m_scanNumberMask.find(start);
-    
+  
   if(free == Fragrecord::ScanNumberMask::NotFound || free >= stop){
     jam();
     
@@ -9915,7 +9925,9 @@ Uint32 Dblqh::initScanrec(const ScanFrag
      */
     scanptr.p->scanState = ScanRecord::IN_QUEUE;
     LocalDLFifoList<ScanRecord> queue(c_scanRecordPool,
-				      fragptr.p->m_queuedScans);
+				      tupScan == 0 ? 
+                                      fragptr.p->m_queuedScans :
+                                      fragptr.p->m_queuedTupScans);
     queue.add(scanptr);
     return ZOK;
   }
@@ -9993,8 +10005,11 @@ void Dblqh::finishScanrec(Signal* signal
 {
   release_acc_ptr_list(scanptr.p);
 
+  Uint32 tupScan = scanptr.p->tupScan;
   LocalDLFifoList<ScanRecord> queue(c_scanRecordPool,
-				    fragptr.p->m_queuedScans);
+                                    tupScan == 0 ? 
+                                    fragptr.p->m_queuedScans :
+                                    fragptr.p->m_queuedTupScans);
   
   if(scanptr.p->scanState == ScanRecord::IN_QUEUE){
     jam();

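The rewritten initScanrec() carves the scan-number space into three disjoint
intervals, one per scan type, instead of the old two-way split (plus the
"tupScan = 0" workaround it deletes), and the new ndbrequire checks that both
bounds fit inside the widened mask. A sketch of the interval arithmetic, using
the 1-11 / 12-42 figures from the comment above and an assumed
MAX_PARALLEL_INDEX_SCANS_PER_FRAG of 31:

  #include <cassert>
  #include <cstdio>

  // Assumed values for illustration; the real constants live in kernel headers.
  const unsigned MAX_PARALLEL_SCANS_PER_FRAG = 12;
  const unsigned MAX_PARALLEL_INDEX_SCANS_PER_FRAG = 31;

  // Mirrors the start/stop selection: each scan type draws its scan
  // numbers from its own disjoint interval.
  void scan_number_range(bool accScan, bool rangeScan,
                         unsigned& start, unsigned& stop)
  {
    if (accScan)
    {
      start = 1;
      stop  = MAX_PARALLEL_SCANS_PER_FRAG - 1;               // 1..11
    }
    else if (rangeScan)
    {
      start = MAX_PARALLEL_SCANS_PER_FRAG;                   // 12..
      stop  = start + MAX_PARALLEL_INDEX_SCANS_PER_FRAG - 1; // ..42
    }
    else                                                     // tup scan
    {
      start = MAX_PARALLEL_SCANS_PER_FRAG
            + MAX_PARALLEL_INDEX_SCANS_PER_FRAG;             // 43..
      stop  = start + MAX_PARALLEL_INDEX_SCANS_PER_FRAG - 1; // ..73
    }
    assert(stop < 32 * 8); // must fit the widened Bitmask<8>
  }

  int main()
  {
    unsigned start, stop;
    scan_number_range(false, false, start, stop);
    std::printf("tup scans use %u..%u\n", start, stop); // 43..73
  }
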
=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2008-12-24 10:48:24 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2009-02-02 15:58:48 +0000
@@ -4064,13 +4064,13 @@ void Dbtc::sendtckeyconf(Signal* signal,
     tcKeyConf->apiConnectPtr = regApiPtr->ndbapiConnect;
     tcKeyConf->gci_hi = Uint32(regApiPtr->globalcheckpointid >> 32);
     Uint32* gci_lo = (Uint32*)&tcKeyConf->operations[TopWords >> 1];
-    * gci_lo = Uint32(regApiPtr->globalcheckpointid);
     tcKeyConf->confInfo = confInfo;
     tcKeyConf->transId1 = regApiPtr->transid[0];
     tcKeyConf->transId2 = regApiPtr->transid[1];
     copyFromToLen(&regApiPtr->tcSendArray[0],
 		  (UintR*)&tcKeyConf->operations,
 		  (UintR)ZTCOPCONF_SIZE);
+    * gci_lo = Uint32(regApiPtr->globalcheckpointid);
     sendSignal(regApiPtr->ndbapiBlockref,
 	       GSN_TCKEYCONF, signal, (TpacketLen - 1) + 1 /** gci_lo */, JBB);
     return;

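The Dbtc hunk is a pure ordering fix: gci_lo points into the tail of the same
operations[] area that copyFromToLen() fills, so storing it before the copy
lets the copy clobber it. Whether the regions actually overlap depends on
ZTCOPCONF_SIZE; the sketch below just demonstrates the hazard with plain
arrays and memcpy:

  #include <cstdio>
  #include <cstring>

  int main()
  {
    unsigned buffer[8] = {0};
    unsigned* trailer = &buffer[4];          // word just past the payload
    unsigned payload[5] = {1, 2, 3, 4, 5};   // copy overlaps buffer[4]

    // Wrong order: the copy overwrites the trailer stored first.
    *trailer = 0xCAFE;
    std::memcpy(buffer, payload, sizeof(payload));
    std::printf("store-then-copy: %#x\n", buffer[4]); // 0x5

    // Fixed order, as in the hunk: store the trailer after the copy.
    std::memcpy(buffer, payload, sizeof(payload));
    *trailer = 0xCAFE;
    std::printf("copy-then-store: %#x\n", buffer[4]); // 0xcafe
  }
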
=== modified file 'storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp'
--- a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp	2007-10-25 09:00:36 +0000
+++ b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp	2009-01-27 14:32:31 +0000
@@ -1564,7 +1564,8 @@ DbUtil::execUTIL_SEQUENCE_REQ(Signal* si
     ndbrequire(opPtr.p->attrInfo.next(it));
     * it.data = 0;
   }
-  
+
+  transPtr.p->noOfRetries = 3;
   runTransaction(signal, transPtr);
 }
 
@@ -1673,6 +1674,7 @@ DbUtil::reportSequence(Signal* signal, c
   ret->sequenceId = transP->sequence.sequenceId;
   ret->requestType = transP->sequence.requestType;
   ret->errorCode = (Uint32)errCode;
+  ret->TCErrorCode = transP->errorCode;
   sendSignal(transP->clientRef, GSN_UTIL_SEQUENCE_REF, signal, 
 	     UtilSequenceRef::SignalLength, JBB);
 }

=== modified file 'storage/ndb/src/kernel/blocks/lgman.cpp'
--- a/storage/ndb/src/kernel/blocks/lgman.cpp	2008-04-22 19:36:05 +0000
+++ b/storage/ndb/src/kernel/blocks/lgman.cpp	2008-12-20 19:48:44 +0000
@@ -1380,11 +1380,13 @@ Lgman::flush_log(Signal* signal, Ptr<Log
 
       if (ptr.p->m_log_buffer_waiters.isEmpty() || ptr.p->m_outstanding_fs)
       {
+        jam();
 	force =  0;
       }
       
       if (force < 2)
       {
+        jam();
 	signal->theData[0] = LgmanContinueB::FLUSH_LOG;
 	signal->theData[1] = ptr.i;
 	signal->theData[2] = force + 1;
@@ -1394,6 +1396,7 @@ Lgman::flush_log(Signal* signal, Ptr<Log
       }
       else
       {
+        jam();
 	Buffer_idx pos= producer.m_current_pos;
 	GlobalPage *page = m_shared_page_pool.getPtr(pos.m_ptr_i);
 	
@@ -1417,7 +1420,7 @@ Lgman::flush_log(Signal* signal, Ptr<Log
 	ndbrequire(ptr.p->m_free_buffer_words > free);
 	ptr.p->m_free_file_words -= free;
 	ptr.p->m_free_buffer_words -= free;
-	
+         
 	validate_logfile_group(ptr, "force_log_flush");
 	
 	next_page(ptr.p, PRODUCER);
@@ -1438,17 +1441,25 @@ Lgman::flush_log(Signal* signal, Ptr<Log
   Uint32 tot= 0;
   while(!(consumer.m_current_page == producer.m_current_page) && !full)
   {
+    jam();
     validate_logfile_group(ptr, "before flush log");
 
     Uint32 cnt; // pages written
     Uint32 page= consumer.m_current_pos.m_ptr_i;
     if(consumer.m_current_page.m_ptr_i == producer.m_current_page.m_ptr_i)
     {
-      if(consumer.m_current_page.m_idx > producer.m_current_page.m_idx)
+      /**
+       * In same range
+       */
+      jam();
+
+      if(producer.m_current_pos.m_ptr_i > page)
       {
+        /**
+         * producer ahead of consumer in same chunk
+         */
 	jam();
-	Uint32 tmp= 
-	  consumer.m_current_page.m_idx - producer.m_current_page.m_idx;
+	Uint32 tmp= producer.m_current_pos.m_ptr_i - page;
 	cnt= write_log_pages(signal, ptr, page, tmp);
 	assert(cnt <= tmp);
 	
@@ -1458,8 +1469,9 @@ Lgman::flush_log(Signal* signal, Ptr<Log
       }
       else
       {
-	// Only 1 chunk
-	ndbrequire(ptr.p->m_buffer_pages.getSize() == 2); 
+        /**
+         * consumer ahead of producer in same chunk
+         */
 	Uint32 tmp= consumer.m_current_page.m_idx + 1;
 	cnt= write_log_pages(signal, ptr, page, tmp);
 	assert(cnt <= tmp);
@@ -1552,8 +1564,9 @@ Lgman::process_log_buffer_waiters(Signal
   bool removed= false;
   Ptr<Log_waiter> waiter;
   list.first(waiter);
+  Uint32 sz  = waiter.p->m_size;
   Uint32 logfile_group_id = ptr.p->m_logfile_group_id;
-  if(waiter.p->m_size + 2*File_formats::UNDO_PAGE_WORDS < free_buffer)
+  if(sz + 2*File_formats::UNDO_PAGE_WORDS < free_buffer)
   {
     removed= true;
     Uint32 block = waiter.p->m_block;
@@ -2058,7 +2071,6 @@ Logfile_client::add_entry(const Change* 
 	}
 	* (dst - 1) |= File_formats::Undofile::UNDO_NEXT_LSN << 16;
 	ptr.p->m_free_file_words += 2;
-	ptr.p->m_free_buffer_words += 2;
 	m_lgman->validate_logfile_group(ptr);
       }
       else

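The corrected same-chunk branch in flush_log() now compares the producer's and
consumer's positions rather than their chunk indices: when the producer is
ahead, only the gap between them is written; when the consumer is ahead (the
producer has wrapped), the remainder of the consumer's chunk is written, and
the old single-chunk ndbrequire goes away. In sketch form, with illustrative
names:

  #include <cstdio>

  // How many pages one contiguous write can cover, given positions
  // inside the same chunk (illustrative names only).
  unsigned pages_to_write(unsigned consumer_pos, unsigned producer_pos,
                          unsigned pages_left_in_chunk)
  {
    if (producer_pos > consumer_pos)
      return producer_pos - consumer_pos;  // producer ahead: flush the gap
    return pages_left_in_chunk;            // consumer ahead: flush its chunk
  }

  int main()
  {
    std::printf("%u\n", pages_to_write(3, 7, 5)); // producer ahead: 4 pages
    std::printf("%u\n", pages_to_write(7, 3, 5)); // wrapped: 5 pages
  }
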
=== modified file 'storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2008-11-13 13:15:56 +0000
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2009-01-29 10:56:52 +0000
@@ -1010,6 +1010,13 @@ void Qmgr::execCM_REGCONF(Signal* signal
   c_start.m_gsn = GSN_CM_NODEINFOREQ;
   c_start.m_nodes = c_clusterNodes;
 
+  if (ERROR_INSERTED(937))
+  {
+    CLEAR_ERROR_INSERT_VALUE;
+    signal->theData[0] = 9999;
+    sendSignalWithDelay(CMVMI_REF, GSN_NDB_TAMPER, signal, 500, 1);
+  }
+
   return;
 }//Qmgr::execCM_REGCONF()
 
@@ -2847,7 +2854,13 @@ void Qmgr::node_failed(Signal* signal, U
     jam();
     return;
   case ZSTARTING:
-    c_start.reset();
+    /**
+     * bug#42422
+     *   Force "real" failure handling
+     */
+    failedNodePtr.p->phase = ZRUNNING;
+    failReportLab(signal, aFailedNode, FailRep::ZLINK_FAILURE);
+    return;
     // Fall-through
   default:
     jam();
@@ -3410,6 +3423,8 @@ void Qmgr::execPREP_FAILREQ(Signal* sign
   NodeRecPtr myNodePtr;
   jamEntry();
   
+  c_start.reset();
+  
   if (check_multi_node_shutdown(signal))
   {
     jam();

=== modified file 'storage/ndb/src/kernel/vm/Configuration.cpp'
--- a/storage/ndb/src/kernel/vm/Configuration.cpp	2008-04-22 19:36:05 +0000
+++ b/storage/ndb/src/kernel/vm/Configuration.cpp	2008-12-18 08:41:41 +0000
@@ -175,26 +175,18 @@ Configuration::init(int argc, char** arg
 
   if (_nowait_nodes)
   {
-    BaseString str(_nowait_nodes);
-    Vector<BaseString> arr;
-    str.split(arr, ",");
-    for (Uint32 i = 0; i<arr.size(); i++)
+    int res = g_nowait_nodes.parseMask(_nowait_nodes);
+    if(res == -2 || (res > 0 && g_nowait_nodes.get(0)))
     {
-      char *endptr = 0;
-      long val = strtol(arr[i].c_str(), &endptr, 10);
-      if (*endptr)
-      {
-	ndbout_c("Unable to parse nowait-nodes argument: %s : %s", 
-		 arr[i].c_str(), _nowait_nodes);
-	exit(-1);
-      }
-      if (! (val > 0 && val < MAX_NDB_NODES))
-      {
-	ndbout_c("Invalid nodeid specified in nowait-nodes: %ld : %s", 
-		 val, _nowait_nodes);
-	exit(-1);
-      }
-      g_nowait_nodes.set(val);
+      ndbout_c("Invalid nodeid specified in nowait-nodes: %s", 
+               _nowait_nodes);
+      exit(-1);
+    }
+    else if (res < 0)
+    {
+      ndbout_c("Unable to parse nowait-nodes argument: %s",
+               _nowait_nodes);
+      exit(-1);
     }
   }
 

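Replacing the strtol loop with parseMask() centralizes the parsing, and the
two error arms keep the old messages apart: -2 (or a successful parse that set
node 0) means an invalid node id, any other negative result means the string
itself was unparsable. A self-contained sketch of such a parser for plain
comma-separated ids (the real parseMask may accept richer syntax):

  #include <cstdint>
  #include <cstdio>
  #include <cstdlib>

  const long MAX_NDB_NODES = 49;

  // Parse "1,2,5" into a bitmask of node ids. Returns the node count on
  // success, -1 on syntax error, -2 on an out-of-range id (mirroring the
  // two error arms above).
  int parse_mask(const char* str, uint64_t& mask)
  {
    mask = 0;
    int cnt = 0;
    while (*str)
    {
      char* end;
      long val = std::strtol(str, &end, 10);
      if (end == str || (*end != ',' && *end != '\0'))
        return -1;                              // unparsable
      if (val <= 0 || val >= MAX_NDB_NODES)
        return -2;                              // invalid node id
      mask |= uint64_t(1) << val;
      cnt++;
      str = (*end == ',') ? end + 1 : end;
    }
    return cnt;
  }

  int main()
  {
    uint64_t mask;
    std::printf("%d %#llx\n", parse_mask("1,2,5", mask),
                (unsigned long long)mask);       // 3 0x26
  }
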
=== modified file 'storage/ndb/src/mgmapi/Makefile.am'
--- a/storage/ndb/src/mgmapi/Makefile.am	2007-04-13 09:33:08 +0000
+++ b/storage/ndb/src/mgmapi/Makefile.am	2008-12-18 10:04:16 +0000
@@ -17,7 +17,8 @@ MYSQLCLUSTERdir=        .
 
 noinst_LTLIBRARIES = libmgmapi.la
 
-libmgmapi_la_SOURCES = mgmapi.cpp ndb_logevent.cpp mgmapi_configuration.cpp LocalConfig.cpp ../kernel/error/ndbd_exit_codes.c ../mgmsrv/ConfigInfo.cpp
+libmgmapi_la_SOURCES = mgmapi.cpp ndb_logevent.cpp mgmapi_configuration.cpp LocalConfig.cpp ../kernel/error/ndbd_exit_codes.c ../mgmsrv/ConfigInfo.cpp \
+                       mgmapi_error.c
 
 INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/include/mgmapi
 

=== added file 'storage/ndb/src/mgmapi/mgmapi_error.c'
--- a/storage/ndb/src/mgmapi/mgmapi_error.c	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/src/mgmapi/mgmapi_error.c	2008-12-18 10:04:16 +0000
@@ -0,0 +1,53 @@
+ /* Copyright (C) 2003 MySQL AB
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
+
+#include <mgmapi_error.h>
+
+const struct Ndb_Mgm_Error_Msg ndb_mgm_error_msgs[] = 
+{
+  { NDB_MGM_NO_ERROR, "No error" },
+  
+  /* Request for service errors */
+  { NDB_MGM_ILLEGAL_CONNECT_STRING, "Illegal connect string" },
+  { NDB_MGM_ILLEGAL_SERVER_HANDLE, "Illegal server handle" },
+  { NDB_MGM_ILLEGAL_SERVER_REPLY, "Illegal reply from server" },
+  { NDB_MGM_ILLEGAL_NUMBER_OF_NODES, "Illegal number of nodes" },
+  { NDB_MGM_ILLEGAL_NODE_STATUS, "Illegal node status" },
+  { NDB_MGM_OUT_OF_MEMORY, "Out of memory" },
+  { NDB_MGM_SERVER_NOT_CONNECTED, "Management server not connected" },
+  { NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, "Could not connect to socket" },
+  
+  /* Service errors - Start/Stop Node or System */
+  { NDB_MGM_START_FAILED, "Start failed" },
+  { NDB_MGM_STOP_FAILED, "Stop failed" },
+  { NDB_MGM_RESTART_FAILED, "Restart failed" },
+  
+  /* Service errors - Backup */
+  { NDB_MGM_COULD_NOT_START_BACKUP, "Could not start backup" },
+  { NDB_MGM_COULD_NOT_ABORT_BACKUP, "Could not abort backup" },
+  
+  /* Service errors - Single User Mode */
+  { NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE,
+    "Could not enter single user mode" },
+  { NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE,
+    "Could not exit single user mode" },
+  
+  /* Usage errors */
+  { NDB_MGM_USAGE_ERROR,
+    "Usage error" }
+};
+
+const int ndb_mgm_noOfErrorMsgs =
+  sizeof(ndb_mgm_error_msgs)/sizeof(struct Ndb_Mgm_Error_Msg);

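The new table keeps each NDB_MGM_* code next to its message, and
ndb_mgm_noOfErrorMsgs is derived with sizeof so the count can never drift from
the array. The obvious consumer is a linear lookup; a sketch (error_text() is
a hypothetical helper, and the codes here are illustrative, not the real enum
values):

  #include <cstdio>

  struct Ndb_Mgm_Error_Msg { int code; const char* msg; };

  static const Ndb_Mgm_Error_Msg msgs[] = {
    { 0,    "No error" },
    { 1001, "Illegal connect string" },   // illustrative code values
  };
  static const int noOfMsgs = sizeof(msgs) / sizeof(msgs[0]);

  // Hypothetical lookup helper: scan the table, fall back on unknown codes.
  const char* error_text(int code)
  {
    for (int i = 0; i < noOfMsgs; i++)
      if (msgs[i].code == code)
        return msgs[i].msg;
    return "Unknown error";
  }

  int main() { std::printf("%s\n", error_text(1001)); }
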
=== modified file 'storage/ndb/src/mgmapi/ndb_logevent.cpp'
--- a/storage/ndb/src/mgmapi/ndb_logevent.cpp	2008-01-24 11:21:39 +0000
+++ b/storage/ndb/src/mgmapi/ndb_logevent.cpp	2009-01-14 10:47:05 +0000
@@ -19,8 +19,8 @@
 
 #include <NdbOut.hpp>
 #include <Properties.hpp>
-#include <socket_io.h>
 #include <InputStream.hpp>
+#include <NdbTick.h>
 
 #include <debugger/EventLogger.hpp>
 #include <kernel/NodeBitmask.hpp>
@@ -442,11 +442,15 @@ int ndb_logevent_get_next(const NdbLogEv
 
   SocketInputStream in(h->socket, timeout_in_milliseconds);
 
-  Properties p;
+  /*
+    Read log event header until the header is received
+    or the timeout expires. The MGM server will continuously
+    send <PING>'s that should be ignored.
+  */
   char buf[256];
-
-  /* header */
-  while (1) {
+  NDB_TICKS start = NdbTick_CurrentMillisecond();
+  while(1)
+  {
     if (in.gets(buf,sizeof(buf)) == 0)
     {
       h->m_error= NDB_LEH_READ_ERROR;
@@ -466,9 +470,14 @@ int ndb_logevent_get_next(const NdbLogEv
 
     if(in.timedout())
         return 0;
-  }
 
-  /* read name-value pairs into properties object */
+    if ((NdbTick_CurrentMillisecond() - start) > timeout_in_milliseconds)
+      return 0;
+
+  }
+
+  /* Read name-value pairs until empty new line */
+  Properties p;
   while (1)
   {
     if (in.gets(buf,sizeof(buf)) == 0)

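The reworked header loop bounds the whole wait with a wall-clock deadline
instead of relying only on per-read timeouts, so an MGM server that keeps the
socket busy with <PING> lines can no longer stall ndb_logevent_get_next
indefinitely; Bug40922 in testMgm below exercises exactly this. The pattern in
isolation, with a stand-in line reader:

  #include <chrono>
  #include <cstdio>
  #include <functional>
  #include <string>

  // Read lines until 'header' arrives or the overall deadline passes;
  // keep-alive lines are skipped. read_line() returns false on read error.
  bool wait_for_header(const std::function<bool(std::string&)>& read_line,
                       const std::string& header, int timeout_ms)
  {
    using clock = std::chrono::steady_clock;
    const auto deadline = clock::now() + std::chrono::milliseconds(timeout_ms);
    std::string line;
    while (true)
    {
      if (!read_line(line))
        return false;                 // read error
      if (line == header)
        return true;                  // header received
      if (clock::now() > deadline)
        return false;                 // overall timeout, as in the fix
    }
  }

  int main()
  {
    int n = 0;
    auto fake = [&](std::string& line) { // stand-in for SocketInputStream
      line = (++n < 3) ? "<PING>" : "log event";
      return true;
    };
    std::printf("%d\n", wait_for_header(fake, "log event", 2000)); // 1
  }
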
=== modified file 'storage/ndb/src/mgmsrv/MgmtSrvr.cpp'
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2008-11-13 13:15:56 +0000
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2009-01-08 14:35:49 +0000
@@ -3046,22 +3046,28 @@ MgmtSrvr::getConnectionDbParameter(int n
   DBUG_RETURN(1);
 }
 
-void MgmtSrvr::transporter_connect(NDB_SOCKET_TYPE sockfd)
+
+bool MgmtSrvr::transporter_connect(NDB_SOCKET_TYPE sockfd)
 {
-  if (theFacade->get_registry()->connect_server(sockfd))
-  {
-    /**
-     * Force an update_connections() so that the
-     * ClusterMgr and TransporterFacade is up to date
-     * with the new connection.
-     * Important for correct node id reservation handling
-     */
-    NdbMutex_Lock(theFacade->theMutexPtr);
-    theFacade->get_registry()->update_connections();
-    NdbMutex_Unlock(theFacade->theMutexPtr);
-  }
+  DBUG_ENTER("MgmtSrvr::transporter_connect");
+  TransporterRegistry* tr= theFacade->get_registry();
+  if (!tr->connect_server(sockfd))
+    DBUG_RETURN(false);
+
+  /*
+    Force an update_connections() so that the
+    ClusterMgr and TransporterFacade are up to date
+    with the new connection.
+    Important for correct node id reservation handling
+  */
+  theFacade->lock_mutex();
+  tr->update_connections();
+  theFacade->unlock_mutex();
+
+  DBUG_RETURN(true);
 }
 
+
 int MgmtSrvr::connect_to_self(const char * bindaddress)
 {
   int r= 0;

=== modified file 'storage/ndb/src/mgmsrv/MgmtSrvr.hpp'
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp	2008-03-14 13:32:49 +0000
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp	2009-01-08 14:35:49 +0000
@@ -444,7 +444,7 @@ public:
 
   int connect_to_self(const char* bindaddress = 0);
 
-  void transporter_connect(NDB_SOCKET_TYPE sockfd);
+  bool transporter_connect(NDB_SOCKET_TYPE sockfd);
 
   ConfigRetriever *get_config_retriever() { return m_config_retriever; };
 

=== modified file 'storage/ndb/src/mgmsrv/Services.cpp'
--- a/storage/ndb/src/mgmsrv/Services.cpp	2008-12-09 18:59:54 +0000
+++ b/storage/ndb/src/mgmsrv/Services.cpp	2009-01-23 11:03:00 +0000
@@ -294,7 +294,7 @@ struct PurgeStruct
 #define SLEEP_ERROR_INSERTED(x) if(ERROR_INSERTED(x)){NdbSleep_SecSleep(10);}
 
 MgmApiSession::MgmApiSession(class MgmtSrvr & mgm, NDB_SOCKET_TYPE sock, Uint64 session_id)
-  : SocketServer::Session(sock), m_mgmsrv(mgm)
+  : SocketServer::Session(sock), m_mgmsrv(mgm), m_name("unknown:0")
 {
   DBUG_ENTER("MgmApiSession::MgmApiSession");
   m_input = new SocketInputStream(sock, 30000);
@@ -306,6 +306,13 @@ MgmApiSession::MgmApiSession(class MgmtS
   m_session_id= session_id;
   m_mutex= NdbMutex_Create();
   m_errorInsert= 0;
+
+  struct sockaddr_in addr;
+  SOCKET_SIZE_TYPE addrlen= sizeof(addr);
+  if (getpeername(sock, (struct sockaddr*)&addr, &addrlen) == 0)
+    m_name.assfmt("%s:%d", inet_ntoa(addr.sin_addr), ntohs(addr.sin_port));
+  DBUG_PRINT("info", ("new connection from: %s", m_name.c_str()));
+
   DBUG_VOID_RETURN;
 }
 
@@ -1674,11 +1681,28 @@ void
 MgmApiSession::transporter_connect(Parser_t::Context &ctx,
 				   Properties const &args)
 {
-  m_mgmsrv.transporter_connect(m_socket);
 
-  m_stop= true;
-  m_stopped= true; // force a stop (no closing socket)
-  m_socket= NDB_INVALID_SOCKET;   // so nobody closes it
+  if (!m_mgmsrv.transporter_connect(m_socket))
+  {
+    // Connection not allowed or failed
+    g_eventLogger->warning("Failed to convert connection "
+                           "from '%s' to transporter",
+                           name());
+
+    // Close the socket to indicate failure to other side
+  }
+  else
+  {
+    /*
+      Conversion to transporter succeeded.
+      Stop this session thread and release resources
+      but don't close the socket, it's been taken over
+      by the transporter
+    */
+    m_socket= NDB_INVALID_SOCKET;   // so nobody closes it
+  }
+
+  m_stop= true; // Stop the session
 }
 
 void

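m_name is captured once in the session constructor via getpeername(), so the
warning path in transporter_connect() can still name the client after the
socket has been handed over or invalidated. The formatting in isolation,
assuming POSIX sockets (inet_ntoa matches the code above; modern code would
use inet_ntop):

  #include <arpa/inet.h>
  #include <netinet/in.h>
  #include <sys/socket.h>
  #include <cstdio>

  // Format a connected socket's peer as "host:port", with the same
  // "unknown:0" fallback as m_name's initializer.
  void peer_name(int sock, char* buf, size_t len)
  {
    struct sockaddr_in addr;
    socklen_t addrlen = sizeof(addr);
    if (getpeername(sock, (struct sockaddr*)&addr, &addrlen) == 0)
      std::snprintf(buf, len, "%s:%d",
                    inet_ntoa(addr.sin_addr), ntohs(addr.sin_port));
    else
      std::snprintf(buf, len, "unknown:0");
  }

  int main()
  {
    char buf[32];
    peer_name(-1, buf, sizeof(buf)); // invalid fd: prints the fallback
    std::printf("%s\n", buf);
  }
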
=== modified file 'storage/ndb/src/mgmsrv/Services.hpp'
--- a/storage/ndb/src/mgmsrv/Services.hpp	2007-03-22 11:33:07 +0000
+++ b/storage/ndb/src/mgmsrv/Services.hpp	2009-01-08 14:35:49 +0000
@@ -48,6 +48,9 @@ private:
 
   int m_errorInsert;
 
+  BaseString m_name;
+  const char* name() { return m_name.c_str(); }
+
   const char *get_error_text(int err_no)
   { return m_mgmsrv.getErrorText(err_no, m_err_str, sizeof(m_err_str)); }
 

=== modified file 'storage/ndb/src/ndbapi/ClusterMgr.cpp'
--- a/storage/ndb/src/ndbapi/ClusterMgr.cpp	2008-08-08 09:40:47 +0000
+++ b/storage/ndb/src/ndbapi/ClusterMgr.cpp	2009-01-08 11:57:59 +0000
@@ -556,8 +556,7 @@ ClusterMgr::reportNodeFailed(NodeId node
     theFacade.ReportNodeDead(nodeId);
   }
   
-  theNode.nfCompleteRep = false;
-  if(noOfAliveNodes == 0)
+  if (noOfConnectedNodes == 0)
   {
     if (!global_flag_skip_invalidate_cache &&
         theFacade.m_globalDictCache)
@@ -568,6 +567,10 @@ ClusterMgr::reportNodeFailed(NodeId node
       m_connect_count ++;
       m_cluster_state = CS_waiting_for_clean_cache;
     }
+  }
+  theNode.nfCompleteRep = false;
+  if(noOfAliveNodes == 0)
+  {
     NFCompleteRep rep;
     for(Uint32 i = 1; i<MAX_NDB_NODES; i++){
       if(theNodes[i].defined && theNodes[i].nfCompleteRep == false){

=== modified file 'storage/ndb/src/ndbapi/ClusterMgr.hpp'
--- a/storage/ndb/src/ndbapi/ClusterMgr.hpp	2007-05-09 14:31:16 +0000
+++ b/storage/ndb/src/ndbapi/ClusterMgr.hpp	2008-12-16 20:51:49 +0000
@@ -85,7 +85,6 @@ public:
   
   const Node &  getNodeInfo(NodeId) const;
   Uint32        getNoOfConnectedNodes() const;
-  bool          isClusterAlive() const;
   void          hb_received(NodeId);
 
   Uint32        m_connect_count;
@@ -145,11 +144,6 @@ ClusterMgr::getNoOfConnectedNodes() cons
 }
 
 inline
-bool
-ClusterMgr::isClusterAlive() const {
-  return noOfAliveNodes != 0;
-}
-inline
 void
 ClusterMgr::hb_received(NodeId nodeId) {
   theNodes[nodeId].m_info.m_heartbeat_cnt= 0;

=== modified file 'storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp	2008-09-25 10:55:39 +0000
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp	2009-01-08 11:57:59 +0000
@@ -1080,6 +1080,8 @@ NdbEventBuffer::NdbEventBuffer(Ndb *ndb)
   // initialize lists
   bzero(&g_empty_gci_container, sizeof(Gci_container));
   init_gci_containers();
+
+  m_alive_node_bit_mask.clear();
 }
 
 NdbEventBuffer::~NdbEventBuffer()
@@ -1836,11 +1838,16 @@ NdbEventBuffer::complete_bucket(Gci_cont
 
 void
 NdbEventBuffer::execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep,
-                                         Uint32 len)
+                                         Uint32 len, int complete_cluster_failure)
 {
-  if (unlikely(m_active_op_count == 0))
+  if (!complete_cluster_failure)
   {
-    return;
+    m_alive_node_bit_mask.set(refToNode(rep->senderRef));
+
+    if (unlikely(m_active_op_count == 0))
+    {
+      return;
+    }
   }
   
   DBUG_ENTER_EVENT("NdbEventBuffer::execSUB_GCP_COMPLETE_REP");
@@ -2089,13 +2096,15 @@ NdbEventBuffer::report_node_connected(Ui
 }
 
 void
-NdbEventBuffer::report_node_failure(Uint32 node_id)
+NdbEventBuffer::report_node_failure_completed(Uint32 node_id)
 {
+  m_alive_node_bit_mask.clear(node_id);
+
   NdbEventOperation* op= m_ndb->getEventOperation(0);
   if (op == 0)
     return;
 
-  DBUG_ENTER("NdbEventBuffer::report_node_failure");
+  DBUG_ENTER("NdbEventBuffer::report_node_failure_completed");
   SubTableData data;
   LinearSectionPtr ptr[3];
   bzero(&data, sizeof(data));
@@ -2120,20 +2129,17 @@ NdbEventBuffer::report_node_failure(Uint
    */
   // no need to lock()/unlock(), receive thread calls this
   insert_event(&op->m_impl, data, ptr, data.senderData);
-  DBUG_VOID_RETURN;
-}
 
-void
-NdbEventBuffer::completeClusterFailed()
-{
-  NdbEventOperation* op= m_ndb->getEventOperation(0);
-  if (op == 0)
-    return;
+  if (!m_alive_node_bit_mask.isclear())
+    DBUG_VOID_RETURN;
 
-  DBUG_ENTER("NdbEventBuffer::completeClusterFailed");
+  /*
+   * Cluster failure
+   */
 
+  DBUG_PRINT("info", ("Cluster failure"));
 
-  Uint64 gci = Uint64((m_latestGCI >> 32) + 1) << 32;
+  gci = Uint64((m_latestGCI >> 32) + 1) << 32;
   bool found = find_max_known_gci(&gci);
 
   Uint64 * array = m_known_gci.getBase();
@@ -2169,18 +2175,10 @@ NdbEventBuffer::completeClusterFailed()
   /**
    * Inject new event
    */
-  SubTableData data;
-  LinearSectionPtr ptr[3];
-  bzero(&data, sizeof(data));
-  bzero(ptr, sizeof(ptr));
-
   data.tableId = ~0;
   data.requestInfo = 0;
   SubTableData::setOperation(data.requestInfo,
 			     NdbDictionary::Event::_TE_CLUSTER_FAILURE);
-  data.flags = SubTableData::LOG;
-  data.gci_hi = Uint32(gci >> 32);
-  data.gci_lo = Uint32(gci);
 
   /**
    * Insert this event for each operation
@@ -2212,7 +2210,7 @@ NdbEventBuffer::completeClusterFailed()
   rep.gci_lo= gci & 0xFFFFFFFF;
   rep.gcp_complete_rep_count= cnt;
   rep.flags = 0;
-  execSUB_GCP_COMPLETE_REP(&rep, SubGcpCompleteRep::SignalLength);
+  execSUB_GCP_COMPLETE_REP(&rep, SubGcpCompleteRep::SignalLength, 1);
 
   DBUG_VOID_RETURN;
 }

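The merge of report_node_failure() and completeClusterFailed() hangs off one
piece of state: a bitmask of alive nodes. Each SUB_GCP_COMPLETE_REP sets the
sender's bit, each completed node failure clears one, and the cluster-failure
event is injected exactly when the mask goes empty, replacing the old
isClusterAlive() query. The bookkeeping in miniature, with std::bitset
standing in for the NDB Bitmask:

  #include <bitset>
  #include <cstdio>

  const int MAX_NODES = 49;
  static std::bitset<MAX_NODES> alive;     // stands in for m_alive_node_bit_mask

  void on_gcp_complete(int sender)         // execSUB_GCP_COMPLETE_REP path
  {
    alive.set(sender);
  }

  void on_node_failure_completed(int node) // report_node_failure_completed path
  {
    alive.reset(node);
    if (alive.none())
      std::printf("Cluster failure\n");    // inject _TE_CLUSTER_FAILURE here
  }

  int main()
  {
    on_gcp_complete(1);
    on_gcp_complete(2);
    on_node_failure_completed(1);          // node 2 still alive: nothing
    on_node_failure_completed(2);          // prints "Cluster failure"
  }
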
=== modified file 'storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp	2008-02-11 13:24:17 +0000
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp	2008-12-16 20:51:49 +0000
@@ -424,8 +424,7 @@ public:
     and added to all event ops listed as active or pending delete
    in m_dropped_ev_op using insertDataL, including the blob
     event ops referenced by a regular event op.
-    - NdbEventBuffer::report_node_failure
-    - NdbEventBuffer::completeClusterFailed
+    - NdbEventBuffer::report_node_failure_completed
 
     TE_ACTIVE is sent from the kernel on initial execute/start of the
    event op, but is also internally generated on node connect like
@@ -528,12 +527,12 @@ public:
   int insertDataL(NdbEventOperationImpl *op,
 		  const SubTableData * const sdata, Uint32 len,
 		  LinearSectionPtr ptr[3]);
-  void execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const, Uint32 len);
+  void execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const, Uint32 len,
+                                int complete_cluster_failure= 0);
   void complete_outof_order_gcis();
   
   void report_node_connected(Uint32 node_id);
-  void report_node_failure(Uint32 node_id);
-  void completeClusterFailed();
+  void report_node_failure_completed(Uint32 node_id);
 
   // used by user thread 
   Uint64 getLatestGCI();
@@ -664,6 +663,8 @@ private:
   void complete_bucket(Gci_container*);
   bool find_max_known_gci(Uint64 * res) const;
   void resize_known_gci();
+
+  Bitmask<(unsigned int)_NDB_NODE_BITMASK_SIZE> m_alive_node_bit_mask;
 };
 
 inline

=== modified file 'storage/ndb/src/ndbapi/NdbScanOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp	2008-12-09 18:59:54 +0000
+++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp	2009-02-10 08:24:37 +0000
@@ -788,7 +788,10 @@ NdbIndexScanOperation::scanIndexImpl(con
     return -1;
   }
 
-  if (scan_flags & NdbScanOperation::SF_OrderBy)
+  result_record->copyMask(m_read_mask, result_mask);
+
+  if (scan_flags & (NdbScanOperation::SF_OrderBy | 
+                    NdbScanOperation::SF_OrderByFull))
   {
     /**
      * For ordering, we need all keys in the result row.
@@ -796,19 +799,34 @@ NdbIndexScanOperation::scanIndexImpl(con
      * So for each key column, check that it is included in the result
      * NdbRecord.
      */
+#define MASKSZ ((NDB_MAX_ATTRIBUTES_IN_TABLE+31)>>5)
+    Uint32 keymask[MASKSZ];
+    BitmaskImpl::clear(MASKSZ, keymask);
+
     for (i = 0; i < key_record->key_index_length; i++)
     {
-      const NdbRecord::Attr *key_col =
-        &key_record->columns[key_record->key_indexes[i]];
-      if (key_col->attrId >= result_record->m_attrId_indexes_length ||
-          result_record->m_attrId_indexes[key_col->attrId] < 0)
+      Uint32 attrId = key_record->columns[key_record->key_indexes[i]].attrId;
+      if (attrId >= result_record->m_attrId_indexes_length ||
+          result_record->m_attrId_indexes[attrId] < 0)
       {
         setErrorCodeAbort(4292);
         return -1;
       }
+
+      BitmaskImpl::set(MASKSZ, keymask, attrId);
     }
-  }
 
+    if (scan_flags & NdbScanOperation::SF_OrderByFull)
+    {
+      BitmaskImpl::bitOR(MASKSZ, m_read_mask, keymask);
+    }
+    else if (!BitmaskImpl::contains(MASKSZ, m_read_mask, keymask))
+    {
+      setErrorCodeAbort(4341);
+      return -1;
+    }
+  }
+  
   if (!(key_record->flags & NdbRecord::RecIsIndex))
   {
     setErrorCodeAbort(4283);
@@ -833,8 +851,6 @@ NdbIndexScanOperation::scanIndexImpl(con
   if (res==-1)
     return -1;
 
-  result_record->copyMask(m_read_mask, result_mask);
-
   /* Fix theStatus as set in processIndexScanDefs(). */
   theStatus= NdbOperation::UseNdbRecord;
 
@@ -853,11 +869,8 @@ NdbIndexScanOperation::scanIndexImpl(con
       But cannot mask pseudo columns, nor key columns in ordered scans.
     */
     attrId= col->attrId;
-    if ( result_mask &&
-         !(attrId & AttributeHeader::PSEUDO) &&
-         !( (scan_flags & NdbScanOperation::SF_OrderBy) &&
-            (col->flags & NdbRecord::IsKey) ) &&
-         !(result_mask[attrId>>3] & (1<<(attrId & 7))) )
+    if ( !(attrId & AttributeHeader::PSEUDO) &&
+         !BitmaskImpl::get(MASKSZ, m_read_mask, attrId))
     {
       continue;
     }
@@ -1022,7 +1035,7 @@ NdbScanOperation::processTableScanDefs(N
     tupScan = false;
   }
   
-  if (rangeScan && (scan_flags & SF_OrderBy))
+  if (rangeScan && (scan_flags & (SF_OrderBy | SF_OrderByFull)))
     parallel = fragCount; // Note we assume fragcount of base table==
                           // fragcount of index.
   
@@ -1295,39 +1308,74 @@ NdbScanOperation::executeCursor(int node
    * Call finaliseScanOldApi() for old style scans before
    * proceeding
    */  
-  if (m_scanUsingOldApi &&
-      finaliseScanOldApi() == -1) 
-    return -1;
-
-  NdbTransaction * tCon = theNdbCon;
+  bool locked = false;
   TransporterFacade* tp = theNdb->theImpl->m_transporter_facade;
-  Guard guard(tp->theMutexPtr);
 
-  Uint32 seq = tCon->theNodeSequence;
+  int res = 0;
+  if (m_scanUsingOldApi && finaliseScanOldApi() == -1)
+  {
+    res = -1;
+    goto done;
+  }
 
-  if (tp->get_node_alive(nodeId) &&
-      (tp->getNodeSequence(nodeId) == seq)) {
+  {
+    locked = true;
+    NdbTransaction * tCon = theNdbCon;
+    NdbMutex_Lock(tp->theMutexPtr);
+    
+    Uint32 seq = tCon->theNodeSequence;
+    
+    if (tp->get_node_alive(nodeId) &&
+        (tp->getNodeSequence(nodeId) == seq)) {
+      
+      tCon->theMagicNumber = 0x37412619;
+      
+      if (doSendScan(nodeId) == -1)
+      {
+        res = -1;
+        goto done;
+      }
+      
+      m_executed= true; // Mark operation as executed
+    } 
+    else
+    {
+      if (!(tp->get_node_stopping(nodeId) &&
+            (tp->getNodeSequence(nodeId) == seq)))
+      {
+        TRACE_DEBUG("The node is hard dead when attempting to start a scan");
+        setErrorCode(4029);
+        tCon->theReleaseOnClose = true;
+      } 
+      else 
+      {
+        TRACE_DEBUG("The node is stopping when attempting to start a scan");
+        setErrorCode(4030);
+      }//if
+      res = -1;
+      tCon->theCommitStatus = NdbTransaction::Aborted;
+    }//if
+  }
 
-    tCon->theMagicNumber = 0x37412619;
+done:
+  /**
+   * Set pointers correctly
+   *   so that nextResult will handle it correctly
+   *   even if doSendScan was never called
+   *   bug#42454
+   */
+  m_curr_row = 0;
+  m_sent_receivers_count = theParallelism;
+  if(m_ordered)
+  {
+    m_current_api_receiver = theParallelism;
+    m_api_receivers_count = theParallelism;
+  }
 
-    if (doSendScan(nodeId) == -1)
-      return -1;
+  if (locked)
+    NdbMutex_Unlock(tp->theMutexPtr);
 
-    m_executed= true; // Mark operation as executed
-    return 0;
-  } else {
-    if (!(tp->get_node_stopping(nodeId) &&
-          (tp->getNodeSequence(nodeId) == seq))){
-      TRACE_DEBUG("The node is hard dead when attempting to start a scan");
-      setErrorCode(4029);
-      tCon->theReleaseOnClose = true;
-    } else {
-      TRACE_DEBUG("The node is stopping when attempting to start a scan");
-      setErrorCode(4030);
-    }//if
-    tCon->theCommitStatus = NdbTransaction::Aborted;
-  }//if
-  return -1;
+  return res;
 }
 
 
@@ -1791,7 +1839,7 @@ int NdbScanOperation::finaliseScanOldApi
      * don't
      */
     const unsigned char * resultMask= 
-      ((m_savedScanFlagsOldApi & SF_OrderBy) !=0) ? 
+      ((m_savedScanFlagsOldApi & (SF_OrderBy | SF_OrderByFull)) !=0) ? 
       m_accessTable->m_pkMask : 
       emptyMask;
 
@@ -2046,14 +2094,6 @@ NdbScanOperation::doSendScan(int aProces
   }    
   theStatus = WaitResponse;  
 
-  m_curr_row = 0;
-  m_sent_receivers_count = theParallelism;
-  if(m_ordered)
-  {
-    m_current_api_receiver = theParallelism;
-    m_api_receivers_count = theParallelism;
-  }
-  
   return tSignalCount;
 }//NdbOperation::doSendScan()
 
@@ -3004,7 +3044,7 @@ NdbIndexScanOperation::processIndexScanD
                                             Uint32 parallel,
                                             Uint32 batch)
 {
-  const bool order_by = scan_flags & SF_OrderBy;
+  const bool order_by = scan_flags & (SF_OrderBy | SF_OrderByFull);
   const bool order_desc = scan_flags & SF_Descending;
   const bool read_range_no = scan_flags & SF_ReadRangeNo;
   m_multi_range = scan_flags & SF_MultiRange;

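For ordered index scans the receivers must be able to merge-sort on every key
column, so scanIndexImpl() now builds a bitmask of the key attribute ids and
either ORs it into the read mask (SF_OrderByFull) or rejects the scan with the
new error 4341 when the application's mask doesn't already cover it
(SF_OrderBy). The two mask operations in sketch form:

  #include <cstdint>
  #include <cstdio>

  const unsigned MASKSZ = 4;

  // true iff every bit of 'keys' is also set in 'read'
  // (cf. BitmaskImpl::contains in the hunk above).
  bool contains(const uint32_t* read, const uint32_t* keys)
  {
    for (unsigned i = 0; i < MASKSZ; i++)
      if ((keys[i] & read[i]) != keys[i])
        return false;
    return true;
  }

  void bit_or(uint32_t* dst, const uint32_t* src)
  {
    for (unsigned i = 0; i < MASKSZ; i++)
      dst[i] |= src[i];
  }

  int main()
  {
    uint32_t read_mask[MASKSZ] = {0x6};  // application reads columns 1,2
    uint32_t key_mask[MASKSZ]  = {0x5};  // index keys are columns 0,2

    if (!contains(read_mask, key_mask))  // SF_OrderBy: fails with 4341
      std::printf("error 4341: not all keys read\n");

    bit_or(read_mask, key_mask);         // SF_OrderByFull: widen the mask
    std::printf("read mask now %#x\n", read_mask[0]); // 0x7
  }
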
=== modified file 'storage/ndb/src/ndbapi/Ndbif.cpp'
--- a/storage/ndb/src/ndbapi/Ndbif.cpp	2008-11-13 13:15:56 +0000
+++ b/storage/ndb/src/ndbapi/Ndbif.cpp	2008-12-16 20:51:49 +0000
@@ -269,13 +269,7 @@ Ndb::report_node_failure_completed(Uint3
   {
     // node failed
     // eventOperations in the ndb object should be notified
-    theEventBuffer->report_node_failure(node_id);
-    if(!theImpl->m_transporter_facade->theClusterMgr->isClusterAlive())
-    {
-      // cluster is unavailable, 
-      // eventOperations in the ndb object should be notified
-      theEventBuffer->completeClusterFailed();
-    }
+    theEventBuffer->report_node_failure_completed(node_id);
   }
   
   abortTransactionsAfterNodeFailure(node_id);

=== modified file 'storage/ndb/src/ndbapi/TransporterFacade.hpp'
--- a/storage/ndb/src/ndbapi/TransporterFacade.hpp	2008-05-16 13:08:36 +0000
+++ b/storage/ndb/src/ndbapi/TransporterFacade.hpp	2009-01-29 16:24:04 +0000
@@ -323,7 +323,9 @@ TransporterFacade::unlock_mutex()
 inline
 unsigned Ndb_cluster_connection_impl::get_connect_count() const
 {
-  return m_transporter_facade->theClusterMgr->m_connect_count;
+  if (m_transporter_facade->theClusterMgr)
+    return m_transporter_facade->theClusterMgr->m_connect_count;
+  return 0;
 }
 
 inline
@@ -351,9 +353,12 @@ TransporterFacade::getNodeGrp(NodeId n) 
 inline
 bool
 TransporterFacade::get_node_alive(NodeId n) const {
-
-  const ClusterMgr::Node & node = theClusterMgr->getNodeInfo(n);
-  return node.m_alive;
+  if (theClusterMgr)
+  {
+    const ClusterMgr::Node & node = theClusterMgr->getNodeInfo(n);
+    return node.m_alive;
+  }
+  return 0;
 }
 
 inline

=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2008-12-09 18:59:54 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2009-02-09 13:34:12 +0000
@@ -365,7 +365,7 @@ ErrorBundle ErrorCodes[] = {
   { 708,  DMEC, SE, "No more attribute metadata records (increase MaxNoOfAttributes)" },
   { 709,  HA_ERR_NO_SUCH_TABLE, SE, "No such table existed" },
   { 710,  DMEC, SE, "Internal: Get by table name not supported, use table id." },
-  { 721,  HA_ERR_TABLE_EXIST,   OE, "Table or index with given name already exists" },
+  { 721,  HA_ERR_TABLE_EXIST,   OE, "Schema object with given name already exists" },
   { 723,  HA_ERR_NO_SUCH_TABLE, SE, "No such table existed" },
   { 736,  DMEC, SE, "Unsupported array size" },
   { 737,  HA_WRONG_CREATE_OPTION, SE, "Attribute array size too big" },
@@ -678,6 +678,7 @@ ErrorBundle ErrorCodes[] = {
   { 4290, DMEC, AE, "Missing column specification in NdbDictionary::RecordSpecification" },
   { 4291, DMEC, AE, "Duplicate column specification in NdbDictionary::RecordSpecification" },
   { 4292, DMEC, AE, "NdbRecord for tuple access is not an index key NdbRecord" },
+  { 4341, DMEC, AE, "Not all keys read when using option SF_OrderBy" },
   { 4293, DMEC, AE, "Error returned from application scanIndex() callback" },
   { 4294, DMEC, AE, "Scan filter is too large, discarded" },
   { 4295, DMEC, AE, "Column is NULL in Get/SetValueSpec structure" },
@@ -686,6 +687,7 @@ ErrorBundle ErrorCodes[] = {
   { 4298, DMEC, AE, "Invalid or unsupported ScanOptions structure" },
   { 4299, DMEC, AE, "Incorrect combination of ScanOption flags, extraGetValues ptr and numExtraGetValues" },
   { 2810, DMEC, TR, "No space left on the device" },
+  { 2811, DMEC, TR, "Error with file permissions, please check file system" },
   { 2815, DMEC, TR, "Error in reading files, please check file system" },
 
   { NO_CONTACT_WITH_PROCESS, DMEC, AE,

=== modified file 'storage/ndb/test/include/DbUtil.hpp'
--- a/storage/ndb/test/include/DbUtil.hpp	2008-03-03 15:10:42 +0000
+++ b/storage/ndb/test/include/DbUtil.hpp	2008-12-12 08:48:37 +0000
@@ -102,7 +102,7 @@ public:
   bool doQuery(BaseString& str, SqlResultSet& result);
   bool doQuery(BaseString& str, const Properties& args, SqlResultSet& result);
 
-  bool waitConnected(int timeout);
+  bool waitConnected(int timeout = 120);
 
   bool  databaseLogin(const char * host,
                       const char * user,

=== modified file 'storage/ndb/test/ndbapi/testBasic.cpp'
--- a/storage/ndb/test/ndbapi/testBasic.cpp	2007-10-15 08:09:00 +0000
+++ b/storage/ndb/test/ndbapi/testBasic.cpp	2008-12-16 17:12:00 +0000
@@ -1796,9 +1796,7 @@ TESTCASE("InsertError2", "" ){
 }
 TESTCASE("Fill", 
 	 "Verify what happens when we fill the db" ){
-  INITIALIZER(runFillTable);
-  INITIALIZER(runPkRead);
-  FINALIZER(runClearTable2);
+  STEP(runFillTable);
 }
 TESTCASE("Bug25090", 
 	 "Verify what happens when we fill the db" ){

=== modified file 'storage/ndb/test/ndbapi/testMgm.cpp'
--- a/storage/ndb/test/ndbapi/testMgm.cpp	2007-06-13 12:54:00 +0000
+++ b/storage/ndb/test/ndbapi/testMgm.cpp	2009-01-14 10:47:05 +0000
@@ -787,6 +787,56 @@ done:
   return result;
 }
 
+// Enabled in 6.4
+#if NDB_VERSION_D > 60400
+int runTestBug40922(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NdbMgmd mgmd;
+
+  if (!mgmd.connect())
+    return NDBT_FAILED;
+
+  int filter[] = {
+    15, NDB_MGM_EVENT_CATEGORY_BACKUP,
+    1, NDB_MGM_EVENT_CATEGORY_STARTUP,
+    0
+  };
+  NdbLogEventHandle le_handle =
+    ndb_mgm_create_logevent_handle(mgmd.handle(), filter);
+  if (!le_handle)
+    return NDBT_FAILED;
+
+  g_info << "Calling ndb_log_event_get_next" << endl;
+
+  struct ndb_logevent le_event;
+  int r = ndb_logevent_get_next(le_handle,
+                                &le_event,
+                                2000);
+  g_info << "ndb_log_event_get_next returned " << r << endl;
+
+  int result = NDBT_FAILED;
+  if (r == 0)
+  {
+    // Got timeout
+    g_info << "ndb_logevent_get_next returned timeout" << endl;
+    result = NDBT_OK;
+  }
+  else
+  {
+    if(r>0)
+      g_err << "ERROR: Receieved unexpected event: "
+            << le_event.type << endl;
+    if(r<0)
+      g_err << "ERROR: ndb_logevent_get_next returned error: "
+            << r << endl;
+  }
+
+  ndb_mgm_destroy_logevent_handle(&le_handle);
+
+  return result;
+}
+#endif
+
 NDBT_TESTSUITE(testMgm);
 TESTCASE("SingleUserMode", 
 	 "Test single user mode"){
@@ -828,6 +878,14 @@ TESTCASE("ApiMgmStructEventTimeout",
   INITIALIZER(runTestMgmApiStructEventTimeout);
 
 }
+// Enabled in 6.4
+#if 0
+TESTCASE("Bug40922",
+	 "Make sure that ndb_logevent_get_next returns when "
+         "called with a timeout"){
+  INITIALIZER(runTestBug40922);
+}
+#endif
 NDBT_TESTSUITE_END(testMgm);
 
 int main(int argc, const char** argv){

=== modified file 'storage/ndb/test/ndbapi/testNodeRestart.cpp'
--- a/storage/ndb/test/ndbapi/testNodeRestart.cpp	2008-12-08 12:35:55 +0000
+++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp	2009-01-30 10:41:42 +0000
@@ -281,7 +281,7 @@ int runRestarter(NDBT_Context* ctx, NDBT
     return NDBT_FAILED;
   }
   
-  loops *= restarter.getNumDbNodes();
+  loops *= (restarter.getNumDbNodes() > 4 ? 4 : restarter.getNumDbNodes());
   while(i<loops && result != NDBT_FAILED && !ctx->isTestStopped()){
 
     int id = lastId % restarter.getNumDbNodes();
@@ -2506,7 +2506,10 @@ runMNF(NDBT_Context* ctx, NDBT_Step* ste
     {
       for (int i = 0; i<cnt; i++)
       {
-        res.insertErrorInNode(nodes[i], 7180);
+        if (res.getNextMasterNodeId(master) == nodes[i])
+          res.insertErrorInNode(nodes[i], 7180);
+        else
+          res.insertErrorInNode(nodes[i], 7205);
       }
 
       int lcp = 7099;
@@ -3117,6 +3120,105 @@ runBug41295(NDBT_Context* ctx, NDBT_Step
   return NDBT_OK;
 }
 
+int
+runBug41469(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NdbRestarter res;
+
+  if (res.getNumDbNodes() < 4)
+  {
+    ctx->stopTest();
+    return NDBT_OK;
+  }
+
+  int loops = ctx->getNumLoops();
+
+  int val0[] = { 7216, 0 }; 
+  int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+  for (int i = 0; i<loops; i++)
+  {
+    int master = res.getMasterNodeId();
+    int next = res.getNextMasterNodeId(master);
+    
+    if (res.dumpStateOneNode(master, val2, 2))
+      return NDBT_FAILED;
+    
+    ndbout_c("stopping %u, err 7216 (next: %u)", master, next);
+    val0[1] = next;
+    if (res.dumpStateOneNode(master, val0, 2))
+      return NDBT_FAILED;
+    
+    res.waitNodesNoStart(&master, 1);
+    res.startNodes(&master, 1);
+    ndbout_c("waiting for cluster started");
+    if (res.waitClusterStarted())
+    {
+      return NDBT_FAILED;
+    }
+  }
+  ctx->stopTest();
+  return NDBT_OK;
+}
+
+int
+runBug42422(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NdbRestarter res;
+  
+  if (res.getNumDbNodes() < 4)
+  {
+    ctx->stopTest();
+    return NDBT_OK;
+  }
+  
+  int loops = ctx->getNumLoops();
+  while (--loops >= 0)
+  {
+    int master = res.getMasterNodeId();
+    ndbout_c("master: %u", master);
+    int nodeId = res.getRandomNodeSameNodeGroup(master, rand()); 
+    ndbout_c("target: %u", nodeId);
+    int node2 = res.getRandomNodeOtherNodeGroup(nodeId, rand());
+    ndbout_c("node 2: %u", node2);
+    
+    res.restartOneDbNode(nodeId,
+                         /** initial */ false, 
+                         /** nostart */ true,
+                         /** abort   */ true);
+    
+    res.waitNodesNoStart(&nodeId, 1);
+    
+    int dump[] = { 9000, 0 };
+    dump[1] = node2;
+    
+    if (res.dumpStateOneNode(nodeId, dump, 2))
+      return NDBT_FAILED;
+    
+    int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+    if (res.dumpStateOneNode(nodeId, val2, 2))
+      return NDBT_FAILED;
+    
+    res.insertErrorInNode(nodeId, 937);
+    ndbout_c("%u : starting %u", __LINE__, nodeId);
+    res.startNodes(&nodeId, 1);
+    NdbSleep_SecSleep(3);
+    ndbout_c("%u : waiting for %u to not get not-started", __LINE__, nodeId);
+    res.waitNodesNoStart(&nodeId, 1);
+    
+    ndbout_c("%u : starting %u", __LINE__, nodeId);
+    res.startNodes(&nodeId, 1);
+    
+    ndbout_c("%u : waiting for cluster started", __LINE__);
+    if (res.waitClusterStarted())
+    {
+      return NDBT_FAILED;
+    }
+  }
+
+  ctx->stopTest();
+  return NDBT_OK;
+}
+
 NDBT_TESTSUITE(testNodeRestart);
 TESTCASE("NoLoad", 
 	 "Test that one node at a time can be stopped and then restarted "\
@@ -3564,6 +3666,15 @@ TESTCASE("Bug41295", "")
   STEP(runBug41295);
   FINALIZER(runClearTable);
 }
+TESTCASE("Bug41469", ""){
+  INITIALIZER(runLoadTable);
+  STEP(runBug41469);
+  STEP(runScanUpdateUntilStopped);
+  FINALIZER(runClearTable);
+}
+TESTCASE("Bug42422", ""){
+  INITIALIZER(runBug42422);
+}
 NDBT_TESTSUITE_END(testNodeRestart);
 
 int main(int argc, const char** argv){

=== modified file 'storage/ndb/test/ndbapi/testScan.cpp'
--- a/storage/ndb/test/ndbapi/testScan.cpp	2008-04-28 14:17:28 +0000
+++ b/storage/ndb/test/ndbapi/testScan.cpp	2009-02-04 12:35:22 +0000
@@ -1235,6 +1235,96 @@ runBug24447(NDBT_Context* ctx, NDBT_Step
   return NDBT_OK;
 }
 
+int runBug42545(NDBT_Context* ctx, NDBT_Step* step){
+
+  int loops = ctx->getNumLoops();
+
+  Ndb* pNdb = GETNDB(step);
+  NdbRestarter res;
+
+  if (res.getNumDbNodes() < 2)
+  {
+    ctx->stopTest();
+    return NDBT_OK;
+  }
+
+  const NdbDictionary::Index * pIdx = 
+    GETNDB(step)->getDictionary()->getIndex(orderedPkIdxName, 
+					    ctx->getTab()->getName());
+  
+
+  int i = 0;
+  while (pIdx && i++ < loops && !ctx->isTestStopped()) 
+  {
+    g_info << i << ": ";
+    NdbTransaction* pTrans = pNdb->startTransaction();
+    int nodeId = pTrans->getConnectedNodeId();
+    
+    {
+      Uint32 cnt = 0;
+      Vector<NdbTransaction*> translist;
+      while (cnt < 3)
+      {
+        NdbTransaction* p2 = pNdb->startTransaction();
+        translist.push_back(p2);
+        if (p2->getConnectedNodeId() == (Uint32)nodeId)
+          cnt++;
+      }
+      
+      for (size_t t = 0; t < translist.size(); t++)
+        translist[t]->close();
+      translist.clear();
+    }
+
+    NdbIndexScanOperation* 
+      pOp = pTrans->getNdbIndexScanOperation(pIdx, ctx->getTab());
+    
+    int r0 = pOp->readTuples(NdbOperation::LM_CommittedRead,
+                             NdbScanOperation::SF_OrderBy);
+
+    ndbout << "Restart node " << nodeId << endl; 
+    res.restartOneDbNode(nodeId,
+                         /** initial */ false, 
+                         /** nostart */ true,
+                         /** abort   */ true);
+    
+    res.waitNodesNoStart(&nodeId, 1);
+    res.startNodes(&nodeId, 1);
+    res.waitNodesStarted(&nodeId, 1);
+
+    int r1 = pTrans->execute(NdbTransaction::NoCommit);
+
+    int r2;
+    while ((r2 = pOp->nextResult()) == 0);
+
+    ndbout_c("r0: %d r1: %d r2: %d", r0, r1, r2);
+
+    pTrans->close();
+  }
+  
+  return NDBT_OK;
+}
+
+int
+initBug42559(NDBT_Context* ctx, NDBT_Step* step){
+  
+  int dump[] = { 7017  }; // Max LCP speed
+  NdbRestarter res;
+  res.dumpStateAllNodes(dump, 1);
+
+  return NDBT_OK;
+}
+int
+finalizeBug42559(NDBT_Context* ctx, NDBT_Step* step){
+  
+  int dump[] = { 7017, 1  }; // Restore config value
+  NdbRestarter res;
+  res.dumpStateAllNodes(dump, 2);
+
+  return NDBT_OK;
+}
+
+
 NDBT_TESTSUITE(testScan);
 TESTCASE("ScanRead", 
 	 "Verify scan requirement: It should be possible "\
@@ -1725,6 +1815,24 @@ TESTCASE("Bug36124",
   STEP(runBug36124);
   FINALIZER(runClearTable);
 }
+TESTCASE("Bug42545", "")
+{
+  INITIALIZER(createOrderedPkIndex);
+  INITIALIZER(runLoadTable);
+  STEP(runBug42545);
+  FINALIZER(createOrderedPkIndex_Drop);
+  FINALIZER(runClearTable);
+}
+TESTCASE("Bug42559", "") 
+{
+  INITIALIZER(initBug42559);
+  INITIALIZER(createOrderedPkIndex);
+  INITIALIZER(runLoadTable);
+  STEPS(runScanReadIndex, 70);
+  FINALIZER(createOrderedPkIndex_Drop);
+  FINALIZER(finalizeBug42559);
+  FINALIZER(runClearTable);
+}
 NDBT_TESTSUITE_END(testScan);
 
 int main(int argc, const char** argv){
@@ -1734,3 +1842,4 @@ int main(int argc, const char** argv){
 }
 
 template class Vector<Attrib*>;
+template class Vector<NdbTransaction*>;

=== modified file 'storage/ndb/test/ndbapi/testUpgrade.cpp'
--- a/storage/ndb/test/ndbapi/testUpgrade.cpp	2008-02-21 13:57:42 +0000
+++ b/storage/ndb/test/ndbapi/testUpgrade.cpp	2008-12-12 09:40:06 +0000
@@ -45,7 +45,7 @@ int runUpgrade_NR1(NDBT_Context* ctx, ND
     g_err << "Cluster '" << clusters.column("name")
           << "@" << tmp_result.column("connectstring") << "'" << endl;
 
-    if (restarter.waitClusterStarted(1))
+    if (restarter.waitClusterStarted())
       return NDBT_FAILED;
 
     // Restart ndb_mgmd(s)
@@ -65,7 +65,7 @@ int runUpgrade_NR1(NDBT_Context* ctx, ND
     }
 
     ndbout << "Waiting for started"<< endl;
-    if (restarter.waitClusterStarted(1))
+    if (restarter.waitClusterStarted())
       return NDBT_FAILED;
     ndbout << "Started"<< endl;
 
@@ -126,7 +126,7 @@ int runUpgrade_NR2(NDBT_Context* ctx, ND
     g_err << "Cluster '" << clusters.column("name")
           << "@" << tmp_result.column("connectstring") << "'" << endl;
 
-    if(restarter.waitClusterStarted(1))
+    if(restarter.waitClusterStarted())
       return NDBT_FAILED;
 
     // Restart ndb_mgmd(s)
@@ -144,6 +144,8 @@ int runUpgrade_NR2(NDBT_Context* ctx, ND
         return NDBT_FAILED;
     }
 
+    NdbSleep_SecSleep(5); // TODO, handle arbitration
+
     // Restart one ndbd in each node group
     SqlResultSet ndbds;
     if (!atrt.getNdbds(clusterId, ndbds))
@@ -239,7 +241,7 @@ int runUpgrade_NR3(NDBT_Context* ctx, ND
     g_err << "Cluster '" << clusters.column("name")
           << "@" << tmp_result.column("connectstring") << "'" << endl;
 
-    if(restarter.waitClusterStarted(1))
+    if(restarter.waitClusterStarted())
       return NDBT_FAILED;
 
     // Restart ndb_mgmd(s)
@@ -257,6 +259,8 @@ int runUpgrade_NR3(NDBT_Context* ctx, ND
         return NDBT_FAILED;
     }
 
+    NdbSleep_SecSleep(5); // TODO, handle arbitration
+
     // Restart one ndbd in each node group
     SqlResultSet ndbds;
     if (!atrt.getNdbds(clusterId, ndbds))
@@ -338,14 +342,14 @@ int runCheckStarted(NDBT_Context* ctx, N
 
   // Check cluster is started
   NdbRestarter restarter;
-  if(restarter.waitClusterStarted(1) != 0){
+  if(restarter.waitClusterStarted() != 0){
     g_err << "All nodes was not started " << endl;
     return NDBT_FAILED;
   }
 
   // Check atrtclient is started
   AtrtClient atrt;
-  if(!atrt.waitConnected(60)){
+  if(!atrt.waitConnected()){
     g_err << "atrt server was not started " << endl;
     return NDBT_FAILED;
   }
@@ -357,7 +361,7 @@ int runCheckStarted(NDBT_Context* ctx, N
 
   while (procs.next())
   {
-    if (procs.columnAsInt("node_id") == -1){
+    if (procs.columnAsInt("node_id") == (unsigned)-1){
       ndbout << "Found one process with node_id -1, "
              << "use --fix-nodeid=1 to atrt to fix this" << endl;
       return NDBT_FAILED;
@@ -367,118 +371,21 @@ int runCheckStarted(NDBT_Context* ctx, N
   return NDBT_OK;
 }
 
-
-int runRestoreProcs(NDBT_Context* ctx, NDBT_Step* step){
-  AtrtClient atrt;
-  g_err << "Starting to reset..." << endl;
-
-  SqlResultSet clusters;
-  if (!atrt.getClusters(clusters))
-    return NDBT_FAILED;
-
-  while (clusters.next())
-  {
-    uint clusterId= clusters.columnAsInt("id");
-    SqlResultSet tmp_result;
-    if (!atrt.getConnectString(clusterId, tmp_result))
-      return NDBT_FAILED;
-
-    NdbRestarter restarter(tmp_result.column("connectstring"));
-    restarter.setReconnect(true); // Restarting mgmd
-    g_err << "Cluster '" << clusters.column("name")
-          << "@" << tmp_result.column("connectstring") << "'" << endl;
-
-    if(restarter.waitClusterStarted(1))
-      return NDBT_FAILED;
-
-    // Reset ndb_mgmd(s)
-    SqlResultSet mgmds;
-    if (!atrt.getMgmds(clusterId, mgmds))
-      return NDBT_FAILED;
-
-    while (mgmds.next())
-    {
-      ndbout << "Reset mgmd" << mgmds.columnAsInt("node_id") << endl;
-      if (!atrt.resetProc(mgmds.columnAsInt("id")))
-        return NDBT_FAILED;
-
-      if(restarter.waitConnected() != 0)
-        return NDBT_FAILED;
-    }
-
-    if(restarter.waitClusterStarted(1))
-      return NDBT_FAILED;
-
-    // Reset ndbd(s)
-    SqlResultSet ndbds;
-    if (!atrt.getNdbds(clusterId, ndbds))
-      return NDBT_FAILED;
-
-    while(ndbds.next())
-    {
-      int nodeId = ndbds.columnAsInt("node_id");
-      int processId = ndbds.columnAsInt("id");
-      ndbout << "Reset node " << nodeId << endl;
-
-      if (!atrt.resetProc(processId))
-        return NDBT_FAILED;
-
-    }
-
-    if (restarter.waitClusterNoStart())
-      return NDBT_FAILED;
-
-  }
-
-
-  // All nodes are in no start, start them up again
-  clusters.reset();
-  while (clusters.next())
-  {
-    uint clusterId= clusters.columnAsInt("id");
-    SqlResultSet tmp_result;
-    if (!atrt.getConnectString(clusterId, tmp_result))
-      return NDBT_FAILED;
-
-    NdbRestarter restarter(tmp_result.column("connectstring"));
-    g_err << "Cluster '" << clusters.column("name")
-          << "@" << tmp_result.column("connectstring") << "'" << endl;
-
-    if (restarter.waitClusterNoStart())
-      return NDBT_FAILED;
-
-    ndbout << "Starting and wait for started..." << endl;
-    if (restarter.startAll())
-      return NDBT_FAILED;
-
-    if (restarter.waitClusterStarted())
-      return NDBT_FAILED;
-  }
-
-  ctx->stopTest();
-  return NDBT_OK;
-}
-
-
-
 NDBT_TESTSUITE(testUpgrade);
 TESTCASE("Upgrade_NR1",
 	 "Test that one node at a time can be upgraded"){
   INITIALIZER(runCheckStarted);
   STEP(runUpgrade_NR1);
-  FINALIZER(runRestoreProcs);
 }
 TESTCASE("Upgrade_NR2",
 	 "Test that one node in each nodegroup can be upgradde simultaneously"){
   INITIALIZER(runCheckStarted);
   STEP(runUpgrade_NR2);
-  FINALIZER(runRestoreProcs);
 }
 TESTCASE("Upgrade_NR3",
 	 "Test that one node in each nodegroup can be upgrade simultaneously"){
   INITIALIZER(runCheckStarted);
   STEP(runUpgrade_NR3);
-  FINALIZER(runRestoreProcs);
 }
 NDBT_TESTSUITE_END(testUpgrade);
 

=== modified file 'storage/ndb/test/run-test/Makefile.am'
--- a/storage/ndb/test/run-test/Makefile.am	2008-08-23 20:29:50 +0000
+++ b/storage/ndb/test/run-test/Makefile.am	2008-12-11 13:47:52 +0000
@@ -23,7 +23,8 @@ include $(top_srcdir)/storage/ndb/config
 test_PROGRAMS = atrt
 test_DATA=daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt \
           conf-ndbmaster.cnf \
-          conf-dl145a.cnf test-tests.txt conf-test.cnf db.sql
+          conf-dl145a.cnf test-tests.txt conf-test.cnf db.sql \
+          conf-upgrade.cnf upgrade-tests.txt
 
 test_SCRIPTS=atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \
           atrt-clear-result.sh autotest-run.sh atrt-backtrace.sh

=== modified file 'storage/ndb/test/run-test/atrt-gather-result.sh'
--- a/storage/ndb/test/run-test/atrt-gather-result.sh	2007-02-16 20:09:38 +0000
+++ b/storage/ndb/test/run-test/atrt-gather-result.sh	2008-12-12 09:40:06 +0000
@@ -12,5 +12,18 @@ do
   shift
 done
 
-
-
+#
+# clean up table files so the gathered results don't get too large
+#
+lst=$(find . -name '*.frm')
+if [ "$lst" ]
+then
+    for i in $lst
+    do
+	basename=$(echo $i | sed 's!\.frm!!')
+	if [ "$basename" ]
+	then
+	    rm -f $basename.*
+	fi
+    done
+fi

=== modified file 'storage/ndb/test/run-test/atrt.hpp'
--- a/storage/ndb/test/run-test/atrt.hpp	2008-12-17 18:40:14 +0000
+++ b/storage/ndb/test/run-test/atrt.hpp	2009-01-08 11:57:59 +0000
@@ -149,10 +149,17 @@ bool setup_hosts(atrt_config&);
 
 bool do_command(atrt_config& config);
 
-bool
-start_process(atrt_process & proc);
-bool
-stop_process(atrt_process & proc);
+bool start_process(atrt_process & proc);
+bool stop_process(atrt_process & proc);
+
+/**
+ * Check the configuration for any changes made
+ *   for the duration of the latest running test;
+ *   if so, return true and reset those changes
+ *   (true indicates that a restart is needed to actually
+ *    reset the running processes)
+ */
+bool reset_config(atrt_config&);
 
 NdbOut&
 operator<<(NdbOut& out, const atrt_process& proc);

=== modified file 'storage/ndb/test/run-test/autotest-boot.sh'
--- a/storage/ndb/test/run-test/autotest-boot.sh	2008-12-10 15:18:06 +0000
+++ b/storage/ndb/test/run-test/autotest-boot.sh	2008-12-16 17:12:00 +0000
@@ -166,9 +166,9 @@ fi
 
 if [ -z "$tag1" ]
 then
-    dst_place1=${build_dir}/clone-$clone1-$DATE.$$
+    dst_place1=${build_dir}/clone1-$clone1-$DATE.$$
 else
-    dst_place1=${build_dir}/clone-$tag1-$DATE.$$
+    dst_place1=${build_dir}/clone1-$tag1-$DATE.$$
     extra_args="$extra_args --clone1=$tag1"
     extra_clone1="-r$tag1"
 fi
@@ -226,7 +226,7 @@ if [ "$build" ]
 then
     rm -rf $dst_place0
 
-    if [ "$dst_place1" ]
+    if [ "$clone1" ]
     then
 	rm -rf $dst_place1
     fi

=== modified file 'storage/ndb/test/run-test/autotest-run.sh'
--- a/storage/ndb/test/run-test/autotest-run.sh	2008-12-10 15:18:06 +0000
+++ b/storage/ndb/test/run-test/autotest-run.sh	2008-12-12 11:09:03 +0000
@@ -213,8 +213,9 @@ choose_conf(){
 #########################################
 
 count_hosts(){
-    cnt=`grep "CHOOSE_host" $1 | awk '{for(i=1; i<=NF;i++) \
-    if(index($i, "CHOOSE_host") > 0) print $i;}' | sort | uniq | wc -l`
+    ch="CHOOSE_host"
+    cnt=$(for i in `grep $ch $1 | sed 's!,! !g'` ; do echo $i; done\
+          | grep $ch | sort | uniq | wc -l)
     echo $cnt
 }
 
@@ -247,18 +248,22 @@ cd $run_dir
 choose $conf $hosts > d.tmp.$$
 sed -e s,CHOOSE_dir,"$run_dir/run",g < d.tmp.$$ > my.cnf
 
+prefix="--prefix=$install_dir0"
+if [ "$install_dir1" ]
+then
+    prefix="$prefix --prefix1=$install_dir1"
+fi
+
+
 # Setup configuration
-$atrt Cdq my.cnf
+$atrt Cdq $prefix my.cnf
 
 # Start...
 args=""
 args="--report-file=report.txt"
 args="$args --log-file=log.txt"
 args="$args --testcase-file=$test_dir/$RUN-tests.txt"
-if [ "$install_dir1" ]
-then
-    args="$args --prefix=$install_dir0 --prefix1=$install_dir1"
-fi
+args="$args $prefix"
 $atrt $args my.cnf
 
 # Make tar-ball

=== modified file 'storage/ndb/test/run-test/command.cpp'
--- a/storage/ndb/test/run-test/command.cpp	2008-11-27 18:03:09 +0000
+++ b/storage/ndb/test/run-test/command.cpp	2008-12-12 08:48:37 +0000
@@ -83,10 +83,12 @@ do_change_version(atrt_config& config, S
   atrt_process& proc= *config.m_processes[process_id];
 
   // Save current proc state
-  assert(proc.m_save.m_saved == false);
-  proc.m_save.m_proc= proc.m_proc;
-  proc.m_save.m_saved= true;
-
+  if (proc.m_save.m_saved == false)
+  {
+    proc.m_save.m_proc= proc.m_proc;
+    proc.m_save.m_saved= true;
+  }
+  
   g_logger.info("stopping process...");
   if (!stop_process(proc))
     return false;
@@ -145,7 +147,7 @@ do_reset_proc(atrt_config& config, SqlRe
   {
     ndbout << "process has not changed" << endl;
   }
-
+  
   g_logger.info("starting process...");
   if (!start_process(proc))
     return false;

=== added file 'storage/ndb/test/run-test/conf-upgrade.cnf'
--- a/storage/ndb/test/run-test/conf-upgrade.cnf	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/test/run-test/conf-upgrade.cnf	2008-12-15 20:06:12 +0000
@@ -0,0 +1,32 @@
+[atrt]
+basedir = CHOOSE_dir
+baseport = 14000
+clusters = .4node
+mysqld = CHOOSE_host1
+fix-nodeid=1
+
+[ndb_mgmd]
+
+[mysqld]
+skip-innodb
+loose-skip-bdb
+socket=mysql.sock
+
+[client]
+protocol=tcp
+
+[cluster_config.4node]
+ndb_mgmd = CHOOSE_host1,CHOOSE_host1
+ndbd = CHOOSE_host2,CHOOSE_host3,CHOOSE_host2,CHOOSE_host3
+ndbapi= CHOOSE_host1,CHOOSE_host1,CHOOSE_host1
+
+NoOfReplicas = 2
+IndexMemory = 50M 
+DataMemory = 100M
+BackupMemory = 64M
+MaxNoOfConcurrentScans = 100
+MaxNoOfSavedMessages= 1000
+SendBufferMemory = 2M
+NoOfFragmentLogFiles = 4
+FragmentLogFileSize = 64M
+

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2008-12-09 18:59:54 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2009-02-04 13:08:05 +0000
@@ -309,6 +309,10 @@ max-time: 500
 cmd: testScan
 args: -n ScanRead488O -l 10 T6 D1 D2 
 
+max-time: 500
+cmd: testScan
+args: -n Bug42559 T6 D1 D2 
+
 max-time: 1000
 cmd: testScan
 args: -n ScanRead488T -l 10 T6 D1 D2 
@@ -990,6 +994,10 @@ max-time: 5000
 cmd: testNodeRestart
 args: -n GCP T1
 
+max-time: 1200
+cmd: testNodeRestart
+args: -n Bug41469 T1
+
 max-time: 180
 cmd: testIndex
 args: -n Bug28804 T1 T6
@@ -1159,7 +1167,7 @@ args: -l 100 -n Bug37158
 # EOF 2008-06-03
 max-time: 500
 cmd: test_event
-args -n bug37672 T1
+args: -n bug37672 T1
 
 #EOF 2008-07-04
 max-time: 500
@@ -1169,10 +1177,18 @@ args: 
 #EOF 2008-07-09
 max-time: 600
 cmd: test_event
-args -r 5000 -n Bug30780 T1
+args: -r 5000 -n Bug30780 T1
 
 #EOF 2008-08-11
 max-time: 1200
 cmd: testNodeRestart
-args -n Bug41295 T1
+args: -n Bug41295 T1
+
+max-time: 1200
+cmd: testNodeRestart
+args: -n Bug42422 -l 1 T1
+
+max-time: 300
+cmd: testScan
+args: -n Bug42545 -l 1 T1
 

=== modified file 'storage/ndb/test/run-test/db.cpp'
--- a/storage/ndb/test/run-test/db.cpp	2008-11-10 11:41:44 +0000
+++ b/storage/ndb/test/run-test/db.cpp	2008-12-19 06:40:28 +0000
@@ -132,14 +132,23 @@ connect_mysqld(atrt_process* proc)
 
   const char * port = find(proc, "--port=");
   const char * socket = find(proc, "--socket=");
-  assert(port);
+  if (port == 0 && socket == 0)
+  {
+    g_logger.error("Neither socket nor port specified...cant connect to mysql");
+    return false;
+  }
   
+  if (port)
+  {
+    mysql_protocol_type val = MYSQL_PROTOCOL_TCP;
+    mysql_options(&proc->m_mysql, MYSQL_OPT_PROTOCOL, &val);
+  }
   for (size_t i = 0; i<20; i++)
   {
     if (mysql_real_connect(&proc->m_mysql,
 			   proc->m_host->m_hostname.c_str(),
 			   "root", "", "test",
-			   atoi(port),
+			   port ? atoi(port) : 0,
 			   socket,
 			   0))
     {
@@ -152,8 +161,8 @@ connect_mysqld(atrt_process* proc)
   
   g_logger.error("Failed to connect to mysqld err: >%s< >%s:%u:%s<",
 		 mysql_error(&proc->m_mysql),
-		 proc->m_host->m_hostname.c_str(),atoi(port),
-		 socket);
+		 proc->m_host->m_hostname.c_str(), port ? atoi(port) : 0,
+		 socket ? socket : "<null>");
   return false;
 }
 
@@ -246,7 +255,7 @@ populate_options(MYSQL* mysql, MYSQL_STM
     
     if (mysql_stmt_execute(stmt))
     {
-      g_logger.error("Failed to execute: %s", mysql_error(mysql));
+      g_logger.error("0 Failed to execute: %s", mysql_error(mysql));
       return false;
     }
     kk++;
@@ -286,11 +295,11 @@ populate_db(atrt_config& config, atrt_pr
       
       if (mysql_stmt_execute(stmt))
       {
-	g_logger.error("Failed to execute: %s", mysql_error(&mysqld->m_mysql));
+	g_logger.error("1 Failed to execute: %s", mysql_error(&mysqld->m_mysql));
 	return false;
       }
-      mysql_stmt_close(stmt);
     }
+    mysql_stmt_close(stmt);
   }
 
   {
@@ -319,7 +328,7 @@ populate_db(atrt_config& config, atrt_pr
       
       if (mysql_stmt_execute(stmt))
       {
-	g_logger.error("Failed to execute: %s", mysql_error(&mysqld->m_mysql));
+	g_logger.error("2 Failed to execute: %s", mysql_error(&mysqld->m_mysql));
 	return false;
       }
     }
@@ -386,7 +395,7 @@ populate_db(atrt_config& config, atrt_pr
       
       if (mysql_stmt_execute(stmt))
       {
-	g_logger.error("Failed to execute: %s", mysql_error(&mysqld->m_mysql));
+	g_logger.error("3 Failed to execute: %s", mysql_error(&mysqld->m_mysql));
 	return false;
       }
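
Two details in the connect_mysqld change are worth spelling out: the hard assert on --port is replaced by a real error when neither endpoint is configured, and MYSQL_OPT_PROTOCOL is set so that a host of "localhost" does not make the client library silently prefer the unix socket over the requested TCP port. A condensed sketch of that logic against the standard MySQL C API (host and credentials here are placeholders, not atrt's values):

    #include <mysql.h>
    #include <cstdlib>

    // Prefer TCP when a --port was found; fall back to the socket otherwise.
    bool connect_one(MYSQL* mysql, const char* host,
                     const char* port, const char* socket)
    {
      if (port == 0 && socket == 0)
        return false;                       // nothing to connect to

      if (port)
      {
        // Force TCP so "localhost" cannot pick the unix socket instead.
        mysql_protocol_type val = MYSQL_PROTOCOL_TCP;
        mysql_options(mysql, MYSQL_OPT_PROTOCOL, &val);
      }
      return mysql_real_connect(mysql, host, "root", "", "test",
                                port ? atoi(port) : 0, socket, 0) != 0;
    }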
 

=== modified file 'storage/ndb/test/run-test/files.cpp'
--- a/storage/ndb/test/run-test/files.cpp	2008-08-23 20:29:50 +0000
+++ b/storage/ndb/test/run-test/files.cpp	2008-12-11 13:47:52 +0000
@@ -116,6 +116,11 @@ setup_files(atrt_config& config, int set
   BaseString mycnf;
   mycnf.assfmt("%s/my.cnf", g_basedir);
   
+  if (!create_directory(g_basedir))
+  {
+    return false;
+  }
+
   if (mycnf != g_my_cnf)
   {
     struct stat sbuf;

=== modified file 'storage/ndb/test/run-test/main.cpp'
--- a/storage/ndb/test/run-test/main.cpp	2008-09-25 10:55:39 +0000
+++ b/storage/ndb/test/run-test/main.cpp	2009-01-08 11:57:59 +0000
@@ -254,15 +254,18 @@ main(int argc, char ** argv)
   /**
    * Main loop
    */
-  while(!feof(g_test_case_file)){
+  while(!feof(g_test_case_file))
+  {
     /**
      * Do we need to restart ndb
      */
-    if(restart){
+    if(restart)
+    {
+      restart = false;
       g_logger.info("(Re)starting server processes");
       if(!stop_processes(g_config, ~0))
 	goto end;
-
+      
       if (!setup_directories(g_config, 2))
 	goto end;
       
@@ -276,7 +279,7 @@ main(int argc, char ** argv)
       {
         g_logger.info("Failed to start server processes");
         g_logger.info("Gathering logs and saving them as test %u", test_no);
-
+        
         int tmp;
         if(!gather_result(g_config, &tmp))
           goto end;
@@ -327,31 +330,37 @@ main(int argc, char ** argv)
     
     const time_t start = time(0);
     time_t now = start;
-    do {
+    do 
+    {
       if(!update_status(g_config, atrt_process::AP_ALL))
 	goto end;
-
-      if(is_running(g_config, p_ndb) != 2){
+      
+      if(is_running(g_config, p_ndb) != 2)
+      {
 	result = ERR_NDB_FAILED;
 	break;
       }
-
-      if(is_running(g_config, p_servers) != 2){
+      
+      if(is_running(g_config, p_servers) != 2)
+      {
 	result = ERR_SERVERS_FAILED;
 	break;
       }
 
-      if(is_running(g_config, p_clients) == 0){
+      if(is_running(g_config, p_clients) == 0)
+      {
 	break;
       }
 
-      if (!do_command(g_config)){
+      if (!do_command(g_config))
+      {
         result = ERR_COMMAND_FAILED;
 	break;
       }
 
       now = time(0);
-      if(now  > (start + test_case.m_max_time)){
+      if(now  > (start + test_case.m_max_time))
+      {
 	result = ERR_MAX_TIME_ELAPSED;
 	break;
       }
@@ -371,18 +380,20 @@ main(int argc, char ** argv)
 		  test_no, 
 		  (result == 0 ? "OK" : "FAILED"), result);
 
-    if(g_report_file != 0){
+    if(g_report_file != 0)
+    {
       fprintf(g_report_file, "%s ; %d ; %d ; %ld\n",
 	      test_case.m_name.c_str(), test_no, result, elapsed);
       fflush(g_report_file);
     }    
 
-    if(g_mode == 0 && result){
+    if(g_mode == 0 && result)
+    {
       g_logger.info
 	("Encountered failed test in interactive mode - terminating");
       break;
     }
-
+    
     BaseString resdir;
     resdir.assfmt("result.%d", test_no);
     remove_dir(resdir.c_str(), true);
@@ -400,11 +411,15 @@ main(int argc, char ** argv)
     {
       remove_dir("result", true);
     }
+   
+    if (reset_config(g_config))
+    {
+      restart = true;
+    }
     
-    if(result != 0){
+    if(result != 0)
+    {
       restart = true;
-    } else {
-      restart = false;
     }
     test_no++;
   }
@@ -858,7 +873,8 @@ next:
 bool
 start_process(atrt_process & proc){
   if(proc.m_proc.m_id != -1){
-    g_logger.critical("starting already started process: %d", proc.m_index);
+    g_logger.critical("starting already started process: %u", 
+                      (unsigned)proc.m_index);
     return false;
   }
   
@@ -1126,13 +1142,23 @@ setup_test_case(atrt_config& config, con
     if(proc.m_type == atrt_process::AP_NDB_API || 
        proc.m_type == atrt_process::AP_CLIENT)
     {
-      proc.m_proc.m_path = "";
+      BaseString cmd;
       if (tc.m_command.c_str()[0] != '/')
       {
-	proc.m_proc.m_path.appfmt("%s/bin/", g_prefix);
+        cmd.appfmt("%s/bin/", g_prefix);
+      }
+      cmd.append(tc.m_command.c_str());
+
+      if (0) // valgrind
+      {
+        proc.m_proc.m_path = "/usr/bin/valgrind";
+        proc.m_proc.m_args.appfmt("%s %s", cmd.c_str(), tc.m_args.c_str());
+      }
+      else
+      {
+        proc.m_proc.m_path = cmd;
+        proc.m_proc.m_args.assign(tc.m_args);
       }
-      proc.m_proc.m_path.append(tc.m_command.c_str());
-      proc.m_proc.m_args.assign(tc.m_args);
       if(!tc.m_run_all)
         break;
     }
@@ -1208,28 +1234,38 @@ setup_hosts(atrt_config& config){
   return true;
 }
 
+static
+bool
+do_rsync(const char *dir, const char *dst)
+{
+  BaseString tmp = g_setup_progname;
+  tmp.appfmt(" %s %s/ %s", dst, dir, dir);
+  
+  g_logger.info("rsyncing %s to %s", dir, dst);
+  g_logger.debug("system(%s)", tmp.c_str());
+  const int r1 = system(tmp.c_str());
+  if(r1 != 0)
+  {
+    g_logger.critical("Failed to rsync %s to %s", dir, dst);
+    return false;
+  }
+  
+  return true;
+}
+
 bool
 deploy(atrt_config & config)
 {
   for (size_t i = 0; i<config.m_hosts.size(); i++)
   {
-    BaseString tmp = g_setup_progname;
-    tmp.appfmt(" %s %s/ %s",
-	       config.m_hosts[i]->m_hostname.c_str(),
-	       g_prefix,
-	       g_prefix);
-  
-    g_logger.info("rsyncing %s to %s", g_prefix,
-		  config.m_hosts[i]->m_hostname.c_str());
-    g_logger.debug("system(%s)", tmp.c_str());
-    const int r1 = system(tmp.c_str());
-    if(r1 != 0)
-    {
-      g_logger.critical("Failed to rsync %s to %s", 
-			g_prefix,
-			config.m_hosts[i]->m_hostname.c_str());
+    if (!do_rsync(g_basedir, config.m_hosts[i]->m_hostname.c_str()))
+      return false;
+
+    if (!do_rsync(g_prefix, config.m_hosts[i]->m_hostname.c_str()))
+      return false;
+    
+    if (g_prefix1 && !do_rsync(g_prefix1, config.m_hosts[i]->m_hostname.c_str()))
       return false;
-    }
   }
   
   return true;
@@ -1328,6 +1364,27 @@ require(bool x)
     abort();
 }
 
+bool
+reset_config(atrt_config & config)
+{
+  bool changed = false;
+  for(size_t i = 0; i<config.m_processes.size(); i++)
+  {
+    atrt_process & proc = *config.m_processes[i]; 
+    if (proc.m_save.m_saved)
+    {
+      if (!stop_process(proc))
+        return false;
+      
+      changed = true;
+      proc.m_save.m_saved = false;
+      proc.m_proc = proc.m_save.m_proc;
+      proc.m_proc.m_id = -1;
+    }
+  }
+  return changed;
+}
+
 template class Vector<Vector<SimpleCpcClient::Process> >;
 template class Vector<atrt_host*>;
 template class Vector<atrt_cluster*>;

=== modified file 'storage/ndb/test/run-test/setup.cpp'
--- a/storage/ndb/test/run-test/setup.cpp	2008-11-27 18:03:09 +0000
+++ b/storage/ndb/test/run-test/setup.cpp	2008-12-12 08:04:28 +0000
@@ -49,14 +49,14 @@ bool
 setup_config(atrt_config& config, const char* atrt_mysqld)
 {
   BaseString tmp(g_clusters);
-  Vector<BaseString> clusters;
-  tmp.split(clusters, ",");
-
+  
   if (atrt_mysqld)
   {
-    clusters.push_back(BaseString(".atrt"));
+    tmp.appfmt(",.atrt");
   }
-  
+  Vector<BaseString> clusters;
+  tmp.split(clusters, ",");
+
   bool fqpn = clusters.size() > 1 || g_fqpn;
   
   size_t j,k;
@@ -131,6 +131,7 @@ setup_config(atrt_config& config, const 
       proc_args[1].value = 0;
       proc_args[2].value = 0;      
       proc_args[3].value = 0;      
+      proc_args[4].value = atrt_mysqld;
     }
 
     /**
@@ -154,10 +155,11 @@ setup_config(atrt_config& config, const 
       /**
        * Load cluster options
        */
-      
-      argc = 1;
+      int argc = 1;
+      const char * argv[] = { "atrt", 0, 0 };
       argv[argc++] = buf.c_str();
       const char *groups[] = { "mysql_cluster", 0 };
+      char ** tmp = (char**)argv;
       ret = load_defaults(g_my_cnf, groups, &argc, &tmp);
       
       if (ret)
@@ -201,7 +203,7 @@ load_process(atrt_config& config, atrt_c
 {
   atrt_host * host_ptr = find(hostname, config.m_hosts);
   atrt_process *proc_ptr = new atrt_process;
-  
+
   config.m_processes.push_back(proc_ptr);
   host_ptr->m_processes.push_back(proc_ptr);
   cluster.m_processes.push_back(proc_ptr);
@@ -212,6 +214,7 @@ load_process(atrt_config& config, atrt_c
   proc.m_index = idx;
   proc.m_type = type;
   proc.m_host = host_ptr;
+  proc.m_save.m_saved = false;
   if (g_fix_nodeid)
     proc.m_nodeid= cluster.m_next_nodeid++;
   else

=== added file 'storage/ndb/test/run-test/upgrade-tests.txt'
--- a/storage/ndb/test/run-test/upgrade-tests.txt	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/test/run-test/upgrade-tests.txt	2008-12-12 08:56:22 +0000
@@ -0,0 +1,12 @@
+cmd: testUpgrade
+args: -n Upgrade_NR1 T1
+max-time: 600
+
+cmd: testUpgrade
+args: -n Upgrade_NR2 T1
+max-time: 600
+
+cmd: testUpgrade
+args: -n Upgrade_NR3 T1
+max-time: 600
+

=== modified file 'storage/ndb/test/src/DbUtil.cpp'
--- a/storage/ndb/test/src/DbUtil.cpp	2008-11-10 10:53:34 +0000
+++ b/storage/ndb/test/src/DbUtil.cpp	2008-12-12 08:04:28 +0000
@@ -358,7 +358,7 @@ DbUtil::runQuery(const char* sql,
   MYSQL_BIND *bind_param = new MYSQL_BIND[params];
   NdbAutoObjArrayPtr<MYSQL_BIND> _guard(bind_param);
 
-  bzero(bind_param, sizeof(bind_param));
+  bzero(bind_param, params * sizeof(MYSQL_BIND));
 
   for(uint i= 0; i < mysql_stmt_param_count(stmt); i++)
   {
@@ -429,7 +429,7 @@ DbUtil::runQuery(const char* sql,
     uint num_fields= mysql_num_fields(res);
     MYSQL_BIND *bind_result = new MYSQL_BIND[num_fields];
     NdbAutoObjArrayPtr<MYSQL_BIND> _guard1(bind_result);
-    bzero(bind_result, sizeof(bind_result));
+    bzero(bind_result, num_fields * sizeof(MYSQL_BIND));
 
     for (uint i= 0; i < num_fields; i++)
     {
@@ -437,6 +437,8 @@ DbUtil::runQuery(const char* sql,
 
       switch(fields[i].type){
       case MYSQL_TYPE_STRING:
+        buf_len = fields[i].length + 1;
+        break;
       case MYSQL_TYPE_VARCHAR:
       case MYSQL_TYPE_VAR_STRING:
         buf_len= fields[i].max_length + 1;
@@ -444,14 +446,18 @@ DbUtil::runQuery(const char* sql,
       case MYSQL_TYPE_LONGLONG:
         buf_len= sizeof(long long);
         break;
+      case MYSQL_TYPE_LONG:
+        buf_len = sizeof(long);
+        break;
       default:
         break;
       }
-
+      
       bind_result[i].buffer_type= fields[i].type;
       bind_result[i].buffer= malloc(buf_len);
       bind_result[i].buffer_length= buf_len;
-
+      bind_result[i].is_null = (my_bool*)malloc(sizeof(my_bool));
+      * bind_result[i].is_null = 0;
     }
 
     if (mysql_stmt_bind_result(stmt, bind_result)){
@@ -464,8 +470,11 @@ DbUtil::runQuery(const char* sql,
     {
       Properties curr(true);
       for (uint i= 0; i < num_fields; i++){
+        if (* bind_result[i].is_null)
+          continue;
         switch(fields[i].type){
         case MYSQL_TYPE_STRING:
+	  ((char*)bind_result[i].buffer)[fields[i].max_length] = 0;
         case MYSQL_TYPE_VARCHAR:
         case MYSQL_TYPE_VAR_STRING:
           curr.put(fields[i].name, (char*)bind_result[i].buffer);
@@ -479,7 +488,7 @@ DbUtil::runQuery(const char* sql,
         default:
           curr.put(fields[i].name, *(int*)bind_result[i].buffer);
           break;
-       }
+        }
       }
       rows.put("row", row++, &curr);
     }
@@ -487,8 +496,10 @@ DbUtil::runQuery(const char* sql,
     mysql_free_result(res);
 
     for (uint i= 0; i < num_fields; i++)
+    {
       free(bind_result[i].buffer);
-
+      free(bind_result[i].is_null);
+    }
   }
 
   // Save stats in result set
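
The two bzero fixes above correct a classic C pitfall: sizeof applied to a pointer measures the pointer (4 or 8 bytes), not the array it points to, so only the first few bytes of the MYSQL_BIND arrays were actually being zeroed. A self-contained illustration (any struct would do in place of MYSQL_BIND):

    #include <cstdio>
    #include <cstring>

    struct Bind { void* buffer; unsigned long length; };  // stand-in struct

    int main()
    {
      const unsigned n = 16;
      Bind* bind = new Bind[n];

      printf("sizeof(bind)     = %zu\n", sizeof(bind));      // pointer size only
      printf("n * sizeof(Bind) = %zu\n", n * sizeof(Bind));  // the whole array

      memset(bind, 0, n * sizeof(Bind));  // correct form, matching the fix
      delete[] bind;
      return 0;
    }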

=== modified file 'storage/ndb/test/src/HugoTransactions.cpp'
--- a/storage/ndb/test/src/HugoTransactions.cpp	2008-11-17 09:26:25 +0000
+++ b/storage/ndb/test/src/HugoTransactions.cpp	2009-02-04 12:32:27 +0000
@@ -51,8 +51,8 @@ HugoTransactions::scanReadRecords(Ndb* p
   while (true){
 
     if (retryAttempt >= m_retryMax){
-      g_err << "ERROR: has retried this operation " << retryAttempt 
-	    << " times, failing!" << endl;
+      g_err << __LINE__ << " ERROR: has retried this operation " 
+            << retryAttempt << " times, failing!" << endl;
       return NDBT_FAILED;
     }
 
@@ -154,6 +154,18 @@ HugoTransactions::scanReadRecords(Ndb* p
 	  // Too many active scans, no limit on number of retry attempts
 	  break;
 	default:
+          if (err.classification == NdbError::TimeoutExpired)
+          {
+            if (retryAttempt >= (m_retryMax / 10) && 
+                (parallelism == 0 || parallelism > 1))
+            {
+              /**
+               * decrease parallelism
+               */
+              parallelism = 1;
+              ndbout_c("decrease parallelism");
+            }
+          }
 	  retryAttempt++;
 	}
 	continue;
@@ -195,8 +207,10 @@ HugoTransactions::scanReadRecords(Ndb* p
   while (true){
 
     if (retryAttempt >= m_retryMax){
-      g_err << "ERROR: has retried this operation " << retryAttempt 
-	    << " times, failing!" << endl;
+      g_err << __LINE__ << " ERROR: has retried this operation " 
+            << retryAttempt  << " times, failing!" << endl;
+      g_err << "lm: " << Uint32(lm) << " flags: H'" << hex << scan_flags
+            << endl;
       return NDBT_FAILED;
     }
 
@@ -298,6 +312,18 @@ HugoTransactions::scanReadRecords(Ndb* p
 	  // Too many active scans, no limit on number of retry attempts
 	  break;
 	default:
+          if (err.classification == NdbError::TimeoutExpired)
+          {
+            if (retryAttempt >= (m_retryMax / 10) && 
+                (parallelism == 0 || parallelism > 1))
+            {
+              /**
+               * decrease parallelism
+               */
+              parallelism = 1;
+              ndbout_c("decrease parallelism");
+            }
+          }
 	  retryAttempt++;
 	}
 	continue;
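
The scanReadRecords changes add a simple degradation strategy: once a tenth of the retry budget has been burned on timeouts, the scan drops to parallelism 1 so that the remaining retries put less load on the data nodes (a parallelism of 0 asks the API to choose a value itself, hence the extra guard). Stripped of the NDB specifics, the retry shape is roughly the following, where run_scan and timed_out are hypothetical stand-ins:

    // Hypothetical stand-ins for the NDB scan attempt and its outcome.
    bool run_scan(int parallelism);
    bool timed_out();

    int scan_with_fallback(int retryMax, int parallelism)
    {
      for (int attempt = 0; attempt < retryMax; attempt++)
      {
        if (run_scan(parallelism))
          return 0;                             // success

        if (timed_out() &&
            attempt >= retryMax / 10 &&         // enough budget already spent
            (parallelism == 0 || parallelism > 1))
          parallelism = 1;                      // decrease parallelism
      }
      return -1;                                // retries exhausted
    }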

=== modified file 'storage/ndb/test/src/NDBT_Tables.cpp'
--- a/storage/ndb/test/src/NDBT_Tables.cpp	2008-11-17 09:26:25 +0000
+++ b/storage/ndb/test/src/NDBT_Tables.cpp	2008-12-20 19:48:44 +0000
@@ -868,11 +868,21 @@ NDBT_Tables::create_default_tablespace(N
   NdbDictionary::Dictionary* pDict = pNdb->getDictionary();
 
   int res;
+  Uint32 mb = 8;
+  {
+    char buf[256];
+    if (NdbEnv_GetEnv("UNDOBUFFER", buf, sizeof(buf)))
+    {
+      mb = atoi(buf);
+      ndbout_c("Using %umb dd-undo-buffer", mb);
+    }
+  }
+
   NdbDictionary::LogfileGroup lg = pDict->getLogfileGroup("DEFAULT-LG");
   if (strcmp(lg.getName(), "DEFAULT-LG") != 0)
   {
     lg.setName("DEFAULT-LG");
-    lg.setUndoBufferSize(8*1024*1024);
+    lg.setUndoBufferSize(mb*1024*1024);
     res = pDict->createLogfileGroup(lg);
     if(res != 0){
       g_err << "Failed to create logfilegroup:"
@@ -881,7 +891,7 @@ NDBT_Tables::create_default_tablespace(N
     }
   }
 
-  Uint32 mb = 96;
+  mb = 96;
   Uint32 files = 13;
 
   {
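
create_default_tablespace now takes its undo buffer size from the UNDOBUFFER environment variable instead of hard-coding 8 MB, which makes the logfile group tunable per autotest host without a rebuild. The same pattern with plain getenv() (the patch itself goes through NDB's NdbEnv_GetEnv wrapper):

    #include <cstdio>
    #include <cstdlib>

    // 8 MB default, overridable via the environment, as in the patch.
    unsigned undo_buffer_mb()
    {
      unsigned mb = 8;
      if (const char* v = getenv("UNDOBUFFER"))
      {
        mb = (unsigned)atoi(v);
        printf("Using %umb dd-undo-buffer\n", mb);
      }
      return mb;
    }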

=== modified file 'storage/ndb/test/tools/log_listner.cpp'
--- a/storage/ndb/test/tools/log_listner.cpp	2007-04-10 08:27:02 +0000
+++ b/storage/ndb/test/tools/log_listner.cpp	2009-01-14 13:33:03 +0000
@@ -7,15 +7,11 @@ NDB_STD_OPTS_VARS;
 
 static struct my_option my_long_options[] =
 {
-  NDB_STD_OPTS("ndb_logevent_listen"),
+  NDB_STD_OPTS("eventlog"),
   { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
 };
 static void usage()
 {
-  char desc[] = 
-    "tabname\n"\
-    "This program list all properties of table(s) in NDB Cluster.\n"\
-    "  ex: desc T1 T2 T4\n";
   ndb_std_print_version();
   my_print_help(my_long_options);
   my_print_variables(my_long_options);
@@ -40,7 +36,7 @@ main(int argc, char** argv)
   load_defaults("my",load_default_groups,&argc,&argv);
   int ho_error;
 #ifndef DBUG_OFF
-  opt_debug= "d:t:O,/tmp/ndb_desc.trace";
+  opt_debug= "d:t:O,/tmp/eventlog.trace";
 #endif
   if ((ho_error=handle_options(&argc, &argv, my_long_options, 
 			       ndb_std_get_one_option)))

=== modified file 'storage/ndb/tools/waiter.cpp'
--- a/storage/ndb/tools/waiter.cpp	2008-08-12 18:56:42 +0000
+++ b/storage/ndb/tools/waiter.cpp	2009-01-08 11:57:59 +0000
@@ -25,12 +25,15 @@
 
 #include <NDBT.hpp>
 
+#include <kernel/NodeBitmask.hpp>
+
 static int
 waitClusterStatus(const char* _addr, ndb_mgm_node_status _status);
 
 enum ndb_waiter_options {
   OPT_WAIT_STATUS_NOT_STARTED = NDB_STD_OPTIONS_LAST,
   OPT_WAIT_STATUS_SINGLE_USER
+  ,OPT_NOWAIT_NODES
 };
 NDB_STD_OPTS_VARS;
 
@@ -38,6 +41,8 @@ static int _no_contact = 0;
 static int _not_started = 0;
 static int _single_user = 0;
 static int _timeout = 120;
+static const char* _nowait_nodes = 0;
+static NdbNodeBitmask nowait_nodes_bitmask;
 
 const char *load_default_groups[]= { "mysql_cluster",0 };
 
@@ -57,6 +62,10 @@ static struct my_option my_long_options[
   { "timeout", 't', "Timeout to wait in seconds",
     (uchar**) &_timeout, (uchar**) &_timeout, 0,
     GET_INT, REQUIRED_ARG, 120, 0, 0, 0, 0, 0 }, 
+  { "nowait-nodes", OPT_NOWAIT_NODES, 
+    "Nodes that will not be waited for",
+    (uchar**) &_nowait_nodes, (uchar**) &_nowait_nodes, 0,
+    GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
   { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
 };
 
@@ -107,6 +116,23 @@ int main(int argc, char** argv){
     wait_status= NDB_MGM_NODE_STATUS_STARTED;
   }
 
+  if (_nowait_nodes)
+  {
+    int res = nowait_nodes_bitmask.parseMask(_nowait_nodes);
+    if(res == -2 || (res > 0 && nowait_nodes_bitmask.get(0)))
+    {
+      ndbout_c("Invalid nodeid specified in nowait-nodes: %s", 
+               _nowait_nodes);
+      exit(-1);
+    }
+    else if (res < 0)
+    {
+      ndbout_c("Unable to parse nowait-nodes argument: %s",
+               _nowait_nodes);
+      exit(-1);
+    }
+  }
+
   if (waitClusterStatus(_hostName, wait_status) != 0)
     return NDBT_ProgramExit(NDBT_FAILED);
   return NDBT_ProgramExit(NDBT_OK);
@@ -148,7 +174,8 @@ getStatus(){
       node = &status->node_states[i];      
       switch(node->node_type){
       case NDB_MGM_NODE_TYPE_NDB:
-	ndbNodes.push_back(*node);
+        if (!nowait_nodes_bitmask.get(node->node_id))
+          ndbNodes.push_back(*node);
 	break;
       case NDB_MGM_NODE_TYPE_MGM:
         /* Don't care about MGM nodes */
