List: Commits
From: Tomas Ulin
Date: March 24 2009 10:34am
Subject: bzr commit into mysql-6.0-ndb branch (tomas.ulin:2750)

#At file:///home/tomas/mysql_src/mysql-6.0/

 2750 Tomas Ulin	2009-03-24 [merge]
      merge 6.0 and 6.0-ndb
      removed:
        mysql-test/suite/parts/r/ndb_dd_backuprestore.result
        mysql-test/suite/parts/t/ndb_dd_backuprestore.test
      added:
        storage/ndb/test/run-test/check-tests.sh
      renamed:
        mysql-test/suite/rpl_ndb_big/r/rpl_ndb_mixed_engines_transactions.result => mysql-test/suite/rpl_ndb_big/r/rpl_ndb_mix_eng_trans.result
        mysql-test/suite/rpl_ndb_big/t/rpl_ndb_mixed_engines_transactions-master.opt => mysql-test/suite/rpl_ndb_big/t/rpl_ndb_mix_eng_trans-master.opt
        mysql-test/suite/rpl_ndb_big/t/rpl_ndb_mixed_engines_transactions-slave.opt => mysql-test/suite/rpl_ndb_big/t/rpl_ndb_mix_eng_trans-slave.opt
        mysql-test/suite/rpl_ndb_big/t/rpl_ndb_mixed_engines_transactions.test => mysql-test/suite/rpl_ndb_big/t/rpl_ndb_mix_eng_trans.test
      modified:
        .bzr-mysql/default.conf
        configure.in
        mysql-test/include/ndb_not_readonly.inc
        mysql-test/lib/My/ConfigFactory.pm
        mysql-test/lib/v1/ndb_config_2_node.ini
        mysql-test/suite/funcs_1/t/disabled.def
        mysql-test/suite/ndb/r/ndb_alter_table_online.result
        mysql-test/suite/ndb/r/ndb_dd_alter.result
        mysql-test/suite/ndb/r/ndb_dd_basic.result
        mysql-test/suite/ndb/r/ndb_index_ordered.result
        mysql-test/suite/ndb/r/ndbapi.result
        mysql-test/suite/ndb/t/disabled.def
        mysql-test/suite/ndb/t/ndb_alter_table_online.test
        mysql-test/suite/ndb/t/ndb_dd_basic.test
        mysql-test/suite/ndb/t/ndb_index_ordered.test
        mysql-test/suite/ndb/t/ndbapi.test
        mysql-test/suite/ndb_binlog/r/ndb_binlog_basic.result
        mysql-test/suite/ndb_binlog/r/ndb_binlog_restore.result
        mysql-test/suite/ndb_team/r/ndb_dd_backuprestore.result
        mysql-test/suite/ndb_team/r/rpl_ndb_extraColMaster.result
        mysql-test/suite/parts/t/disabled.def
        mysql-test/suite/rpl_ndb_big/r/rpl_ndb_auto_inc.result
        mysql-test/suite/rpl_ndb_big/r/rpl_ndb_circular_simplex.result
        mysql-test/suite/rpl_ndb_big/r/rpl_ndb_multi.result
        mysql-test/suite/rpl_ndb_big/t/rpl_ndb_auto_inc.test
        mysql-test/suite/rpl_ndb_big/t/rpl_ndb_circular_simplex.test
        mysql-test/suite/rpl_ndb_big/t/rpl_ndb_multi.test
        mysql-test/suite/rpl_ndb_big/t/rpl_ndbapi_multi.test
        mysql-test/t/mysqldump.test
        scripts/mysql_system_tables.sql
        sql/ha_ndbcluster.cc
        sql/ha_ndbcluster_binlog.cc
        sql/slave.cc
        sql/sql_table.cc
        storage/ndb/include/kernel/RefConvert.hpp
        storage/ndb/include/kernel/signaldata/AllocNodeId.hpp
        storage/ndb/include/kernel/signaldata/BuildIndx.hpp
        storage/ndb/include/kernel/signaldata/CreateFilegroupImpl.hpp
        storage/ndb/include/kernel/signaldata/FsOpenReq.hpp
        storage/ndb/include/mgmapi/mgmapi_config_parameters.h
        storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp
        storage/ndb/include/ndbapi/NdbReceiver.hpp
        storage/ndb/src/common/debugger/EventLogger.cpp
        storage/ndb/src/kernel/blocks/backup/Backup.cpp
        storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
        storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
        storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
        storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
        storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp
        storage/ndb/src/kernel/blocks/lgman.cpp
        storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
        storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
        storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
        storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp
        storage/ndb/src/kernel/blocks/ndbfs/Filename.hpp
        storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
        storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp
        storage/ndb/src/kernel/blocks/print_file.cpp
        storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
        storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
        storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
        storage/ndb/src/kernel/blocks/trix/Trix.cpp
        storage/ndb/src/kernel/blocks/tsman.cpp
        storage/ndb/src/kernel/vm/DataBuffer.hpp
        storage/ndb/src/mgmclient/CommandInterpreter.cpp
        storage/ndb/src/mgmsrv/ConfigInfo.cpp
        storage/ndb/src/mgmsrv/MgmtSrvr.cpp
        storage/ndb/src/mgmsrv/MgmtSrvr.hpp
        storage/ndb/src/mgmsrv/Services.cpp
        storage/ndb/src/ndbapi/Ndb.cpp
        storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
        storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
        storage/ndb/src/ndbapi/NdbOperationExec.cpp
        storage/ndb/src/ndbapi/NdbReceiver.cpp
        storage/ndb/src/ndbapi/NdbScanOperation.cpp
        storage/ndb/src/ndbapi/ndberror.c
        storage/ndb/test/include/HugoOperations.hpp
        storage/ndb/test/ndbapi/bench/mainAsyncGenerator.cpp
        storage/ndb/test/ndbapi/bench/ndb_async2.cpp
        storage/ndb/test/ndbapi/bench/testData.h
        storage/ndb/test/ndbapi/testBlobs.cpp
        storage/ndb/test/ndbapi/testIndex.cpp
        storage/ndb/test/ndbapi/testNdbApi.cpp
        storage/ndb/test/ndbapi/testTransactions.cpp
        storage/ndb/test/ndbapi/test_event.cpp
        storage/ndb/test/run-test/conf-dl145a.cnf
        storage/ndb/test/run-test/conf-ndbmaster.cnf
        storage/ndb/test/run-test/daily-basic-tests.txt
        storage/ndb/test/run-test/daily-devel-tests.txt
        storage/ndb/test/src/HugoOperations.cpp
        storage/ndb/test/src/HugoTransactions.cpp
        storage/ndb/test/src/NdbBackup.cpp
        storage/ndb/test/src/NdbRestarts.cpp
        storage/ndb/test/src/UtilTransactions.cpp
        storage/ndb/tools/ndb_error_reporter
        storage/ndb/tools/restore/Restore.cpp
        storage/ndb/tools/restore/restore_main.cpp

=== modified file '.bzr-mysql/default.conf'
--- a/.bzr-mysql/default.conf	2009-03-19 12:21:49 +0000
+++ b/.bzr-mysql/default.conf	2009-03-24 10:33:15 +0000
@@ -1,5 +1,5 @@
 [MYSQL]
-tree_location = bzr+ssh://bk-internal.mysql.com/bzrroot/server/mysql-6.0
-post_commit_to = "commits@stripped"
-post_push_to = "commits@stripped"
-tree_name = "mysql-6.0"
+tree_location = bzr+ssh://bk-internal.mysql.com/bzrroot/server/mysql-6.0-ndb
+post_commit_to = commits@lists.mysql.com
+post_push_to = commits@stripped
+tree_name = mysql-6.0-ndb

=== modified file 'configure.in'
--- a/configure.in	2009-03-19 12:15:19 +0000
+++ b/configure.in	2009-03-24 10:33:15 +0000
@@ -14,11 +14,11 @@ AC_CANONICAL_TARGET
 AM_INIT_AUTOMAKE([1.6.2])
 AC_PROG_LIBTOOL
 
-AC_CONFIG_HEADERS([include/config.h])
+AC_CONFIG_HEADERS([include/config.h:config.h.in])
 
 NDB_VERSION_MAJOR=6
 NDB_VERSION_MINOR=2
-NDB_VERSION_BUILD=17
+NDB_VERSION_BUILD=18
 NDB_VERSION_STATUS="-alpha"
 
 PROTOCOL_VERSION=10

=== modified file 'mysql-test/include/ndb_not_readonly.inc'
--- a/mysql-test/include/ndb_not_readonly.inc	2009-02-02 15:58:48 +0000
+++ b/mysql-test/include/ndb_not_readonly.inc	2009-03-12 10:45:04 +0000
@@ -11,7 +11,7 @@ let $counter= 600;
 while ($mysql_errno)
 {
   # Table is readonly until the mysqld has connected properly
-  --error 0,ER_NO_SUCH_TABLE,ER_OPEN_AS_READONLY,ER_GET_ERRMSG
+  --error 0,ER_NO_SUCH_TABLE,ER_OPEN_AS_READONLY,ER_GET_ERRMSG,ER_TABLE_DEF_CHANGED
   replace into mysql.ndb_apply_status values(0,0,"",0,0);
   if ($mysql_errno)
   {

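Note on the hunk above (a hedged reading): while the mysqld is still
establishing its connection to the cluster, a statement against
mysql.ndb_apply_status can also fail with "table definition changed", so
ER_TABLE_DEF_CHANGED joins the list of errors the retry loop tolerates. A
minimal sketch of the same retry idiom in mysqltest syntax, assuming
$mysql_errno is non-zero on entry and that the include decrements a counter
and sleeps between attempts:

  let $counter= 600;
  while ($mysql_errno)
  {
    # Table is readonly until the mysqld has connected properly
    --error 0,ER_NO_SUCH_TABLE,ER_OPEN_AS_READONLY,ER_GET_ERRMSG,ER_TABLE_DEF_CHANGED
    replace into mysql.ndb_apply_status values(0,0,"",0,0);
    if ($mysql_errno)
    {
      dec $counter;
      --sleep 0.1
    }
  }
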
=== modified file 'mysql-test/lib/My/ConfigFactory.pm'
--- a/mysql-test/lib/My/ConfigFactory.pm	2009-03-02 12:48:35 +0000
+++ b/mysql-test/lib/My/ConfigFactory.pm	2009-03-24 10:33:15 +0000
@@ -240,6 +240,20 @@ sub fix_cluster_backup_dir {
 }
 
 
+sub fix_cluster_undo_file_dir {
+  my ($self, $config, $group_name, $group)= @_;
+  my $dir= $group->value('DataDir');
+  return "$dir/uf";
+}
+
+
+sub fix_cluster_data_file_dir {
+  my ($self, $config, $group_name, $group)= @_;
+  my $dir= $group->value('DataDir');
+  return "$dir/df";
+}
+
+
 #
 # Rules to run for each ndb_mgmd in the config
 #  - will be run in order listed here
@@ -260,6 +274,8 @@ my @ndbd_rules=
  { 'HostName' => \&fix_host },
  { 'DataDir' => \&fix_cluster_dir },
  { 'BackupDataDir' => \&fix_cluster_backup_dir },
+ { 'FileSystemPathDD' => \&fix_cluster_undo_file_dir },
+ { 'FileSystemPathDataFiles' => \&fix_cluster_data_file_dir },
 );
 
 

=== modified file 'mysql-test/lib/v1/ndb_config_2_node.ini'
--- a/mysql-test/lib/v1/ndb_config_2_node.ini	2009-02-01 21:05:19 +0000
+++ b/mysql-test/lib/v1/ndb_config_2_node.ini	2009-02-17 13:20:12 +0000
@@ -20,6 +20,9 @@ DiskPageBufferMemory= CHOOSE_DiskPageBuf
 # test that the parameter exists
 InitialNoOfOpenFiles= 27
 
+FileSystemPathDD= CHOOSE_FILESYSTEM/uf
+FileSystemPathDataFiles= CHOOSE_FILESYSTEM/df
+
 #
 # Increase timeouts to cater for slow test-machines
 #   (possibly running several tests in parallell)

=== modified file 'mysql-test/suite/funcs_1/t/disabled.def'
--- a/mysql-test/suite/funcs_1/t/disabled.def	2009-03-02 14:02:08 +0000
+++ b/mysql-test/suite/funcs_1/t/disabled.def	2009-03-24 10:33:15 +0000
@@ -10,4 +10,4 @@
 #
 ##############################################################################
 
-ndb_views:  Bug#39125 Result file for test contains a "temporary error", how can this be stable?
+ndb_views:		Bug #40860 funcs_1.ndb_views fail Bug#39125 Result file for test contains a "temporary error", how can this be stable?

=== modified file 'mysql-test/suite/ndb/r/ndb_alter_table_online.result'
--- a/mysql-test/suite/ndb/r/ndb_alter_table_online.result	2009-03-16 21:55:46 +0000
+++ b/mysql-test/suite/ndb/r/ndb_alter_table_online.result	2009-03-24 10:33:15 +0000
@@ -654,6 +654,15 @@ ERROR 42000: This version of MySQL doesn
 ALTER ONLINE TABLE t1 ADD COLUMN h VARBINARY(20) COLUMN_FORMAT DYNAMIC;
 ERROR 42000: This version of MySQL doesn't yet support 'ALTER ONLINE TABLE t1 ADD COLUMN h VARBINARY(20) COLUMN_FORMAT DYNAMIC'
 DROP TABLE t1;
+create table t1 (a int primary key, b int) storage disk tablespace ts1 engine = ndb;
+alter online table t1 add column c0 int null column_format DYNAMIC;
+ERROR 42000: This version of MySQL doesn't yet support 'alter online table t1 add column c0 int null column_format DYNAMIC'
+alter online table t1 add column c1 int null column_format DYNAMIC storage memory;
+drop table t1;
+create table t1 (a int primary key, b int storage disk) tablespace ts1 engine = ndb;
+alter online table t1 add column c0 int null column_format DYNAMIC;
+alter online table t1 add column c1 int null column_format DYNAMIC storage memory;
+drop table t1;
 ALTER TABLESPACE ts1
 DROP DATAFILE 'datafile.dat'
 ENGINE = NDB;

=== modified file 'mysql-test/suite/ndb/r/ndb_dd_alter.result'
--- a/mysql-test/suite/ndb/r/ndb_dd_alter.result	2009-02-17 10:14:26 +0000
+++ b/mysql-test/suite/ndb/r/ndb_dd_alter.result	2009-02-27 14:17:35 +0000
@@ -316,9 +316,6 @@ SELECT * FROM information_schema.partiti
 TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
 def	test	t1	p0	NULL	1	NULL	KEY	NULL		NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	ts
 ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE;
-Warnings:
-Warning	1478	Converted FIXED field to DYNAMIC to enable on-line ADD COLUMN
-Warning	1478	Converted FIXED field to DYNAMIC to enable on-line ADD COLUMN
 SELECT * FROM information_schema.partitions WHERE table_name= 't1' AND partition_name = 'p0';
 TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
 def	test	t1	p0	NULL	1	NULL	KEY	NULL		NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	ts
@@ -345,12 +342,6 @@ a1	a2	a3
 19	20.2345	20000019
 20	21.2345	20000020
 ALTER TABLE test.t1  ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME;
-Warnings:
-Warning	1478	Converted FIXED field to DYNAMIC to enable on-line ADD COLUMN
-Warning	1478	Converted FIXED field to DYNAMIC to enable on-line ADD COLUMN
-Warning	1478	Converted FIXED field to DYNAMIC to enable on-line ADD COLUMN
-Warning	1478	Converted FIXED field to DYNAMIC to enable on-line ADD COLUMN
-Warning	1478	Converted FIXED field to DYNAMIC to enable on-line ADD COLUMN
 SELECT a1,a2,a3,hex(a4), a5,a6,a7,a8 FROM test.t1 ORDER BY a1;
 a1	a2	a3	hex(a4)	a5	a6	a7	a8
 1	2.2345	20000001	0	1	23457	2006-01-01	07:04:00

=== modified file 'mysql-test/suite/ndb/r/ndb_dd_basic.result'
--- a/mysql-test/suite/ndb/r/ndb_dd_basic.result	2008-02-11 20:36:27 +0000
+++ b/mysql-test/suite/ndb/r/ndb_dd_basic.result	2009-03-19 13:28:34 +0000
@@ -18,6 +18,12 @@ ADD DATAFILE 'datafile.dat'
 USE LOGFILE GROUP lg1
 INITIAL_SIZE 12M;
 ERROR HY000: Table storage engine 'MyISAM' does not support the create option 'TABLESPACE or LOGFILE GROUP'
+CREATE LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile.dat'
+INITIAL_SIZE 5K
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+ERROR HY000: Failed to create UNDOFILE
 set storage_engine=ndb;
 CREATE LOGFILE GROUP lg1
 ADD UNDOFILE 'undofile.dat'

=== modified file 'mysql-test/suite/ndb/r/ndb_index_ordered.result'
--- a/mysql-test/suite/ndb/r/ndb_index_ordered.result	2007-06-27 12:28:02 +0000
+++ b/mysql-test/suite/ndb/r/ndb_index_ordered.result	2009-03-16 18:08:48 +0000
@@ -852,3 +852,12 @@ PRIMARY KEY (DishID)
 create index i on nationaldish(countrycode,calories) using hash;
 ERROR 42000: Table 'nationaldish' uses an extension that doesn't exist in this MySQL version
 drop table nationaldish;
+drop table if exists t1;
+Warnings:
+Note	1051	Unknown table 't1'
+create table t1(c1 varchar(20) primary key, c2 char(20)) engine=ndbcluster;
+insert into t1(c1,c2) values ('ddd','jg');
+select * from t1 where  (c2 < 'b' AND c1 <> 'g')  OR  (c2 <> 'a' AND c1 <> 'd');
+c1	c2
+ddd	jg
+drop table t1;

=== modified file 'mysql-test/suite/ndb/r/ndbapi.result'
--- a/mysql-test/suite/ndb/r/ndbapi.result	2007-06-27 12:28:02 +0000
+++ b/mysql-test/suite/ndb/r/ndbapi.result	2009-02-25 12:47:05 +0000
@@ -19,4 +19,6 @@ update t0 set c2 = 'G' where c0 = 1;
 update t0 set c0 = 5, c2 = 'H' where c0 = 3;
 delete from t0;
 drop table t0;
+drop database TEST_DB;
+drop database TEST_DB_1;
 Running mgmapi_logevent

=== modified file 'mysql-test/suite/ndb/t/disabled.def'
--- a/mysql-test/suite/ndb/t/disabled.def	2009-02-13 16:18:07 +0000
+++ b/mysql-test/suite/ndb/t/disabled.def	2009-02-16 12:10:48 +0000
@@ -12,5 +12,4 @@
 
 ndb_partition_error2	  : Bug#40989 msvensson 2007-06-27 HF is not sure if the test can work as internded on all the platforms
 # the below testcase have been reworked to avoid the bug, test contains comment, keep bug open
-#ndb_binlog_ddl_multi     : BUG#18976 2006-04-10 kent    CRBR: multiple binlog, second binlog may miss schema log events
 ndb_cache_trans           : Bug#42565 ndb_cache_trans failure since SERVER_STATUS_IN_TRANS added to hash key

=== modified file 'mysql-test/suite/ndb/t/ndb_alter_table_online.test'
--- a/mysql-test/suite/ndb/t/ndb_alter_table_online.test	2009-02-02 15:58:48 +0000
+++ b/mysql-test/suite/ndb/t/ndb_alter_table_online.test	2009-02-16 12:05:36 +0000
@@ -685,6 +685,20 @@ ALTER ONLINE TABLE t1 ADD COLUMN h BINAR
 ALTER ONLINE TABLE t1 ADD COLUMN h VARBINARY(20) COLUMN_FORMAT DYNAMIC;
 DROP TABLE t1;
 
+#
+# bug#42549
+#
+create table t1 (a int primary key, b int) storage disk tablespace ts1 engine = ndb;
+--error ER_NOT_SUPPORTED_YET
+alter online table t1 add column c0 int null column_format DYNAMIC;
+alter online table t1 add column c1 int null column_format DYNAMIC storage memory;
+drop table t1;
+
+create table t1 (a int primary key, b int storage disk) tablespace ts1 engine = ndb;
+alter online table t1 add column c0 int null column_format DYNAMIC;
+alter online table t1 add column c1 int null column_format DYNAMIC storage memory;
+drop table t1;
+
 ALTER TABLESPACE ts1
 DROP DATAFILE 'datafile.dat'
 ENGINE = NDB;

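A hedged note on the bug#42549 cases above: NDB performs online ADD COLUMN
only for dynamic in-memory columns. When STORAGE DISK is given at the table
level (first case), the new column inherits disk storage and the ALTER is
rejected with ER_NOT_SUPPORTED_YET unless STORAGE MEMORY is spelled out;
when only an individual column is STORAGE DISK (second case), the table
default remains memory and both ALTERs go through online. In SQL terms,
reusing the tablespace ts1 created earlier in the test:

  create table t1 (a int primary key, b int) storage disk tablespace ts1 engine = ndb;
  # rejected: the added column would inherit the table's STORAGE DISK
  alter online table t1 add column c0 int null column_format DYNAMIC;
  # accepted: the added column is explicitly placed in memory
  alter online table t1 add column c1 int null column_format DYNAMIC storage memory;
  drop table t1;
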
=== modified file 'mysql-test/suite/ndb/t/ndb_dd_basic.test'
--- a/mysql-test/suite/ndb/t/ndb_dd_basic.test	2008-02-11 20:36:27 +0000
+++ b/mysql-test/suite/ndb/t/ndb_dd_basic.test	2009-03-19 13:28:34 +0000
@@ -44,6 +44,14 @@ ADD DATAFILE 'datafile.dat'
 USE LOGFILE GROUP lg1
 INITIAL_SIZE 12M;
 
+# bug#29574
+--error ER_CREATE_FILEGROUP_FAILED
+CREATE LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile.dat'
+INITIAL_SIZE 5K
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+
 ##################################
 # Basic test of disk tables for NDB
 # Start by creating a logfile group

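A hedged note on the bug#29574 case above: an undo file whose INITIAL_SIZE
(5K) is smaller than the requested UNDO_BUFFER_SIZE (1M) cannot be created,
and the test now pins down that the statement fails cleanly with
ER_CREATE_FILEGROUP_FAILED ("Failed to create UNDOFILE"). The statement
under test, as in the hunk:

  --error ER_CREATE_FILEGROUP_FAILED
  CREATE LOGFILE GROUP lg1
  ADD UNDOFILE 'undofile.dat'
  INITIAL_SIZE 5K
  UNDO_BUFFER_SIZE = 1M
  ENGINE=NDB;
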
=== modified file 'mysql-test/suite/ndb/t/ndb_index_ordered.test'
--- a/mysql-test/suite/ndb/t/ndb_index_ordered.test	2007-07-04 20:38:53 +0000
+++ b/mysql-test/suite/ndb/t/ndb_index_ordered.test	2009-03-16 18:08:48 +0000
@@ -478,3 +478,12 @@ create table nationaldish (DishID int(10
 create index i on nationaldish(countrycode,calories) using hash;
 
 drop table nationaldish;
+
+# bug#42857 Got error 4541 -IndexBound has no bound information- from NDBCLUSTER
+# Test that query returns results expected
+
+drop table if exists t1;
+create table t1(c1 varchar(20) primary key, c2 char(20)) engine=ndbcluster;
+insert into t1(c1,c2) values ('ddd','jg');
+select * from t1 where  (c2 < 'b' AND c1 <> 'g')  OR  (c2 <> 'a' AND c1 <> 'd');
+drop table t1;

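A hedged note on the bug#42857 regression test above: the OR of two range
conditions over the varchar primary key makes the optimizer drive a
multi-range ordered index scan, and before the fix one of the generated
ranges reached NDB with no bound information, producing error 4541
(IndexBound has no bound information) instead of a result. The check is
simply that the one matching row comes back:

  create table t1(c1 varchar(20) primary key, c2 char(20)) engine=ndbcluster;
  insert into t1(c1,c2) values ('ddd','jg');
  # must return ('ddd','jg') rather than fail with NDB error 4541
  select * from t1 where (c2 < 'b' AND c1 <> 'g') OR (c2 <> 'a' AND c1 <> 'd');
  drop table t1;
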
=== modified file 'mysql-test/suite/ndb/t/ndbapi.test'
--- a/mysql-test/suite/ndb/t/ndbapi.test	2007-12-12 17:19:24 +0000
+++ b/mysql-test/suite/ndb/t/ndbapi.test	2009-02-25 12:47:05 +0000
@@ -5,6 +5,7 @@
 DROP TABLE IF EXISTS t0;
 drop database if exists mysqltest;
 --enable_warnings
+--let MASTER_MYSOCK=`select @@socket`;
 
 --exec echo Running ndbapi_simple
 --exec $NDB_EXAMPLES_DIR/ndbapi_simple/ndbapi_simple $MASTER_MYSOCK "$NDB_CONNECTSTRING" >> $NDB_EXAMPLES_OUTPUT
@@ -31,14 +32,16 @@ create table t0(c0 int, c1 int, c2 char(
 #--exec $NDB_EXAMPLES_DIR/ndbapi_event/ndbapi_event "$NDB_CONNECTSTRING" 1 >> $NDB_EXAMPLES_OUTPUT
 insert into t0 values (1, 2, 'a', 'b', null);
 insert into t0 values (3, 4, 'c', 'd', null);
-update t0 set c3 = 'e' where c0 = 1 and c2 = 'a'; -- use pk
-update t0 set c3 = 'f'; -- use scan
-update t0 set c3 = 'F'; -- use scan update to 'same'
-update t0 set c2 = 'g' where c0 = 1; -- update pk part
-update t0 set c2 = 'G' where c0 = 1; -- update pk part to 'same'
-update t0 set c0 = 5, c2 = 'H' where c0 = 3; -- update full PK
+update t0 set c3 = 'e' where c0 = 1 and c2 = 'a'; # use pk
+update t0 set c3 = 'f'; # use scan
+update t0 set c3 = 'F'; # use scan update to 'same'
+update t0 set c2 = 'g' where c0 = 1; # update pk part
+update t0 set c2 = 'G' where c0 = 1; # update pk part to 'same'
+update t0 set c0 = 5, c2 = 'H' where c0 = 3; # update full PK
 delete from t0;
 drop table t0;
+drop database TEST_DB;
+drop database TEST_DB_1;
 
 --exec echo Running mgmapi_logevent
 --exec $NDB_EXAMPLES_DIR/mgmapi_logevent/mgmapi_logevent "$NDB_CONNECTSTRING" 1 >> $NDB_EXAMPLES_OUTPUT

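Two hedged observations on the ndbapi.test hunks above. First, the trailing
"-- use pk" style trailers are rewritten as "#" comments, which mysqltest
strips itself, presumably so the annotations are handled by the test driver
rather than shipped to the server inside the statement text; the new "drop
database TEST_DB" statements read as cleanup of databases the example
programs create. Second, MASTER_MYSOCK is now derived from the running
server rather than assumed to be set by the environment:

  --let MASTER_MYSOCK=`select @@socket`
  --exec $NDB_EXAMPLES_DIR/ndbapi_simple/ndbapi_simple $MASTER_MYSOCK "$NDB_CONNECTSTRING" >> $NDB_EXAMPLES_OUTPUT
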
=== modified file 'mysql-test/suite/ndb_binlog/r/ndb_binlog_basic.result'
--- a/mysql-test/suite/ndb_binlog/r/ndb_binlog_basic.result	2009-01-29 12:44:41 +0000
+++ b/mysql-test/suite/ndb_binlog/r/ndb_binlog_basic.result	2009-03-12 13:06:50 +0000
@@ -18,7 +18,7 @@ ndb_binlog_index	CREATE TABLE `ndb_binlo
   `orig_epoch` bigint(20) unsigned NOT NULL,
   `gci` int(10) unsigned NOT NULL,
   PRIMARY KEY (`epoch`,`orig_server_id`,`orig_epoch`)
-) ENGINE=MARIA DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
 reset master;
 create table t1 (a int primary key) engine=ndb;
 insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);

=== modified file 'mysql-test/suite/ndb_binlog/r/ndb_binlog_restore.result'
--- a/mysql-test/suite/ndb_binlog/r/ndb_binlog_restore.result	2009-03-16 21:55:46 +0000
+++ b/mysql-test/suite/ndb_binlog/r/ndb_binlog_restore.result	2009-03-24 10:33:15 +0000
@@ -4,9 +4,8 @@ drop table if exists t1;
 #
 create table t1 (a int key, b int) engine ndb;
 insert into t1 values (1,1);
-CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info
-(id INT, backup_id INT) ENGINE = MEMORY;
-LOAD DATA INFILE '<MYSQLTEST_VARDIR>/tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
+CREATE TEMPORARY TABLE test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
+LOAD DATA INFILE 'DUMP_FILE' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
 DROP TABLE test.backup_info;
 #
 # extra table to be used to ensure data has arrived to binlog

=== modified file 'mysql-test/suite/ndb_team/r/ndb_dd_backuprestore.result'
--- a/mysql-test/suite/ndb_team/r/ndb_dd_backuprestore.result	2009-03-16 21:55:46 +0000
+++ b/mysql-test/suite/ndb_team/r/ndb_dd_backuprestore.result	2009-03-24 10:33:15 +0000
@@ -234,31 +234,31 @@ t6	CREATE TABLE `t6` (
  PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) */
 SELECT * FROM information_schema.partitions WHERE table_name= 't1';
 TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t1	p0	NULL	1	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
-NULL	test	t1	p1	NULL	2	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
-NULL	test	t1	p2	NULL	3	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
-NULL	test	t1	p3	NULL	4	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
+def	test	t1	p0	NULL	1	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
+def	test	t1	p1	NULL	2	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
+def	test	t1	p2	NULL	3	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
+def	test	t1	p3	NULL	4	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
 SELECT * FROM information_schema.partitions WHERE table_name= 't2';
 TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t2	p0	NULL	1	NULL	KEY	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
-NULL	test	t2	p1	NULL	2	NULL	KEY	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
+def	test	t2	p0	NULL	1	NULL	KEY	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
+def	test	t2	p1	NULL	2	NULL	KEY	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
 SELECT * FROM information_schema.partitions WHERE table_name= 't3';
 TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t3	x1	NULL	1	NULL	RANGE	NULL	c3	NULL	105	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
-NULL	test	t3	x2	NULL	2	NULL	RANGE	NULL	c3	NULL	333	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
-NULL	test	t3	x3	NULL	3	NULL	RANGE	NULL	c3	NULL	720	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
+def	test	t3	x1	NULL	1	NULL	RANGE	NULL	c3	NULL	105	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
+def	test	t3	x2	NULL	2	NULL	RANGE	NULL	c3	NULL	333	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
+def	test	t3	x3	NULL	3	NULL	RANGE	NULL	c3	NULL	720	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
 SELECT * FROM information_schema.partitions WHERE table_name= 't4';
 TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t4	p0	NULL	1	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
-NULL	test	t4	p1	NULL	2	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
+def	test	t4	p0	NULL	1	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
+def	test	t4	p1	NULL	2	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
 SELECT * FROM information_schema.partitions WHERE table_name= 't5';
 TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t5	p0	NULL	1	NULL	KEY	NULL	pk1	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
-NULL	test	t5	p1	NULL	2	NULL	KEY	NULL	pk1	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
+def	test	t5	p0	NULL	1	NULL	KEY	NULL	pk1	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
+def	test	t5	p1	NULL	2	NULL	KEY	NULL	pk1	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
 SELECT * FROM information_schema.partitions WHERE table_name= 't6';
 TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t6	x1	NULL	1	NULL	RANGE	NULL	pk1	NULL	333	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
-NULL	test	t6	x2	NULL	2	NULL	RANGE	NULL	pk1	NULL	720	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
+def	test	t6	x1	NULL	1	NULL	RANGE	NULL	pk1	NULL	333	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
+def	test	t6	x2	NULL	2	NULL	RANGE	NULL	pk1	NULL	720	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
 SELECT COUNT(*) FROM test.t1;
 COUNT(*)
 250
@@ -414,31 +414,31 @@ t6	CREATE TABLE `t6` (
  PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) */
 SELECT * FROM information_schema.partitions WHERE table_name= 't1';
 TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t1	p0	NULL	1	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
-NULL	test	t1	p1	NULL	2	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
-NULL	test	t1	p2	NULL	3	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
-NULL	test	t1	p3	NULL	4	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
+def	test	t1	p0	NULL	1	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
+def	test	t1	p1	NULL	2	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
+def	test	t1	p2	NULL	3	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
+def	test	t1	p3	NULL	4	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
 SELECT * FROM information_schema.partitions WHERE table_name= 't2';
 TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t2	p0	NULL	1	NULL	KEY	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
-NULL	test	t2	p1	NULL	2	NULL	KEY	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
+def	test	t2	p0	NULL	1	NULL	KEY	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
+def	test	t2	p1	NULL	2	NULL	KEY	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
 SELECT * FROM information_schema.partitions WHERE table_name= 't3';
 TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t3	x1	NULL	1	NULL	RANGE	NULL	c3	NULL	105	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
-NULL	test	t3	x2	NULL	2	NULL	RANGE	NULL	c3	NULL	333	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
-NULL	test	t3	x3	NULL	3	NULL	RANGE	NULL	c3	NULL	720	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
+def	test	t3	x1	NULL	1	NULL	RANGE	NULL	c3	NULL	105	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
+def	test	t3	x2	NULL	2	NULL	RANGE	NULL	c3	NULL	333	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
+def	test	t3	x3	NULL	3	NULL	RANGE	NULL	c3	NULL	720	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
 SELECT * FROM information_schema.partitions WHERE table_name= 't4';
 TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t4	p0	NULL	1	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
-NULL	test	t4	p1	NULL	2	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
+def	test	t4	p0	NULL	1	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
+def	test	t4	p1	NULL	2	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
 SELECT * FROM information_schema.partitions WHERE table_name= 't5';
 TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t5	p0	NULL	1	NULL	KEY	NULL	pk1	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
-NULL	test	t5	p1	NULL	2	NULL	KEY	NULL	pk1	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
+def	test	t5	p0	NULL	1	NULL	KEY	NULL	pk1	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
+def	test	t5	p1	NULL	2	NULL	KEY	NULL	pk1	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
 SELECT * FROM information_schema.partitions WHERE table_name= 't6';
 TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t6	x1	NULL	1	NULL	RANGE	NULL	pk1	NULL	333	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
-NULL	test	t6	x2	NULL	2	NULL	RANGE	NULL	pk1	NULL	720	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
+def	test	t6	x1	NULL	1	NULL	RANGE	NULL	pk1	NULL	333	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
+def	test	t6	x2	NULL	2	NULL	RANGE	NULL	pk1	NULL	720	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
 SELECT COUNT(*) FROM test.t1;
 COUNT(*)
 250

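A hedged note on the result hunks above: the TABLE_CATALOG column of
information_schema tables now reports 'def' instead of NULL in this tree,
so the stored results flip NULL to def across the board while the partition
layout itself is unchanged. A quick check against a running server:

  SELECT DISTINCT table_catalog FROM information_schema.partitions;
  # expected: def
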
=== modified file 'mysql-test/suite/ndb_team/r/rpl_ndb_extraColMaster.result'
--- a/mysql-test/suite/ndb_team/r/rpl_ndb_extraColMaster.result	2009-02-02 15:58:48 +0000
+++ b/mysql-test/suite/ndb_team/r/rpl_ndb_extraColMaster.result	2009-02-16 13:12:46 +0000
@@ -456,7 +456,9 @@ f1	f2	f3	f4
 update t31 set f5=555555555555555 where f3=6;
 update t31 set f2=2 where f3=2;
 update t31 set f1=NULL where f3=1;
-update t31 set f3=0, f27=NULL, f35='f35 new value' where f3=3;
+update t31 set f3=NULL, f27=NULL, f35='f35 new value' where f3=3;
+Warnings:
+Warning	1048	Column 'f3' cannot be null
 
 ** Delete from Master **
 
@@ -1609,7 +1611,9 @@ f1	f2	f3	f4
 update t31 set f5=555555555555555 where f3=6;
 update t31 set f2=2 where f3=2;
 update t31 set f1=NULL where f3=1;
-update t31 set f3=0, f27=NULL, f35='f35 new value' where f3=3;
+update t31 set f3=NULL, f27=NULL, f35='f35 new value' where f3=3;
+Warnings:
+Warning	1048	Column 'f3' cannot be null
 
 ** Delete from Master **
 

=== removed file 'mysql-test/suite/parts/r/ndb_dd_backuprestore.result'
--- a/mysql-test/suite/parts/r/ndb_dd_backuprestore.result	2009-03-16 21:55:46 +0000
+++ b/mysql-test/suite/parts/r/ndb_dd_backuprestore.result	1970-01-01 00:00:00 +0000
@@ -1,512 +0,0 @@
-DROP TABLE IF EXISTS test.t1;
-DROP TABLE IF EXISTS test.t2;
-DROP TABLE IF EXISTS test.t3;
-DROP TABLE IF EXISTS test.t4;
-DROP TABLE IF EXISTS test.t5;
-DROP TABLE IF EXISTS test.t6;
-**** Test 1 Simple DD backup and restore ****
-CREATE LOGFILE GROUP log_group1
-ADD UNDOFILE './log_group1/undofile.dat'
-INITIAL_SIZE 16M
-UNDO_BUFFER_SIZE = 1M
-ENGINE=NDB;
-CREATE TABLESPACE table_space1
-ADD DATAFILE './table_space1/datafile.dat'
-USE LOGFILE GROUP log_group1
-INITIAL_SIZE 12M
-ENGINE NDB;
-CREATE TABLE test.t1
-(pk1 MEDIUMINT NOT NULL AUTO_INCREMENT PRIMARY KEY, c2 CHAR(50) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL) TABLESPACE table_space1 STORAGE DISK ENGINE=NDB;
-SELECT COUNT(*) FROM test.t1;
-COUNT(*)
-500
-SELECT pk1, c2, c3,  hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5;
-pk1	c2	c3	hex(c4)
-1	Sweden	500	1
-2	Sweden	499	1
-3	Sweden	498	1
-4	Sweden	497	1
-5	Sweden	496	1
-CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info
-(id INT, backup_id INT) ENGINE = MEMORY;
-LOAD DATA INFILE '<MYSQLTEST_VARDIR>/tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
-DROP TABLE test.backup_info;
-DROP TABLE test.t1;
-ALTER TABLESPACE table_space1
-DROP DATAFILE './table_space1/datafile.dat'
-ENGINE = NDB;
-DROP TABLESPACE table_space1
-ENGINE = NDB;
-DROP LOGFILE GROUP log_group1
-ENGINE =NDB;
-SELECT COUNT(*) FROM test.t1;
-COUNT(*)
-500
-SELECT pk1, c2, c3,  hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5;
-pk1	c2	c3	hex(c4)
-1	Sweden	500	1
-2	Sweden	499	1
-3	Sweden	498	1
-4	Sweden	497	1
-5	Sweden	496	1
-**** Test 2 Mixed Cluster Test backup and restore ****
-CREATE TABLE test.t2
-(pk1 MEDIUMINT NOT NULL AUTO_INCREMENT PRIMARY KEY, c2 VARCHAR(200) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL)ENGINE=NDB;
-CREATE TABLE test.t3 (c1 int not null auto_increment, data LONGBLOB, PRIMARY KEY(c1))TABLESPACE table_space1 STORAGE DISK ENGINE=NDB;
-CREATE TABLE test.t4 (c1 int not null auto_increment, data LONGBLOB, PRIMARY KEY(c1))ENGINE=NDB;
-SELECT COUNT(*) FROM test.t1;
-COUNT(*)
-500
-SELECT pk1, c2, c3,  hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5;
-pk1	c2	c3	hex(c4)
-1	Sweden	500	1
-2	Sweden	499	1
-3	Sweden	498	1
-4	Sweden	497	1
-5	Sweden	496	1
-SELECT COUNT(*) FROM test.t2;
-COUNT(*)
-500
-SELECT pk1, c2, c3,  hex(c4) FROM test.t2 ORDER BY pk1 LIMIT 5;
-pk1	c2	c3	hex(c4)
-1	Sweden, Texas	500	0
-2	Sweden, Texas	499	0
-3	Sweden, Texas	498	0
-4	Sweden, Texas	497	0
-5	Sweden, Texas	496	0
-SELECT COUNT(*) FROM test.t3;
-COUNT(*)
-100
-SELECT LENGTH(data) FROM test.t3 WHERE c1 = 1;
-LENGTH(data)
-1024
-SELECT LENGTH(data) FROM test.t3 WHERE c1 = 2;
-LENGTH(data)
-16384
-SELECT COUNT(*) FROM test.t4;
-COUNT(*)
-100
-SELECT LENGTH(data) FROM test.t4 WHERE c1 = 1;
-LENGTH(data)
-1024
-SELECT LENGTH(data) FROM test.t4 WHERE c1 = 2;
-LENGTH(data)
-16384
-CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info
-(id INT, backup_id INT) ENGINE = MEMORY;
-LOAD DATA INFILE '<MYSQLTEST_VARDIR>/tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
-DROP TABLE test.backup_info;
-DROP TABLE test.t1;
-DROP TABLE test.t2;
-DROP TABLE test.t3;
-DROP TABLE test.t4;
-ALTER TABLESPACE table_space1
-DROP DATAFILE './table_space1/datafile.dat'
-ENGINE = NDB;
-DROP TABLESPACE table_space1
-ENGINE = NDB;
-DROP LOGFILE GROUP log_group1
-ENGINE =NDB;
-SELECT COUNT(*) FROM test.t1;
-COUNT(*)
-500
-SELECT pk1, c2, c3,  hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5;
-pk1	c2	c3	hex(c4)
-1	Sweden	500	1
-2	Sweden	499	1
-3	Sweden	498	1
-4	Sweden	497	1
-5	Sweden	496	1
-SELECT COUNT(*) FROM test.t2;
-COUNT(*)
-500
-SELECT pk1, c2, c3,  hex(c4) FROM test.t2 ORDER BY pk1 LIMIT 5;
-pk1	c2	c3	hex(c4)
-1	Sweden, Texas	500	0
-2	Sweden, Texas	499	0
-3	Sweden, Texas	498	0
-4	Sweden, Texas	497	0
-5	Sweden, Texas	496	0
-SELECT COUNT(*) FROM test.t3;
-COUNT(*)
-100
-SELECT LENGTH(data) FROM test.t3 WHERE c1 = 1;
-LENGTH(data)
-1024
-SELECT LENGTH(data) FROM test.t3 WHERE c1 = 2;
-LENGTH(data)
-16384
-SELECT COUNT(*) FROM test.t4;
-COUNT(*)
-100
-SELECT LENGTH(data) FROM test.t4 WHERE c1 = 1;
-LENGTH(data)
-1024
-SELECT LENGTH(data) FROM test.t4 WHERE c1 = 2;
-LENGTH(data)
-16384
-DROP TABLE test.t1;
-DROP TABLE test.t2;
-DROP TABLE test.t3;
-DROP TABLE test.t4;
-**** Test 3 Adding  partition Test backup and restore ****
-CREATE TABLESPACE table_space2
-ADD DATAFILE './table_space2/datafile.dat'
-USE LOGFILE GROUP log_group1
-INITIAL_SIZE 12M
-ENGINE NDB;
-CREATE TABLE test.t1 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(150) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space1 STORAGE DISK ENGINE=NDB PARTITION BY HASH(c3) PARTITIONS 4;
-CREATE TABLE test.t4 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(180) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY HASH(c3) PARTITIONS 2;
-CREATE TABLE test.t2 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 TEXT NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space2 STORAGE DISK ENGINE=NDB PARTITION BY KEY(c3) (PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
-CREATE TABLE test.t5 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 TEXT NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY KEY(pk1) (PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
-CREATE TABLE test.t3 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(202) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space2 STORAGE DISK ENGINE=NDB PARTITION BY RANGE (c3) PARTITIONS 3 (PARTITION x1 VALUES LESS THAN (105), PARTITION x2 VALUES LESS THAN (333), PARTITION x3 VALUES LESS THAN (720));
-CREATE TABLE test.t6 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(220) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY RANGE (pk1) PARTITIONS 2 (PARTITION x1 VALUES LESS THAN (333), PARTITION x2 VALUES LESS THAN (720));
-SHOW CREATE TABLE test.t1;
-Table	Create Table
-t1	CREATE TABLE `t1` (
-  `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
-  `c2` varchar(150) NOT NULL,
-  `c3` int(11) NOT NULL,
-  `c4` bit(1) NOT NULL,
-  PRIMARY KEY (`pk1`,`c3`)
-) /*!50100 TABLESPACE `table_space1` STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY HASH (c3)
-PARTITIONS 4 */
-SHOW CREATE TABLE test.t2;
-Table	Create Table
-t2	CREATE TABLE `t2` (
-  `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
-  `c2` text NOT NULL,
-  `c3` int(11) NOT NULL,
-  `c4` bit(1) NOT NULL,
-  PRIMARY KEY (`pk1`,`c3`)
-) /*!50100 TABLESPACE `table_space2` STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY KEY (c3)
-(PARTITION p0 TABLESPACE = table_space2 ENGINE = ndbcluster,
- PARTITION p1 TABLESPACE = table_space2 ENGINE = ndbcluster) */
-SHOW CREATE TABLE test.t3;
-Table	Create Table
-t3	CREATE TABLE `t3` (
-  `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
-  `c2` varchar(202) NOT NULL,
-  `c3` int(11) NOT NULL,
-  `c4` bit(1) NOT NULL,
-  PRIMARY KEY (`pk1`,`c3`)
-) /*!50100 TABLESPACE `table_space2` STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY RANGE (c3)
-(PARTITION x1 VALUES LESS THAN (105) TABLESPACE = table_space2 ENGINE = ndbcluster,
- PARTITION x2 VALUES LESS THAN (333) TABLESPACE = table_space2 ENGINE = ndbcluster,
- PARTITION x3 VALUES LESS THAN (720) TABLESPACE = table_space2 ENGINE = ndbcluster) */
-SHOW CREATE TABLE test.t4;
-Table	Create Table
-t4	CREATE TABLE `t4` (
-  `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
-  `c2` varchar(180) NOT NULL,
-  `c3` int(11) NOT NULL,
-  `c4` bit(1) NOT NULL,
-  PRIMARY KEY (`pk1`,`c3`)
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY HASH (c3)
-PARTITIONS 2 */
-SHOW CREATE TABLE test.t5;
-Table	Create Table
-t5	CREATE TABLE `t5` (
-  `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
-  `c2` text NOT NULL,
-  `c3` int(11) NOT NULL,
-  `c4` bit(1) NOT NULL,
-  PRIMARY KEY (`pk1`,`c3`)
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY KEY (pk1)
-(PARTITION p0 ENGINE = ndbcluster,
- PARTITION p1 ENGINE = ndbcluster) */
-SHOW CREATE TABLE test.t6;
-Table	Create Table
-t6	CREATE TABLE `t6` (
-  `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
-  `c2` varchar(220) NOT NULL,
-  `c3` int(11) NOT NULL,
-  `c4` bit(1) NOT NULL,
-  PRIMARY KEY (`pk1`,`c3`)
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY RANGE (pk1)
-(PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster,
- PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) */
-SELECT * FROM information_schema.partitions WHERE table_name= 't1';
-TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t1	p0	NULL	1	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
-NULL	test	t1	p1	NULL	2	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
-NULL	test	t1	p2	NULL	3	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
-NULL	test	t1	p3	NULL	4	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
-SELECT * FROM information_schema.partitions WHERE table_name= 't2';
-TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t2	p0	NULL	1	NULL	KEY	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
-NULL	test	t2	p1	NULL	2	NULL	KEY	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
-SELECT * FROM information_schema.partitions WHERE table_name= 't3';
-TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t3	x1	NULL	1	NULL	RANGE	NULL	c3	NULL	105	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
-NULL	test	t3	x2	NULL	2	NULL	RANGE	NULL	c3	NULL	333	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
-NULL	test	t3	x3	NULL	3	NULL	RANGE	NULL	c3	NULL	720	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
-SELECT * FROM information_schema.partitions WHERE table_name= 't4';
-TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t4	p0	NULL	1	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
-NULL	test	t4	p1	NULL	2	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
-SELECT * FROM information_schema.partitions WHERE table_name= 't5';
-TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t5	p0	NULL	1	NULL	KEY	NULL	pk1	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
-NULL	test	t5	p1	NULL	2	NULL	KEY	NULL	pk1	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
-SELECT * FROM information_schema.partitions WHERE table_name= 't6';
-TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t6	x1	NULL	1	NULL	RANGE	NULL	pk1	NULL	333	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
-NULL	test	t6	x2	NULL	2	NULL	RANGE	NULL	pk1	NULL	720	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
-SELECT COUNT(*) FROM test.t1;
-COUNT(*)
-250
-SELECT pk1, c2, c3,  hex(c4) FROM test.t1 ORDER BY c3 LIMIT 5;
-pk1	c2	c3	hex(c4)
-250	Sweden, Texas	2	0
-249	Sweden, Texas	4	0
-248	Sweden, Texas	6	0
-247	Sweden, Texas	8	0
-246	Sweden, Texas	10	0
-SELECT COUNT(*) FROM test.t2;
-COUNT(*)
-250
-SELECT pk1, c2, c3,  hex(c4) FROM test.t2 ORDER BY c3 LIMIT 5;
-pk1	c2	c3	hex(c4)
-250	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	1	1
-249	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	3	1
-248	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	5	1
-247	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	7	1
-246	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	9	1
-SELECT COUNT(*) FROM test.t3;
-COUNT(*)
-250
-SELECT pk1, c2, c3,  hex(c4) FROM test.t3 ORDER BY c3 LIMIT 5;
-pk1	c2	c3	hex(c4)
-250	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	0	1
-249	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	2	1
-248	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	4	1
-247	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	6	1
-246	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	8	1
-SELECT COUNT(*) FROM test.t4;
-COUNT(*)
-250
-SELECT pk1, c2, c3,  hex(c4) FROM test.t4 ORDER BY c3 LIMIT 5;
-pk1	c2	c3	hex(c4)
-250	Sweden, Texas	2	0
-249	Sweden, Texas	4	0
-248	Sweden, Texas	6	0
-247	Sweden, Texas	8	0
-246	Sweden, Texas	10	0
-SELECT COUNT(*) FROM test.t5;
-COUNT(*)
-250
-SELECT pk1, c2, c3,  hex(c4) FROM test.t5 ORDER BY c3 LIMIT 5;
-pk1	c2	c3	hex(c4)
-250	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	1	1
-249	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	3	1
-248	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	5	1
-247	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	7	1
-246	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	9	1
-SELECT COUNT(*) FROM test.t6;
-COUNT(*)
-250
-SELECT pk1, c2, c3,  hex(c4) FROM test.t6 ORDER BY c3 LIMIT 5;
-pk1	c2	c3	hex(c4)
-250	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	0	1
-249	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	2	1
-248	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	4	1
-247	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	6	1
-246	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	8	1
-CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info
-(id INT, backup_id INT) ENGINE = MEMORY;
-LOAD DATA INFILE '<MYSQLTEST_VARDIR>/tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
-DROP TABLE test.backup_info;
-DROP TABLE test.t1;
-DROP TABLE test.t2;
-DROP TABLE test.t3;
-DROP TABLE test.t4;
-DROP TABLE test.t5;
-DROP TABLE test.t6;
-ALTER TABLESPACE table_space1
-DROP DATAFILE './table_space1/datafile.dat'
-ENGINE = NDB;
-ALTER TABLESPACE table_space2
-DROP DATAFILE './table_space2/datafile.dat'
-ENGINE = NDB;
-DROP TABLESPACE table_space1
-ENGINE = NDB;
-DROP TABLESPACE table_space2
-ENGINE = NDB;
-DROP LOGFILE GROUP log_group1
-ENGINE =NDB;
-SHOW CREATE TABLE test.t1;
-Table	Create Table
-t1	CREATE TABLE `t1` (
-  `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
-  `c2` varchar(150) NOT NULL,
-  `c3` int(11) NOT NULL,
-  `c4` bit(1) NOT NULL,
-  PRIMARY KEY (`pk1`,`c3`)
-) /*!50100 TABLESPACE `table_space1` STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY HASH (c3)
-PARTITIONS 4 */
-SHOW CREATE TABLE test.t2;
-Table	Create Table
-t2	CREATE TABLE `t2` (
-  `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
-  `c2` text NOT NULL,
-  `c3` int(11) NOT NULL,
-  `c4` bit(1) NOT NULL,
-  PRIMARY KEY (`pk1`,`c3`)
-) /*!50100 TABLESPACE `table_space2` STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY KEY (c3)
-(PARTITION p0 TABLESPACE = table_space2 ENGINE = ndbcluster,
- PARTITION p1 TABLESPACE = table_space2 ENGINE = ndbcluster) */
-SHOW CREATE TABLE test.t3;
-Table	Create Table
-t3	CREATE TABLE `t3` (
-  `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
-  `c2` varchar(202) NOT NULL,
-  `c3` int(11) NOT NULL,
-  `c4` bit(1) NOT NULL,
-  PRIMARY KEY (`pk1`,`c3`)
-) /*!50100 TABLESPACE `table_space2` STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY RANGE (c3)
-(PARTITION x1 VALUES LESS THAN (105) TABLESPACE = table_space2 ENGINE = ndbcluster,
- PARTITION x2 VALUES LESS THAN (333) TABLESPACE = table_space2 ENGINE = ndbcluster,
- PARTITION x3 VALUES LESS THAN (720) TABLESPACE = table_space2 ENGINE = ndbcluster) */
-SHOW CREATE TABLE test.t4;
-Table	Create Table
-t4	CREATE TABLE `t4` (
-  `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
-  `c2` varchar(180) NOT NULL,
-  `c3` int(11) NOT NULL,
-  `c4` bit(1) NOT NULL,
-  PRIMARY KEY (`pk1`,`c3`)
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY HASH (c3)
-PARTITIONS 2 */
-SHOW CREATE TABLE test.t5;
-Table	Create Table
-t5	CREATE TABLE `t5` (
-  `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
-  `c2` text NOT NULL,
-  `c3` int(11) NOT NULL,
-  `c4` bit(1) NOT NULL,
-  PRIMARY KEY (`pk1`,`c3`)
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY KEY (pk1)
-(PARTITION p0 ENGINE = ndbcluster,
- PARTITION p1 ENGINE = ndbcluster) */
-SHOW CREATE TABLE test.t6;
-Table	Create Table
-t6	CREATE TABLE `t6` (
-  `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
-  `c2` varchar(220) NOT NULL,
-  `c3` int(11) NOT NULL,
-  `c4` bit(1) NOT NULL,
-  PRIMARY KEY (`pk1`,`c3`)
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY RANGE (pk1)
-(PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster,
- PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) */
-SELECT * FROM information_schema.partitions WHERE table_name= 't1';
-TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t1	p0	NULL	1	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
-NULL	test	t1	p1	NULL	2	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
-NULL	test	t1	p2	NULL	3	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
-NULL	test	t1	p3	NULL	4	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space1
-SELECT * FROM information_schema.partitions WHERE table_name= 't2';
-TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t2	p0	NULL	1	NULL	KEY	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
-NULL	test	t2	p1	NULL	2	NULL	KEY	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
-SELECT * FROM information_schema.partitions WHERE table_name= 't3';
-TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t3	x1	NULL	1	NULL	RANGE	NULL	c3	NULL	105	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
-NULL	test	t3	x2	NULL	2	NULL	RANGE	NULL	c3	NULL	333	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
-NULL	test	t3	x3	NULL	3	NULL	RANGE	NULL	c3	NULL	720	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	table_space2
-SELECT * FROM information_schema.partitions WHERE table_name= 't4';
-TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t4	p0	NULL	1	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
-NULL	test	t4	p1	NULL	2	NULL	HASH	NULL	c3	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
-SELECT * FROM information_schema.partitions WHERE table_name= 't5';
-TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t5	p0	NULL	1	NULL	KEY	NULL	pk1	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
-NULL	test	t5	p1	NULL	2	NULL	KEY	NULL	pk1	NULL	NULL	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
-SELECT * FROM information_schema.partitions WHERE table_name= 't6';
-TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	PARTITION_NAME	SUBPARTITION_NAME	PARTITION_ORDINAL_POSITION	SUBPARTITION_ORDINAL_POSITION	PARTITION_METHOD	SUBPARTITION_METHOD	PARTITION_EXPRESSION	SUBPARTITION_EXPRESSION	PARTITION_DESCRIPTION	TABLE_ROWS	AVG_ROW_LENGTH	DATA_LENGTH	MAX_DATA_LENGTH	INDEX_LENGTH	DATA_FREE	CREATE_TIME	UPDATE_TIME	CHECK_TIME	CHECKSUM	PARTITION_COMMENT	NODEGROUP	TABLESPACE_NAME
-NULL	test	t6	x1	NULL	1	NULL	RANGE	NULL	pk1	NULL	333	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
-NULL	test	t6	x2	NULL	2	NULL	RANGE	NULL	pk1	NULL	720	0	0	0	NULL	0	0	NULL	NULL	NULL	NULL		default	NULL
-SELECT COUNT(*) FROM test.t1;
-COUNT(*)
-250
-SELECT pk1, c2, c3,  hex(c4) FROM test.t1 ORDER BY c3 LIMIT 5;
-pk1	c2	c3	hex(c4)
-250	Sweden, Texas	2	0
-249	Sweden, Texas	4	0
-248	Sweden, Texas	6	0
-247	Sweden, Texas	8	0
-246	Sweden, Texas	10	0
-SELECT COUNT(*) FROM test.t2;
-COUNT(*)
-250
-SELECT pk1, c2, c3,  hex(c4) FROM test.t2 ORDER BY c3 LIMIT 5;
-pk1	c2	c3	hex(c4)
-250	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	1	1
-249	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	3	1
-248	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	5	1
-247	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	7	1
-246	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	9	1
-SELECT COUNT(*) FROM test.t3;
-COUNT(*)
-250
-SELECT pk1, c2, c3,  hex(c4) FROM test.t3 ORDER BY c3 LIMIT 5;
-pk1	c2	c3	hex(c4)
-250	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	0	1
-249	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	2	1
-248	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	4	1
-247	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	6	1
-246	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	8	1
-SELECT COUNT(*) FROM test.t4;
-COUNT(*)
-250
-SELECT pk1, c2, c3,  hex(c4) FROM test.t4 ORDER BY c3 LIMIT 5;
-pk1	c2	c3	hex(c4)
-250	Sweden, Texas	2	0
-249	Sweden, Texas	4	0
-248	Sweden, Texas	6	0
-247	Sweden, Texas	8	0
-246	Sweden, Texas	10	0
-SELECT COUNT(*) FROM test.t5;
-COUNT(*)
-250
-SELECT pk1, c2, c3,  hex(c4) FROM test.t5 ORDER BY c3 LIMIT 5;
-pk1	c2	c3	hex(c4)
-250	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	1	1
-249	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	3	1
-248	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	5	1
-247	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	7	1
-246	Sweden, Texas, ITALY, Kyle, JO, JBM,TU	9	1
-SELECT COUNT(*) FROM test.t6;
-COUNT(*)
-250
-SELECT pk1, c2, c3,  hex(c4) FROM test.t6 ORDER BY c3 LIMIT 5;
-pk1	c2	c3	hex(c4)
-250	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	0	1
-249	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	2	1
-248	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	4	1
-247	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	6	1
-246	TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU	8	1
-DROP TABLE test.t1;
-DROP TABLE test.t2;
-DROP TABLE test.t3;
-DROP TABLE test.t4;
-DROP TABLE test.t5;
-DROP TABLE test.t6;
-ALTER TABLESPACE table_space1 DROP DATAFILE './table_space1/datafile.dat' ENGINE=NDB;
-ALTER TABLESPACE table_space2 DROP DATAFILE './table_space2/datafile.dat' ENGINE=NDB;
-DROP TABLESPACE table_space1 ENGINE = NDB;
-DROP TABLESPACE table_space2 ENGINE = NDB;
-DROP LOGFILE GROUP log_group1 ENGINE = NDB;

=== modified file 'mysql-test/suite/parts/t/disabled.def'
--- a/mysql-test/suite/parts/t/disabled.def	2009-02-13 16:30:54 +0000
+++ b/mysql-test/suite/parts/t/disabled.def	2009-02-27 14:17:35 +0000
@@ -1,9 +1,6 @@
 partition_basic_ndb            : Bug#19899 Crashing the server (cannot create t1)
-# http://dev.mysql.com/doc/refman/5.1/en/mysql-cluster-limitations-syntax.html
 partition_syntax_ndb           : Bug#36735 Not supported
-ndb_dd_backuprestore           : Bug#32659 2008-07-14 alik  Disabled to make 6.0 greaner (the test fails too often)
 partition_mgm_lc0_ndb          : Bug#38778 - master1 crashes
 partition_mgm_lc1_ndb          : Bug#38778 - master1 crashes
 partition_mgm_lc2_ndb          : Bug#38778 - master1 crashes
 partition_auto_increment_ndb   : Bug#39773 - auto_increment for NDB changed behavior
-#partition_engine_ndb           : cannot create t1

=== removed file 'mysql-test/suite/parts/t/ndb_dd_backuprestore.test'
--- a/mysql-test/suite/parts/t/ndb_dd_backuprestore.test	2008-07-01 18:38:15 +0000
+++ b/mysql-test/suite/parts/t/ndb_dd_backuprestore.test	1970-01-01 00:00:00 +0000
@@ -1,347 +0,0 @@
-########################################
-# Author: JBM
-# Date: 2006-01-24
-# Purpose: Test CDD backup and restore
-########################################
-
--- source include/have_ndb.inc
-# range, list and hash partitioning in ndb requires new_mode
---disable_query_log
-set new=on;
---enable_query_log
-
---disable_warnings
-DROP TABLE IF EXISTS test.t1;
-DROP TABLE IF EXISTS test.t2;
-DROP TABLE IF EXISTS test.t3;
-DROP TABLE IF EXISTS test.t4;
-DROP TABLE IF EXISTS test.t5;
-DROP TABLE IF EXISTS test.t6;
---enable_warnings
-
-############ Test 1 Simple DD backup and restore #############
--- echo **** Test 1 Simple DD backup and restore ****
-
-CREATE LOGFILE GROUP log_group1
-ADD UNDOFILE './log_group1/undofile.dat'
-INITIAL_SIZE 16M
-UNDO_BUFFER_SIZE = 1M
-ENGINE=NDB;
-
-CREATE TABLESPACE table_space1
-ADD DATAFILE './table_space1/datafile.dat'
-USE LOGFILE GROUP log_group1
-INITIAL_SIZE 12M
-ENGINE NDB;
-
-
-CREATE TABLE test.t1
-(pk1 MEDIUMINT NOT NULL AUTO_INCREMENT PRIMARY KEY, c2 CHAR(50) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL) TABLESPACE table_space1 STORAGE DISK ENGINE=NDB;
-
-let $j= 500;
---disable_query_log
-while ($j)
-{
-  eval INSERT INTO test.t1 VALUES (NULL, "Sweden", $j, b'1');
-  dec $j;
-}
---enable_query_log
-SELECT COUNT(*) FROM test.t1;
-SELECT pk1, c2, c3,  hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5;
-
--- source include/ndb_backup.inc
-
-DROP TABLE test.t1;
-
-ALTER TABLESPACE table_space1
-DROP DATAFILE './table_space1/datafile.dat'
-ENGINE = NDB;
-
-DROP TABLESPACE table_space1
-ENGINE = NDB;
-
-DROP LOGFILE GROUP log_group1
-ENGINE =NDB;
-
--- source include/ndb_restore_master.inc
-
-SELECT COUNT(*) FROM test.t1; 
-
-SELECT pk1, c2, c3,  hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5;
-
-################# Mixed Cluster Test ############################
--- echo **** Test 2 Mixed Cluster Test backup and restore ****
-
-CREATE TABLE test.t2
-(pk1 MEDIUMINT NOT NULL AUTO_INCREMENT PRIMARY KEY, c2 VARCHAR(200) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL)ENGINE=NDB;
-
-let $j= 500;
---disable_query_log
-while ($j)
-{
-  eval INSERT INTO test.t2 VALUES (NULL, "Sweden, Texas", $j, b'0');
-  dec $j;
-}
---enable_query_log
-
-CREATE TABLE test.t3 (c1 int not null auto_increment, data LONGBLOB, PRIMARY KEY(c1))TABLESPACE table_space1 STORAGE DISK ENGINE=NDB;
-
-CREATE TABLE test.t4 (c1 int not null auto_increment, data LONGBLOB, PRIMARY KEY(c1))ENGINE=NDB;
-
-let $j= 50;
---disable_query_log
-while ($j)
-{
-  INSERT INTO test.t3 VALUES (NULL, repeat('a',1*1024));
-  INSERT INTO test.t3 VALUES (NULL, repeat('b',16*1024));
-  INSERT INTO test.t4 VALUES (NULL, repeat('a',1*1024));
-  INSERT INTO test.t4 VALUES (NULL, repeat('b',16*1024));
-  dec $j;
-}
---enable_query_log
-
-SELECT COUNT(*) FROM test.t1;
-
-SELECT pk1, c2, c3,  hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5; 
-
-SELECT COUNT(*) FROM test.t2; 
-
-SELECT pk1, c2, c3,  hex(c4) FROM test.t2 ORDER BY pk1 LIMIT 5; 
-
-SELECT COUNT(*) FROM test.t3; 
-
-SELECT LENGTH(data) FROM test.t3 WHERE c1 = 1; 
-
-SELECT LENGTH(data) FROM test.t3 WHERE c1 = 2; 
-
-SELECT COUNT(*) FROM test.t4; 
-
-SELECT LENGTH(data) FROM test.t4 WHERE c1 = 1; 
-
-SELECT LENGTH(data) FROM test.t4 WHERE c1 = 2;
-
--- source include/ndb_backup.inc
-
-DROP TABLE test.t1;
-DROP TABLE test.t2;
-DROP TABLE test.t3;
-DROP TABLE test.t4;
-
-ALTER TABLESPACE table_space1
-DROP DATAFILE './table_space1/datafile.dat'
-ENGINE = NDB;
-
-DROP TABLESPACE table_space1
-ENGINE = NDB;
-
-DROP LOGFILE GROUP log_group1
-ENGINE =NDB;
-
--- source include/ndb_restore_master.inc
-
-SELECT COUNT(*) FROM test.t1;
-
-SELECT pk1, c2, c3,  hex(c4) FROM test.t1 ORDER BY pk1 LIMIT 5; 
-
-SELECT COUNT(*) FROM test.t2; 
-
-SELECT pk1, c2, c3,  hex(c4) FROM test.t2 ORDER BY pk1 LIMIT 5; 
-
-SELECT COUNT(*) FROM test.t3; 
-
-SELECT LENGTH(data) FROM test.t3 WHERE c1 = 1; 
-
-SELECT LENGTH(data) FROM test.t3 WHERE c1 = 2; 
-
-SELECT COUNT(*) FROM test.t4; 
-
-SELECT LENGTH(data) FROM test.t4 WHERE c1 = 1; 
-
-SELECT LENGTH(data) FROM test.t4 WHERE c1 = 2;
-
-DROP TABLE test.t1;
-DROP TABLE test.t2;
-DROP TABLE test.t3;
-DROP TABLE test.t4;
-###################### Adding partition #################################
--- echo **** Test 3 Adding  partition Test backup and restore ****
-
-CREATE TABLESPACE table_space2
-ADD DATAFILE './table_space2/datafile.dat'
-USE LOGFILE GROUP log_group1
-INITIAL_SIZE 12M
-ENGINE NDB;
-
-CREATE TABLE test.t1 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(150) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space1 STORAGE DISK ENGINE=NDB PARTITION BY HASH(c3) PARTITIONS 4;
-
-CREATE TABLE test.t4 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(180) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY HASH(c3) PARTITIONS 2;
-
-CREATE TABLE test.t2 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 TEXT NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space2 STORAGE DISK ENGINE=NDB PARTITION BY KEY(c3) (PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
-
-CREATE TABLE test.t5 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 TEXT NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY KEY(pk1) (PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
-
-CREATE TABLE test.t3 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(202) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space2 STORAGE DISK ENGINE=NDB PARTITION BY RANGE (c3) PARTITIONS 3 (PARTITION x1 VALUES LESS THAN (105), PARTITION x2 VALUES LESS THAN (333), PARTITION x3 VALUES LESS THAN (720));
-
-CREATE TABLE test.t6 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(220) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY RANGE (pk1) PARTITIONS 2 (PARTITION x1 VALUES LESS THAN (333), PARTITION x2 VALUES LESS THAN (720));
-
-SHOW CREATE TABLE test.t1;
-
-SHOW CREATE TABLE test.t2;
-
-SHOW CREATE TABLE test.t3;
-
-SHOW CREATE TABLE test.t4;
-
-SHOW CREATE TABLE test.t5;
-
-SHOW CREATE TABLE test.t6;
-
-SELECT * FROM information_schema.partitions WHERE table_name= 't1'; 
-
-SELECT * FROM information_schema.partitions WHERE table_name= 't2'; 
-
-SELECT * FROM information_schema.partitions WHERE table_name= 't3'; 
-
-SELECT * FROM information_schema.partitions WHERE table_name= 't4'; 
-
-SELECT * FROM information_schema.partitions WHERE table_name= 't5'; 
-
-SELECT * FROM information_schema.partitions WHERE table_name= 't6';
-
-
-let $j= 500;
---disable_query_log
-while ($j)
-{
-  eval INSERT INTO test.t1 VALUES (NULL, "Sweden, Texas", $j, b'0');
-  eval INSERT INTO test.t4 VALUES (NULL, "Sweden, Texas", $j, b'0');
-  dec $j;
-  eval INSERT INTO test.t2 VALUES (NULL, "Sweden, Texas, ITALY, Kyle, JO, JBM,TU", $j, b'1');
-  eval INSERT INTO test.t5 VALUES (NULL, "Sweden, Texas, ITALY, Kyle, JO, JBM,TU", $j, b'1');
-  dec $j;
-  eval INSERT INTO test.t3 VALUES (NULL, "TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU", $j, b'1');
-  eval INSERT INTO test.t6 VALUES (NULL, "TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU", $j, b'1');
-}
---enable_query_log
-
-SELECT COUNT(*) FROM test.t1;
-
-SELECT pk1, c2, c3,  hex(c4) FROM test.t1 ORDER BY c3 LIMIT 5; 
-
-SELECT COUNT(*) FROM test.t2; 
-
-SELECT pk1, c2, c3,  hex(c4) FROM test.t2 ORDER BY c3 LIMIT 5; 
-
-SELECT COUNT(*) FROM test.t3; 
-
-SELECT pk1, c2, c3,  hex(c4) FROM test.t3 ORDER BY c3 LIMIT 5; 
-
-SELECT COUNT(*) FROM test.t4; 
-
-SELECT pk1, c2, c3,  hex(c4) FROM test.t4 ORDER BY c3 LIMIT 5; 
-
-SELECT COUNT(*) FROM test.t5; 
-
-SELECT pk1, c2, c3,  hex(c4) FROM test.t5 ORDER BY c3 LIMIT 5; 
-
-SELECT COUNT(*) FROM test.t6; 
-
-SELECT pk1, c2, c3,  hex(c4) FROM test.t6 ORDER BY c3 LIMIT 5;
-
--- source include/ndb_backup.inc
-
-DROP TABLE test.t1;
-DROP TABLE test.t2;
-DROP TABLE test.t3;
-DROP TABLE test.t4;
-DROP TABLE test.t5;
-DROP TABLE test.t6;
-
-ALTER TABLESPACE table_space1
-DROP DATAFILE './table_space1/datafile.dat'
-ENGINE = NDB;
-
-ALTER TABLESPACE table_space2
-DROP DATAFILE './table_space2/datafile.dat'
-ENGINE = NDB;
-
-DROP TABLESPACE table_space1
-ENGINE = NDB;
-
-DROP TABLESPACE table_space2
-ENGINE = NDB;
-
-DROP LOGFILE GROUP log_group1
-ENGINE =NDB;
-
--- source include/ndb_restore_master.inc
-
-
-SHOW CREATE TABLE test.t1;
-
-SHOW CREATE TABLE test.t2;
-
-SHOW CREATE TABLE test.t3;
-
-SHOW CREATE TABLE test.t4;
-
-SHOW CREATE TABLE test.t5;
-
-SHOW CREATE TABLE test.t6;
-
-SELECT * FROM information_schema.partitions WHERE table_name= 't1'; 
-
-SELECT * FROM information_schema.partitions WHERE table_name= 't2'; 
-
-SELECT * FROM information_schema.partitions WHERE table_name= 't3'; 
-
-SELECT * FROM information_schema.partitions WHERE table_name= 't4'; 
-
-SELECT * FROM information_schema.partitions WHERE table_name= 't5'; 
-
-SELECT * FROM information_schema.partitions WHERE table_name= 't6'; 
-
-SELECT COUNT(*) FROM test.t1; 
-
-SELECT pk1, c2, c3,  hex(c4) FROM test.t1 ORDER BY c3 LIMIT 5; 
-
-SELECT COUNT(*) FROM test.t2; 
-
-SELECT pk1, c2, c3,  hex(c4) FROM test.t2 ORDER BY c3 LIMIT 5; 
-
-SELECT COUNT(*) FROM test.t3; 
-
-SELECT pk1, c2, c3,  hex(c4) FROM test.t3 ORDER BY c3 LIMIT 5; 
-
-SELECT COUNT(*) FROM test.t4; 
-
-SELECT pk1, c2, c3,  hex(c4) FROM test.t4 ORDER BY c3 LIMIT 5; 
-
-SELECT COUNT(*) FROM test.t5; 
-
-SELECT pk1, c2, c3,  hex(c4) FROM test.t5 ORDER BY c3 LIMIT 5; 
-
-SELECT COUNT(*) FROM test.t6; 
-
-SELECT pk1, c2, c3,  hex(c4) FROM test.t6 ORDER BY c3 LIMIT 5;
-
-# Cleanup
-
-DROP TABLE test.t1;
-DROP TABLE test.t2;
-DROP TABLE test.t3;
-DROP TABLE test.t4;
-DROP TABLE test.t5;
-DROP TABLE test.t6;
-
-ALTER TABLESPACE table_space1 DROP DATAFILE './table_space1/datafile.dat' ENGINE=NDB;
-
-ALTER TABLESPACE table_space2 DROP DATAFILE './table_space2/datafile.dat' ENGINE=NDB;
-
-DROP TABLESPACE table_space1 ENGINE = NDB;
-
-DROP TABLESPACE table_space2 ENGINE = NDB;
-
-DROP LOGFILE GROUP log_group1 ENGINE = NDB;
-
-#End 5.1 test case
-
-

=== modified file 'mysql-test/suite/rpl_ndb_big/r/rpl_ndb_auto_inc.result'
--- a/mysql-test/suite/rpl_ndb_big/r/rpl_ndb_auto_inc.result	2008-09-11 08:01:28 +0000
+++ b/mysql-test/suite/rpl_ndb_big/r/rpl_ndb_auto_inc.result	2009-02-16 12:05:36 +0000
@@ -168,3 +168,31 @@ a
 32
 42
 drop table t1;
+set @old_ndb_autoincrement_prefetch_sz = @@session.ndb_autoincrement_prefetch_sz;
+set ndb_autoincrement_prefetch_sz = 32;
+CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT KEY) ENGINE=ndb;
+INSERT INTO t1 () VALUES (),(),();
+set @old_ndb_autoincrement_prefetch_sz = @@session.ndb_autoincrement_prefetch_sz;
+set ndb_autoincrement_prefetch_sz = 32;
+select * from t1 order by id;
+id
+1
+2
+3
+INSERT INTO t1 () VALUES (),(),();
+select * from t1 order by id;
+id
+1
+2
+3
+4
+5
+6
+set ndb_autoincrement_prefetch_sz = @old_ndb_autoincrement_prefetch_sz;
+select * from t1 order by id;
+id
+1
+2
+3
+drop table t1;
+set ndb_autoincrement_prefetch_sz = @old_ndb_autoincrement_prefetch_sz;

=== modified file 'mysql-test/suite/rpl_ndb_big/r/rpl_ndb_circular_simplex.result'
--- a/mysql-test/suite/rpl_ndb_big/r/rpl_ndb_circular_simplex.result	2008-10-29 08:45:14 +0000
+++ b/mysql-test/suite/rpl_ndb_big/r/rpl_ndb_circular_simplex.result	2009-02-16 14:33:19 +0000
@@ -53,6 +53,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	0
 Last_SQL_Error	
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	2
 SELECT * FROM t1 ORDER BY a;
 a	b
 1	2
@@ -102,3 +104,6 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	0
 Last_SQL_Error	
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
+DROP TABLE t1;

=== renamed file 'mysql-test/suite/rpl_ndb_big/r/rpl_ndb_mixed_engines_transactions.result' => 'mysql-test/suite/rpl_ndb_big/r/rpl_ndb_mix_eng_trans.result'
=== modified file 'mysql-test/suite/rpl_ndb_big/r/rpl_ndb_multi.result'
--- a/mysql-test/suite/rpl_ndb_big/r/rpl_ndb_multi.result	2008-11-13 19:40:36 +0000
+++ b/mysql-test/suite/rpl_ndb_big/r/rpl_ndb_multi.result	2009-03-19 13:28:34 +0000
@@ -5,10 +5,12 @@ reset slave;
 drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
 start slave;
 CREATE TABLE t1 (c1 CHAR(15), c2 CHAR(15), c3 INT, PRIMARY KEY (c3)) ENGINE = NDB ;
+CREATE TABLE t2 (a int key) ENGINE = NDB ;
 reset master;
 SHOW TABLES;
 Tables_in_test
 t1
+t2
 INSERT INTO t1 VALUES ("row1","will go away",1);
 SELECT * FROM t1 ORDER BY c3;
 c1	c2	c3
@@ -23,6 +25,17 @@ SELECT * FROM t1 ORDER BY c3;
 c1	c2	c3
 row1	will go away	1
 stop slave;
+set SQL_LOG_BIN=0;
+insert into t2 values (1);
+show binlog events from <binlog_start>;
+Log_name	Pos	Event_type	Server_id	End_log_pos	Info
+master-bin.000001	#	Query	3	#	BEGIN
+master-bin.000001	#	Table_map	3	#	table_id: # (test.t1)
+master-bin.000001	#	Table_map	3	#	table_id: # (mysql.ndb_apply_status)
+master-bin.000001	#	Write_rows	3	#	table_id: #
+master-bin.000001	#	Write_rows	3	#	table_id: # flags: STMT_END_F
+master-bin.000001	#	Query	3	#	COMMIT
+set SQL_LOG_BIN=1;
 SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
 FROM mysql.ndb_binlog_index WHERE epoch = <the_epoch> ;
 @the_pos:=Position	@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
@@ -54,4 +67,5 @@ row4	D	4
 row5	E	5
 ==== clean up ====
 DROP TABLE t1;
+DROP TABLE t2;
 STOP SLAVE;

=== modified file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_auto_inc.test'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_auto_inc.test	2008-09-11 08:01:28 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_auto_inc.test	2009-02-16 12:05:36 +0000
@@ -111,8 +111,26 @@ sync_slave_with_master;
 --echo
 select * from t1 ORDER BY a;
 connection master;
-
 drop table t1;
 
+#
+# Bug #42232 Cluster to cluster replication is not setting auto_increment
+# on slave cluster
+#
+set @old_ndb_autoincrement_prefetch_sz = @@session.ndb_autoincrement_prefetch_sz;
+set ndb_autoincrement_prefetch_sz = 32;
+CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT KEY) ENGINE=ndb;
+INSERT INTO t1 () VALUES (),(),();
+sync_slave_with_master;
+set @old_ndb_autoincrement_prefetch_sz = @@session.ndb_autoincrement_prefetch_sz;
+set ndb_autoincrement_prefetch_sz = 32;
+select * from t1 order by id;
+INSERT INTO t1 () VALUES (),(),();
+select * from t1 order by id;
+set ndb_autoincrement_prefetch_sz = @old_ndb_autoincrement_prefetch_sz;
+connection master;
+select * from t1 order by id;
+drop table t1;
+set ndb_autoincrement_prefetch_sz = @old_ndb_autoincrement_prefetch_sz;
 # End cleanup
 sync_slave_with_master;

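For context: NDB hands out auto-increment values to each SQL node in prefetched ranges of ndb_autoincrement_prefetch_sz ids, which is why the test above pins the variable on both master and slave before comparing the generated ids. A minimal stand-alone sketch of range-based id prefetching (AutoIncCache and its members are illustrative names, not the actual NDB API):

  #include <cstdint>
  #include <iostream>

  // Illustrative sketch: each SQL node caches a half-open range of ids
  // and only goes back to the shared counter when the range runs out.
  struct AutoIncCache {
    uint64_t next = 0, end = 0;               // cached range [next, end)
    uint64_t fetch(uint64_t &shared, uint32_t prefetch_sz) {
      if (next == end) {                      // exhausted: reserve a new block
        next = shared;
        end = shared + prefetch_sz;
        shared = end;
      }
      return next++;
    }
  };

  int main() {
    uint64_t shared = 1;                      // cluster-wide counter, simplified
    AutoIncCache node_a, node_b;
    std::cout << node_a.fetch(shared, 32) << "\n";   // 1
    std::cout << node_b.fetch(shared, 32) << "\n";   // 33: b reserves next block
    std::cout << node_a.fetch(shared, 32) << "\n";   // 2: a continues its block
  }

This is why two nodes inserting concurrently produce interleaved but non-colliding ids, and why the test lowers the prefetch size to keep the ids easy to predict.
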
=== modified file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_circular_simplex.test'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_circular_simplex.test	2009-02-25 14:29:08 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_circular_simplex.test	2009-03-24 10:33:15 +0000
@@ -84,3 +84,8 @@ SELECT * FROM t1 ORDER BY a;
 --replace_result $MASTER_MYPORT MASTER_PORT
 --replace_column 1 # 7 # 8 # 9 # 22 # 23 # 33 # 35 # 36 #
 query_vertical SHOW SLAVE STATUS;
+
+# cleanup
+connection master;
+DROP TABLE t1;
+sync_slave_with_master;

=== renamed file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_mixed_engines_transactions-master.opt' => 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_mix_eng_trans-master.opt'
=== renamed file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_mixed_engines_transactions-slave.opt' => 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_mix_eng_trans-slave.opt'
=== renamed file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_mixed_engines_transactions.test' => 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_mix_eng_trans.test'
=== modified file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndb_multi.test'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_multi.test	2008-11-13 19:40:36 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndb_multi.test	2009-03-19 13:28:34 +0000
@@ -11,6 +11,8 @@
 
 # create a table with one row, and make sure the other "master" gets it
 CREATE TABLE t1 (c1 CHAR(15), c2 CHAR(15), c3 INT, PRIMARY KEY (c3)) ENGINE = NDB ;
+# a dummy table such that we can make sure data gets into binlog on server2
+CREATE TABLE t2 (a int key) ENGINE = NDB ;
 connection server2;
 reset master;
 SHOW TABLES;
@@ -31,6 +33,13 @@ stop slave;
 
 # get the master binlog pos from the epoch, from the _other_ "master", server2
 connection server2;
+# insert some data into t2 with binlogging disabled, just so that we
+# can do "show binlog events" and make sure the t1 update is actually
+# in the server2 binlog
+set SQL_LOG_BIN=0;
+insert into t2 values (1);
+-- source include/show_binlog_events2.inc
+set SQL_LOG_BIN=1;
 --replace_result $the_epoch <the_epoch>
 eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
    FROM mysql.ndb_binlog_index WHERE epoch = $the_epoch ;
@@ -67,6 +76,7 @@ SELECT * FROM t1 ORDER BY c3;
 --echo ==== clean up ====
 connection server2;
 DROP TABLE t1;
+DROP TABLE t2;
 sync_slave_with_master;
 
 STOP SLAVE;

=== modified file 'mysql-test/suite/rpl_ndb_big/t/rpl_ndbapi_multi.test'
--- a/mysql-test/suite/rpl_ndb_big/t/rpl_ndbapi_multi.test	2008-10-29 08:45:14 +0000
+++ b/mysql-test/suite/rpl_ndb_big/t/rpl_ndbapi_multi.test	2009-03-12 10:45:04 +0000
@@ -2,6 +2,7 @@
 -- source include/have_binlog_format_mixed_or_row.inc
 -- source include/have_ndbapi_examples.inc
 -- source include/ndb_master-slave.inc
+--let MASTER_MYSOCK=`select @@socket`;
 
 --exec echo Running ndbapi_simple_dual
 --exec $NDB_EXAMPLES_DIR/ndbapi_simple_dual/ndbapi_simple_dual $MASTER_MYSOCK "$NDB_CONNECTSTRING" $SLAVE_MYSOCK "$NDB_CONNECTSTRING_SLAVE" >> $NDB_EXAMPLES_OUTPUT

=== modified file 'mysql-test/t/mysqldump.test'
--- a/mysql-test/t/mysqldump.test	2009-03-12 16:18:40 +0000
+++ b/mysql-test/t/mysqldump.test	2009-03-24 10:33:15 +0000
@@ -1683,6 +1683,24 @@ SET NAMES latin1;
 
 
 --echo #
+--echo # Bug#33550 mysqldump 4.0 compatibility broken
+--echo #
+
+SET NAMES utf8;
+CREATE TABLE `straße` ( f1 INT );
+--exec $MYSQL_DUMP --character-sets-dir=$CHARSETSDIR --skip-comments --default-character-set=utf8 --compatible=mysql323 test
+--exec $MYSQL_DUMP --character-sets-dir=$CHARSETSDIR --skip-comments --default-character-set=latin1 --compatible=mysql323 test
+DROP TABLE `straße`;
+
+CREATE TABLE `כדשגכחךלדגכחשךדגחכךלדגכ` ( f1 INT );
+--exec $MYSQL_DUMP --character-sets-dir=$CHARSETSDIR --skip-comments --default-character-set=utf8 --compatible=mysql323 test
+--error 2
+--exec $MYSQL_DUMP --character-sets-dir=$CHARSETSDIR --skip-comments --default-character-set=latin1 --compatible=mysql323 test
+DROP TABLE `כדשגכחךלדגכחשךדגחכךלדגכ`;
+SET NAMES latin1;
+
+
+--echo #
 --echo # End of 5.0 tests
 --echo #
 

=== modified file 'scripts/mysql_system_tables.sql'
--- a/scripts/mysql_system_tables.sql	2009-03-11 21:22:33 +0000
+++ b/scripts/mysql_system_tables.sql	2009-03-24 10:33:15 +0000
@@ -86,5 +86,5 @@ CREATE TABLE IF NOT EXISTS backup_histor
 
 CREATE TABLE IF NOT EXISTS backup_progress ( backup_id BIGINT UNSIGNED NOT NULL COMMENT 'Key for backup_history table entries', object CHAR (30) NOT NULL DEFAULT '' COMMENT 'The object being operated on', start_time datetime NOT NULL DEFAULT 0 COMMENT 'The date/time of start of operation', stop_time datetime NOT NULL DEFAULT 0 COMMENT 'The date/time of end of operation', total_bytes BIGINT NOT NULL DEFAULT 0 COMMENT 'The size of the object in bytes', progress BIGINT UNSIGNED NOT NULL DEFAULT 0 COMMENT 'The number of bytes processed', error_num INT NOT NULL DEFAULT 0 COMMENT 'The error from this run 0 == none', notes CHAR(100) NOT NULL DEFAULT '' COMMENT 'Commentary from the backup engine') ENGINE=CSV DEFAULT CHARACTER SET utf8;
 
-CREATE TABLE IF NOT EXISTS ndb_binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts INT UNSIGNED NOT NULL, updates INT UNSIGNED NOT NULL, deletes INT UNSIGNED NOT NULL, schemaops INT UNSIGNED NOT NULL, orig_server_id INT UNSIGNED NOT NULL, orig_epoch BIGINT UNSIGNED NOT NULL, gci INT UNSIGNED NOT NULL, PRIMARY KEY(epoch, orig_server_id, orig_epoch)) ENGINE=MARIA;
+CREATE TABLE IF NOT EXISTS ndb_binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts INT UNSIGNED NOT NULL, updates INT UNSIGNED NOT NULL, deletes INT UNSIGNED NOT NULL, schemaops INT UNSIGNED NOT NULL, orig_server_id INT UNSIGNED NOT NULL, orig_epoch BIGINT UNSIGNED NOT NULL, gci INT UNSIGNED NOT NULL, PRIMARY KEY(epoch, orig_server_id, orig_epoch)) ENGINE=MYISAM;
 

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2009-02-18 10:23:38 +0000
+++ b/sql/ha_ndbcluster.cc	2009-03-12 10:45:04 +0000
@@ -4408,8 +4408,11 @@ THR_LOCK_DATA **ha_ndbcluster::store_loc
     /* Since NDB does not currently have table locks
       this is treated as an ordinary lock */
 
+    const bool in_lock_tables = thd_in_lock_tables(thd);
+    const uint sql_command = thd_sql_command(thd);
     if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
-         lock_type <= TL_WRITE) && !thd->in_lock_tables)      
+         lock_type <= TL_WRITE) &&
+        !(in_lock_tables && sql_command == SQLCOM_LOCK_TABLES))
       lock_type= TL_WRITE_ALLOW_WRITE;
     
     /* In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
@@ -5493,7 +5496,6 @@ int ha_ndbcluster::create(const char *na
           strcmp(m_tabname, NDB_SCHEMA_TABLE) == 0))
     {
       DBUG_PRINT("info", ("Schema distribution table not setup"));
-      DBUG_ASSERT(ndb_schema_share);
       DBUG_RETURN(HA_ERR_NO_CONNECTION);
     }
     single_user_mode = NdbDictionary::Table::SingleUserModeReadWrite;
@@ -6305,7 +6307,6 @@ ha_ndbcluster::delete_table(THD *thd, ha
   if (!ndb_schema_share)
   {
     DBUG_PRINT("info", ("Schema distribution table not setup"));
-    DBUG_ASSERT(ndb_schema_share);
     DBUG_RETURN(HA_ERR_NO_CONNECTION);
   }
   /* ndb_share reference temporary */
@@ -8286,7 +8287,9 @@ ndbcluster_cache_retrieval_allowed(THD *
   {
     /* Don't allow qc to be used if table has been previously
        modified in transaction */
-    Thd_ndb *thd_ndb= get_thd_ndb(thd);
+    if (!check_ndb_in_thd(thd))
+      DBUG_RETURN(FALSE);
+    Thd_ndb *thd_ndb= get_thd_ndb(thd);
     if (!thd_ndb->changed_tables.is_empty())
     {
       NDB_SHARE* share;
@@ -10898,6 +10901,16 @@ int ha_ndbcluster::check_if_supported_al
      /*
         Check that we are only adding columns
      */
+     /*
+       HA_COLUMN_STORAGE & HA_COLUMN_FORMAT are set if they are
+       specified in a later command, even if nothing changed. This is
+       probably a bug; conclusion: add them to the add_column mask, so
+       that we silently "accept" them. If someone actually tries to
+       change a column, HA_CHANGE_COLUMN is also set, which we do not
+       support, so we still return HA_ALTER_NOT_SUPPORTED in that case.
+     */
+     add_column.set_bit(HA_COLUMN_STORAGE);
+     add_column.set_bit(HA_COLUMN_FORMAT);     
      if ((*alter_flags & ~add_column).is_set())
      {
        DBUG_PRINT("info", ("Only add column exclusively can be performed on-line"));
@@ -10972,6 +10985,22 @@ int ha_ndbcluster::check_if_supported_al
     const NDBCOL *col= tab->getColumn(i);
 
     create_ndb_column(0, new_col, field, create_info);
+
+    /**
+     * This is a "copy" of code in ::create()
+     *   that "auto-converts" columns with keys into memory
+     *   (unless storage disk is explicitly added)
+     * This is needed so that the col->getStorageType() !=
+     * new_col.getStorageType() check further down compares like with like
+     */
+    if (field->flags & (PRI_KEY_FLAG | UNIQUE_KEY_FLAG | MULTIPLE_KEY_FLAG))
+    {
+      if (field->field_storage_type() == HA_SM_DISK)
+      {
+        DBUG_RETURN(HA_ALTER_NOT_SUPPORTED);
+      }
+      new_col.setStorageType(NdbDictionary::Column::StorageTypeMemory);
+    }
+
     if (col->getStorageType() != new_col.getStorageType())
     {
       DBUG_PRINT("info", ("Column storage media is changed"));

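The check_if_supported_alter() changes above follow a whitelist pattern: build a mask of the alter flags the online path can handle, then reject the request if any bit outside that mask is set; key columns are additionally forced to memory storage before the storage-type comparison, mirroring what ::create() does. A hedged stand-alone sketch of the mask idea (the F_* flags are invented for illustration; the server uses HA_* bits):

  #include <cassert>
  #include <cstdint>

  // Invented flags for illustration; the server uses HA_* bits instead.
  enum AlterFlag : uint32_t {
    F_ADD_COLUMN     = 1u << 0,
    F_COLUMN_STORAGE = 1u << 1,  // may be set even when nothing changed
    F_COLUMN_FORMAT  = 1u << 2,  // may be set even when nothing changed
    F_CHANGE_COLUMN  = 1u << 3,
  };

  static bool supported_online(uint32_t alter_flags) {
    // Whitelist: add-column plus the two "noise" bits we silently accept.
    const uint32_t add_column_mask =
        F_ADD_COLUMN | F_COLUMN_STORAGE | F_COLUMN_FORMAT;
    return (alter_flags & ~add_column_mask) == 0;
  }

  int main() {
    assert(supported_online(F_ADD_COLUMN | F_COLUMN_FORMAT));  // online path
    assert(!supported_online(F_ADD_COLUMN | F_CHANGE_COLUMN)); // copying alter
  }
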
=== modified file 'sql/ha_ndbcluster_binlog.cc'
--- a/sql/ha_ndbcluster_binlog.cc	2009-03-04 13:33:47 +0000
+++ b/sql/ha_ndbcluster_binlog.cc	2009-03-24 10:33:15 +0000
@@ -2688,15 +2688,19 @@ ndb_binlog_thread_handle_schema_event_po
         if (ndb_extra_logging > 9)
           sql_print_information("SOT_RENAME_TABLE %s.%s", schema->db, schema->name);
         log_query= 1;
-        pthread_mutex_lock(&LOCK_open);
-        ndbcluster_rename_share(thd, share);
-        pthread_mutex_unlock(&LOCK_open);
+        if (share)
+        {
+          pthread_mutex_lock(&LOCK_open);
+          ndbcluster_rename_share(thd, share);
+          pthread_mutex_unlock(&LOCK_open);
+        }
         break;
       case SOT_RENAME_TABLE_PREPARE:
         if (ndb_extra_logging > 9)
           sql_print_information("SOT_RENAME_TABLE_PREPARE %s.%s -> %s",
                                 schema->db, schema->name, schema->query);
-        if (schema->node_id != g_ndb_cluster_connection->node_id())
+        if (share &&
+            schema->node_id != g_ndb_cluster_connection->node_id())
           ndbcluster_prepare_rename_share(share, schema->query);
         break;
       case SOT_ALTER_TABLE_COMMIT:
@@ -4729,7 +4733,7 @@ pthread_handler_t ndb_binlog_thread_func
   pthread_mutex_unlock(&LOCK_thread_count);
   thd->lex->start_transaction_opt= 0;
 
-  if (!(s_ndb= new Ndb(g_ndb_cluster_connection, "")) ||
+  if (!(s_ndb= new Ndb(g_ndb_cluster_connection, NDB_REP_DB)) ||
       s_ndb->init())
   {
     sql_print_error("NDB Binlog: Getting Schema Ndb object failed");

=== modified file 'sql/slave.cc'
--- a/sql/slave.cc	2009-02-23 14:53:18 +0000
+++ b/sql/slave.cc	2009-03-24 10:33:15 +0000
@@ -2791,7 +2791,6 @@ Slave SQL thread aborted. Can't execute 
     DBUG_ASSERT(rli->sql_thd == thd);
     THD_CHECK_SENTRY(thd);
 
-    sql_print_information("new_mode %u", thd->variables.new_mode);
     if (exec_relay_log_event(thd,rli))
     {
       DBUG_PRINT("info", ("exec_relay_log_event() failed"));

=== modified file 'sql/sql_table.cc'
--- a/sql/sql_table.cc	2009-03-09 12:17:41 +0000
+++ b/sql/sql_table.cc	2009-03-24 10:33:15 +0000
@@ -5855,21 +5855,6 @@ int create_temporary_table(THD *thd,
   else
     create_info->data_file_name=create_info->index_file_name=0;
 
-  if (new_db_type == old_db_type)
-  {
-    /*
-       Table has not changed storage engine.
-       If STORAGE and TABLESPACE have not been changed than copy them
-       from the original table
-    */
-    if (!create_info->tablespace &&
-        table->s->tablespace &&
-        create_info->default_storage_media == HA_SM_DEFAULT)
-      create_info->tablespace= table->s->tablespace;
-    if (create_info->default_storage_media == HA_SM_DEFAULT)
-      create_info->default_storage_media= table->s->default_storage_media;
-   }
-
   /*
     Create a table with a temporary name.
     With create_info->frm_only == 1 this creates a .frm file only.
@@ -6526,6 +6511,22 @@ mysql_prepare_alter_table(THD *thd, TABL
   if (table->s->tmp_table)
     create_info->options|=HA_LEX_CREATE_TMP_TABLE;
 
+  if (create_info->db_type == table->s->db_type())
+  {
+    /*
+       Table has not changed storage engine.
+       If STORAGE and TABLESPACE have not been changed than copy them
+       from the original table
+    */
+    if (!create_info->tablespace &&
+        table->s->tablespace &&
+        create_info->default_storage_media == HA_SM_DEFAULT)
+      create_info->tablespace= table->s->tablespace;
+
+    if (create_info->default_storage_media == HA_SM_DEFAULT)
+      create_info->default_storage_media= table->s->default_storage_media;
+  }
+
   rc= FALSE;
   alter_info->create_list.swap(new_create_list);
   alter_info->key_list.swap(new_key_list);

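The sql_table.cc change moves the inherit-unless-overridden rule from create_temporary_table() to mysql_prepare_alter_table(), so it runs before the alter plan is fixed: if the engine is unchanged and the user did not specify STORAGE or TABLESPACE, they are copied from the original table. A simplified sketch of that rule, with stand-in types:

  // Hedged sketch of the inherit-unless-overridden rule: defaults are
  // copied from the old table only when the engine is unchanged and the
  // user did not specify new values. Types are simplified stand-ins.
  enum StorageMedia { SM_DEFAULT, SM_DISK, SM_MEMORY };

  struct CreateInfo { const char *tablespace; StorageMedia media; int db_type; };
  struct TableShare { const char *tablespace; StorageMedia media; int db_type; };

  static void inherit_storage(CreateInfo &ci, const TableShare &old_table) {
    if (ci.db_type != old_table.db_type)
      return;                                // engine changed: no inheritance
    if (!ci.tablespace && old_table.tablespace && ci.media == SM_DEFAULT)
      ci.tablespace = old_table.tablespace;  // keep the old tablespace
    if (ci.media == SM_DEFAULT)
      ci.media = old_table.media;            // keep the old storage media
  }

  int main() {
    TableShare old_t = {"ts1", SM_DISK, 1};
    CreateInfo ci = {nullptr, SM_DEFAULT, 1};  // same engine, nothing specified
    inherit_storage(ci, old_t);
    return (ci.tablespace && ci.media == SM_DISK) ? 0 : 1;
  }
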
=== modified file 'storage/ndb/include/kernel/RefConvert.hpp'
--- a/storage/ndb/include/kernel/RefConvert.hpp	2006-12-23 19:20:40 +0000
+++ b/storage/ndb/include/kernel/RefConvert.hpp	2009-02-17 07:52:13 +0000
@@ -27,6 +27,14 @@ BlockNumber refToBlock(BlockReference re
 }
 
 /**
+ * To allow writing merge-safe code
+ */
+inline
+BlockNumber refToMain(BlockReference ref){
+  return (BlockNumber)(ref >> 16);
+}
+
+/**
  * Convert BlockReference to NodeId
  */
 inline

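refToMain() is added with the same body as refToBlock() so that code written against trees where a block reference also carries an instance id (and refToBlock() masks it away) merges cleanly here. A sketch of the assumed encoding, with the block number in the upper 16 bits and the node id in the lower 16:

  #include <cassert>
  #include <cstdint>

  typedef uint32_t BlockReference;

  // Assumed layout: [ block number : 16 | node id : 16 ]
  static uint32_t refToBlockNo(BlockReference ref) { return ref >> 16; }
  static uint32_t refToNodeId(BlockReference ref)  { return ref & 0xFFFF; }
  static BlockReference numberToRef(uint32_t block, uint32_t node) {
    return (block << 16) | (node & 0xFFFF);
  }

  int main() {
    BlockReference ref = numberToRef(250, 3);  // example block number, node 3
    assert(refToBlockNo(ref) == 250);
    assert(refToNodeId(ref) == 3);
  }
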
=== modified file 'storage/ndb/include/kernel/signaldata/AllocNodeId.hpp'
--- a/storage/ndb/include/kernel/signaldata/AllocNodeId.hpp	2006-12-27 01:23:51 +0000
+++ b/storage/ndb/include/kernel/signaldata/AllocNodeId.hpp	2009-02-18 14:29:58 +0000
@@ -24,21 +24,28 @@
  */
 class AllocNodeIdReq {
 public:
-  STATIC_CONST( SignalLength = 4 );
+  STATIC_CONST( SignalLength = 5 );
+  STATIC_CONST( SignalLengthQMGR = 7 );
 
   Uint32 senderRef;
   Uint32 senderData;
   Uint32 nodeId;
   Uint32 nodeType;
+  Uint32 timeout;
+
+  Uint32 secret_lo;
+  Uint32 secret_hi;
 };
 
 class AllocNodeIdConf {
 public:
-  STATIC_CONST( SignalLength = 3 );
+  STATIC_CONST( SignalLength = 5 );
 
   Uint32 senderRef;
   Uint32 senderData;
   Uint32 nodeId;
+  Uint32 secret_lo;
+  Uint32 secret_hi;
 };
 
 class AllocNodeIdRef {

=== modified file 'storage/ndb/include/kernel/signaldata/BuildIndx.hpp'
--- a/storage/ndb/include/kernel/signaldata/BuildIndx.hpp	2006-12-23 19:20:40 +0000
+++ b/storage/ndb/include/kernel/signaldata/BuildIndx.hpp	2009-02-27 13:18:49 +0000
@@ -239,7 +239,8 @@ public:
     InvalidIndexType = 4250,
     IndexNotUnique = 4251,
     AllocationFailure = 4252,
-    InternalError = 4346
+    InternalError = 4346,
+    DeadlockError = 4351
   };
   STATIC_CONST( SignalLength = BuildIndxConf::SignalLength + 2 );
 

=== modified file 'storage/ndb/include/kernel/signaldata/CreateFilegroupImpl.hpp'
--- a/storage/ndb/include/kernel/signaldata/CreateFilegroupImpl.hpp	2007-11-12 14:53:25 +0000
+++ b/storage/ndb/include/kernel/signaldata/CreateFilegroupImpl.hpp	2009-03-13 07:51:21 +0000
@@ -164,7 +164,8 @@ struct CreateFileImplRef {
     OutOfMemory = 1511,
     FileReadError = 1512,
     FilegroupNotOnline = 1513,
-    FileSizeTooLarge = 1515
+    FileSizeTooLarge = 1515,
+    FileSizeTooSmall = 1516
   };
   
   Uint32 senderData;

=== modified file 'storage/ndb/include/kernel/signaldata/FsOpenReq.hpp'
--- a/storage/ndb/include/kernel/signaldata/FsOpenReq.hpp	2007-05-14 08:34:21 +0000
+++ b/storage/ndb/include/kernel/signaldata/FsOpenReq.hpp	2009-02-17 07:52:13 +0000
@@ -99,6 +99,15 @@ private:
     S_LOG = 7,
     S_CTL = 8
   };
+
+  enum BasePathSpec
+  {
+    BP_FS = 0,     // FileSystemPath
+    BP_BACKUP = 1, // BackupDataDir
+    BP_DD_DF = 2,  // FileSystemPathDataFiles
+    BP_DD_UF = 3,  // FileSystemPathUndoFiles
+    BP_MAX = 4
+  };
   
   static Uint32 getVersion(const Uint32 fileNumber[]);
   static Uint32 getSuffix(const Uint32 fileNumber[]);

=== modified file 'storage/ndb/include/mgmapi/mgmapi_config_parameters.h'
--- a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h	2008-12-18 09:16:45 +0000
+++ b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h	2009-02-17 07:52:13 +0000
@@ -132,6 +132,12 @@
 #define CFG_DB_MAX_BUFFERED_EPOCHS    182 /* subscriptions */
 #define CFG_DB_SUMA_HANDOVER_TIMEOUT  183
 
+#define CFG_DB_DD_FILESYSTEM_PATH     193
+#define CFG_DB_DD_DATAFILE_PATH       194
+#define CFG_DB_DD_UNDOFILE_PATH       195
+#define CFG_DB_DD_LOGFILEGROUP_SPEC   196
+#define CFG_DB_DD_TABLEPACE_SPEC      197
+
 #define CFG_DB_SGA                    198 /* super pool mem */
 #define CFG_DB_DATA_MEM_2             199 /* used in special build in 5.1 */
 

=== modified file 'storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp'
--- a/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp	2008-08-12 18:56:42 +0000
+++ b/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp	2009-03-19 13:28:34 +0000
@@ -274,6 +274,7 @@ private:
                              Uint32 column_index,
                              const char *row,
                              Uint32 bound_type);
+  int insert_open_bound(const NdbRecord* key_record);
 
   virtual int equal_impl(const NdbColumnImpl*, const char*);
   virtual NdbRecAttr* getValue_impl(const NdbColumnImpl*, char*);

=== modified file 'storage/ndb/include/ndbapi/NdbReceiver.hpp'
--- a/storage/ndb/include/ndbapi/NdbReceiver.hpp	2008-04-25 09:17:19 +0000
+++ b/storage/ndb/include/ndbapi/NdbReceiver.hpp	2009-03-16 15:08:09 +0000
@@ -215,6 +215,7 @@ inline
 void
 NdbReceiver::prepareSend(){
   /* Set pointers etc. to prepare for receiving the first row of the batch. */
+  theMagicNumber = 0x11223344;
   m_current_row = 0;
   m_received_result_length = 0;
   m_expected_result_length = 0;

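Resetting theMagicNumber in prepareSend() re-arms the receiver's validity check before each batch; the magic-number idiom is a cheap guard against delivering rows into a released or wrongly-typed object. A minimal hedged sketch (checkMagicNumber() and release() are illustrative here):

  #include <cassert>
  #include <cstdint>

  // Minimal sketch of the magic-number validity idiom.
  struct Receiver {
    static const uint32_t MAGIC = 0x11223344;
    uint32_t theMagicNumber = 0;

    void prepareSend() { theMagicNumber = MAGIC; /* plus batch state reset */ }
    bool checkMagicNumber() const { return theMagicNumber == MAGIC; }
    void release() { theMagicNumber = 0; }     // poison on release
  };

  int main() {
    Receiver r;
    r.prepareSend();
    assert(r.checkMagicNumber());    // safe to deliver rows into r
    r.release();
    assert(!r.checkMagicNumber());   // a stale pointer would be caught here
  }
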
=== modified file 'storage/ndb/src/common/debugger/EventLogger.cpp'
--- a/storage/ndb/src/common/debugger/EventLogger.cpp	2008-10-24 11:00:37 +0000
+++ b/storage/ndb/src/common/debugger/EventLogger.cpp	2009-02-20 08:46:10 +0000
@@ -804,7 +804,7 @@ void getTextMemoryUsage(QQQQ) {
 
 void getTextBackupStarted(QQQQ) {
   BaseString::snprintf(m_text, m_text_len, 
-		       "Backup %d started from node %d", 
+		       "Backup %u started from node %d", 
 		       theData[2], refToNode(theData[1]));
 }
 void getTextBackupFailedToStart(QQQQ) {
@@ -839,7 +839,7 @@ void getTextBackupStatus(QQQQ) {
 }
 void getTextBackupAborted(QQQQ) {
   BaseString::snprintf(m_text, m_text_len, 
-		       "Backup %d started from %d has been aborted. Error: %d",
+		       "Backup %u started from %d has been aborted. Error: %d",
 		       theData[2], 
 		       refToNode(theData[1]), 
 		       theData[3]);

=== modified file 'storage/ndb/src/kernel/blocks/backup/Backup.cpp'
--- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp	2009-01-27 14:32:31 +0000
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp	2009-02-20 08:46:10 +0000
@@ -463,7 +463,7 @@ Backup::execDUMP_STATE_ORD(Signal* signa
      */
     BackupRecordPtr ptr LINT_SET_PTR;
     for(c_backups.first(ptr); ptr.i != RNIL; c_backups.next(ptr)){
-      infoEvent("BackupRecord %d: BackupId: %d MasterRef: %x ClientRef: %x",
+      infoEvent("BackupRecord %d: BackupId: %u MasterRef: %x ClientRef: %x",
 		ptr.i, ptr.p->backupId, ptr.p->masterRef, ptr.p->clientRef);
       infoEvent(" State: %d", ptr.p->slaveState.getState());
       BackupFilePtr filePtr;
@@ -639,7 +639,7 @@ Backup::execBACKUP_CONF(Signal* signal)
   jamEntry();
   BackupConf * conf = (BackupConf*)signal->getDataPtr();
   
-  ndbout_c("Backup %d has started", conf->backupId);
+  ndbout_c("Backup %u has started", conf->backupId);
 }
 
 void
@@ -648,7 +648,7 @@ Backup::execBACKUP_REF(Signal* signal)
   jamEntry();
   BackupRef * ref = (BackupRef*)signal->getDataPtr();
 
-  ndbout_c("Backup (%d) has NOT started %d", ref->senderData, ref->errorCode);
+  ndbout_c("Backup (%u) has NOT started %d", ref->senderData, ref->errorCode);
 }
 
 void
@@ -659,7 +659,7 @@ Backup::execBACKUP_COMPLETE_REP(Signal* 
  
   startTime = NdbTick_CurrentMillisecond() - startTime;
   
-  ndbout_c("Backup %d has completed", rep->backupId);
+  ndbout_c("Backup %u has completed", rep->backupId);
   const Uint64 bytes =
     rep->noOfBytesLow + (((Uint64)rep->noOfBytesHigh) << 32);
   const Uint64 records =
@@ -691,7 +691,7 @@ Backup::execBACKUP_ABORT_REP(Signal* sig
   jamEntry();
   BackupAbortRep* rep = (BackupAbortRep*)signal->getDataPtr();
   
-  ndbout_c("Backup %d has been aborted %d", rep->backupId, rep->reason);
+  ndbout_c("Backup %u has been aborted %d", rep->backupId, rep->reason);
 }
 
 const TriggerEvent::Value triggerEventValues[] = {

=== modified file 'storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp'
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp	2008-11-08 20:46:53 +0000
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp	2009-02-23 11:26:17 +0000
@@ -428,6 +428,12 @@ void Cmvmi::execSTTOR(Signal* signal)
 
     if(m_ctx.m_config.lockPagesInMainMemory() == 1)
     {
+      jam();
+      /**
+       * Notify the watchdog that we are locking memory,
+       *   which can be as "heavy" as allocating it
+       */
+      refresh_watch_dog(9);
       int res = NdbMem_MemLockAll(0);
       if(res != 0){
         g_eventLogger->warning("Failed to memlock pages");
@@ -435,6 +441,19 @@ void Cmvmi::execSTTOR(Signal* signal)
       }
     }
     
+    /**
+     * Install "normal" watchdog value
+     */
+    {
+      Uint32 db_watchdog_interval = 0;
+      const ndb_mgm_configuration_iterator * p = 
+        m_ctx.m_config.getOwnConfigIterator();
+      ndb_mgm_get_int_parameter(p, CFG_DB_WATCHDOG_INTERVAL, 
+                                &db_watchdog_interval);
+      ndbrequire(db_watchdog_interval);
+      update_watch_dog_timer(db_watchdog_interval);
+    }
+    
     sendSTTORRY(signal);
     return;
   } else if (theStartPhase == 3) {

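Both Cmvmi hunks address the same failure mode: NdbMem_MemLockAll() can stall for as long as a large allocation, so the thread pets the watchdog before starting it and only afterwards installs the configured CFG_DB_WATCHDOG_INTERVAL. A hedged sketch of the pattern (the atomics stand in for the real watchdog machinery):

  #include <atomic>
  #include <cstdint>

  // Sketch: a worker "pets" the watchdog before a long, uninterruptible
  // call so the checker thread does not declare it hung, then installs
  // the normal interval once startup is past the expensive phase.
  static std::atomic<uint32_t> g_heartbeat{0};       // bumped by workers
  static std::atomic<uint32_t> g_interval_ms{60000}; // generous startup value

  static void refresh_watch_dog() { g_heartbeat.fetch_add(1); }
  static void update_watch_dog_timer(uint32_t ms) { g_interval_ms.store(ms); }

  static void start_phase_one(uint32_t configured_interval_ms) {
    refresh_watch_dog();   // locking memory can be as slow as allocating it
    // ... NdbMem_MemLockAll(0) equivalent would run here ...
    update_watch_dog_timer(configured_interval_ms);  // back to normal
  }

  int main() { start_phase_one(6000); }
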
=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2008-11-21 10:51:05 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2009-02-19 12:22:38 +0000
@@ -5001,7 +5001,7 @@ void Dbdict::printTables()
   DLHashTable<DictObject>::Iterator iter;
   bool moreTables = c_obj_hash.first(iter);
   printf("OBJECTS IN DICT:\n");
-  char name[MAX_TAB_NAME_SIZE];
+  char name[PATH_MAX];
   while (moreTables) {
     Ptr<DictObject> tablePtr = iter.curr;
     ConstRope r(c_rope_pool, tablePtr.p->m_name);
@@ -7776,14 +7776,14 @@ void Dbdict::execGET_TABINFOREQ(Signal* 
     ndbrequire(signal->getNoOfSections() == 1);  
     const Uint32 len = req->tableNameLen;
     
-    if(len > MAX_TAB_NAME_SIZE){
+    if(len > PATH_MAX){
       jam();
       releaseSections(signal);
       sendGET_TABINFOREF(signal, req, GetTabInfoRef::TableNameTooLong);
       return;
     }
 
-    char tableName[MAX_TAB_NAME_SIZE];
+    char tableName[PATH_MAX];
     SegmentedSectionPtr ssPtr;
     signal->getSection(ssPtr,GetTabInfoReq::TABLE_NAME);
     SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
@@ -8108,7 +8108,7 @@ void Dbdict::sendOLD_LIST_TABLES_CONF(Si
       pos = 0;
     }
     Uint32 i = 0;
-    char tmp[MAX_TAB_NAME_SIZE];
+    char tmp[PATH_MAX];
     name.copy(tmp);
     while (i < size) {
       char* p = (char*)&conf->tableData[pos];
@@ -8168,7 +8168,7 @@ void Dbdict::sendLIST_TABLES_CONF(Signal
    */
   ListTablesData ltd;
   const Uint32 listTablesDataSizeInWords = (sizeof(ListTablesData) + 3) / 4;
-  char tname[MAX_TAB_NAME_SIZE];
+  char tname[PATH_MAX];
   SimplePropertiesSectionWriter tableDataWriter(getSectionSegmentPool());
   SimplePropertiesSectionWriter tableNamesWriter(getSectionSegmentPool());
 
@@ -15922,6 +15922,15 @@ Dbdict::createObj_prepare_complete_done(
 void
 Dbdict::createObj_commit(Signal * signal, SchemaOp * op)
 {
+  if (ERROR_INSERTED(6016))
+  {
+    jam();
+    NodeReceiverGroup rg(CMVMI, c_aliveNodes);
+    signal->theData[0] = 9999;
+    sendSignal(rg, GSN_NDB_TAMPER, signal, 1, JBB);
+    return;
+  }
+
   OpCreateObj * createObj = (OpCreateObj*)op;
 
   createObj->m_callback.m_callbackFunction = 
@@ -16530,9 +16539,22 @@ Dbdict::create_fg_prepare_start(Signal* 
     {
       //fg.TS_DataGrow = group.m_grow_spec;
       fg_ptr.p->m_tablespace.m_extent_size = fg.TS_ExtentSize;
-      fg_ptr.p->m_tablespace.m_default_logfile_group_id = fg.TS_LogfileGroupId;
 
       Ptr<Filegroup> lg_ptr;
+      if (fg.TS_LogfileGroupId == RNIL && fg.TS_LogfileGroupVersion == RNIL)
+      {
+        jam();
+        Filegroup_hash::Iterator it;
+        if (c_filegroup_hash.first(it))
+        {
+          jam();
+          fg.TS_LogfileGroupId = it.curr.p->key;
+          fg.TS_LogfileGroupVersion = it.curr.p->m_version;
+        }
+      }
+
+      fg_ptr.p->m_tablespace.m_default_logfile_group_id = fg.TS_LogfileGroupId;
+
       if (!c_filegroup_hash.find(lg_ptr, fg.TS_LogfileGroupId))
       {
         jam();
@@ -16719,6 +16741,28 @@ Dbdict::create_file_prepare_start(Signal
 
     // Get Filegroup
     FilegroupPtr fg_ptr;
+    if (f.FilegroupId == RNIL && f.FilegroupVersion == RNIL)
+    {
+      jam();
+      Filegroup_hash::Iterator it;
+      c_filegroup_hash.first(it);
+      while (!it.isNull())
+      {
+        jam();
+        if ((f.FileType == DictTabInfo::Undofile &&
+             it.curr.p->m_type == DictTabInfo::LogfileGroup) ||
+            (f.FileType == DictTabInfo::Datafile &&
+             it.curr.p->m_type == DictTabInfo::Tablespace))
+        {
+          jam();
+          f.FilegroupId = it.curr.p->key;
+          f.FilegroupVersion = it.curr.p->m_version;
+          break;
+        }
+        c_filegroup_hash.next(it);
+      }
+    }
+
     if(!c_filegroup_hash.find(fg_ptr, f.FilegroupId)){
       jam();
       op->m_errorCode = CreateFileRef::NoSuchFilegroup;
@@ -16929,7 +16973,7 @@ Dbdict::create_file_prepare_complete(Sig
     ndbrequire(false);
   }
   
-  char name[MAX_TAB_NAME_SIZE];
+  char name[PATH_MAX];
   ConstRope tmp(c_rope_pool, f_ptr.p->m_path);
   tmp.copy(name);
   LinearSectionPtr ptr[3];
@@ -16965,6 +17009,15 @@ Dbdict::execCREATE_FILE_CONF(Signal* sig
 void
 Dbdict::create_file_commit_start(Signal* signal, SchemaOp* op)
 {
+  if (ERROR_INSERTED(6017))
+  {
+    jam();
+    NodeReceiverGroup rg(CMVMI, c_aliveNodes);
+    signal->theData[0] = 9999;
+    sendSignal(rg, GSN_NDB_TAMPER, signal, 1, JBB);
+    return;
+  }
+
   /**
    * CONTACT TSMAN LGMAN PGMAN 
    */

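The two new Dbdict code paths implement a "default filegroup" fallback: when the caller passes RNIL for the filegroup id and version, the first entry of the matching type in the filegroup hash is substituted before the normal lookup. A simplified sketch of that fallback (std::map stands in for the kernel hash table):

  #include <cstdint>
  #include <map>
  #include <optional>

  static const uint32_t RNIL_ID = 0xFFFFFF00;  // "not specified" marker

  enum class FgType { LogfileGroup, Tablespace };
  struct Filegroup { uint32_t key; uint32_t version; FgType type; };

  // If no filegroup was named, pick the first one of the wanted type;
  // otherwise do the normal lookup by id.
  static std::optional<Filegroup>
  resolve_filegroup(const std::map<uint32_t, Filegroup> &hash,
                    uint32_t id, FgType wanted) {
    if (id != RNIL_ID) {
      auto it = hash.find(id);
      return it == hash.end() ? std::nullopt
                              : std::optional<Filegroup>(it->second);
    }
    for (const auto &entry : hash)             // fallback: first match by type
      if (entry.second.type == wanted) return entry.second;
    return std::nullopt;                       // NoSuchFilegroup
  }

  int main() {
    std::map<uint32_t, Filegroup> hash = {{9, {9, 1, FgType::LogfileGroup}}};
    auto fg = resolve_filegroup(hash, RNIL_ID, FgType::LogfileGroup);
    return (fg && fg->key == 9) ? 0 : 1;
  }
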
=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2009-02-02 21:21:34 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2009-03-18 09:09:16 +0000
@@ -7086,10 +7086,18 @@ void Dblqh::execACCKEYREF(Signal* signal
    *
    * -> ZNO_TUPLE_FOUND is possible
    */
-  ndbrequire
-    (tcPtr->seqNoReplica == 0 ||
-     errCode != ZTUPLE_ALREADY_EXIST ||
-     (tcPtr->operation == ZREAD && (tcPtr->dirtyOp || tcPtr->opSimple)));
+  if (unlikely(! (tcPtr->seqNoReplica == 0 ||
+                  errCode != ZTUPLE_ALREADY_EXIST ||
+                  (tcPtr->operation == ZREAD && 
+                   (tcPtr->dirtyOp || tcPtr->opSimple)))))
+  {
+    jamLine(Uint32(tcPtr->operation));
+    jamLine(Uint32(tcPtr->seqNoReplica));
+    jamLine(Uint32(errCode));
+    jamLine(Uint32(tcPtr->dirtyOp));
+    jamLine(Uint32(tcPtr->opSimple));
+    ndbrequire(false);
+  }
   
   tcPtr->abortState = TcConnectionrec::ABORT_FROM_LQH;
   abortCommonLab(signal);

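The Dblqh rewrite keeps the invariant but, when it is violated, records the operation state via jamLine() before failing, so the trace in a crash dump shows which combination fired. A stand-alone sketch of the pattern; the numeric constants and ring buffer are stand-ins:

  #include <cassert>
  #include <cstdint>

  // Stand-in for jamLine(): record values in a small trace ring so a
  // crash dump shows which combination violated the invariant.
  static uint32_t trace[8];
  static int trace_pos = 0;
  static void jam_line(uint32_t v) { trace[trace_pos++ % 8] = v; }

  static void check_acckeyref(uint32_t op, uint32_t seqNoReplica,
                              uint32_t errCode, bool dirtyOp, bool opSimple) {
    const bool read_op = (op == 0 /* stand-in for ZREAD */);
    if (!(seqNoReplica == 0 ||
          errCode != 630 /* stand-in for ZTUPLE_ALREADY_EXIST */ ||
          (read_op && (dirtyOp || opSimple)))) {
      jam_line(op); jam_line(seqNoReplica); jam_line(errCode);
      jam_line(dirtyOp); jam_line(opSimple);  // breadcrumbs first...
      assert(false && "invariant violated");  // ...then ndbrequire(false)
    }
  }

  int main() { check_acckeyref(0, 1, 630, true, false); }  // dirty read: ok
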
=== modified file 'storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp'
--- a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp	2009-01-27 14:32:31 +0000
+++ b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp	2009-02-27 13:18:49 +0000
@@ -1066,7 +1066,6 @@ DbUtil::prepareOperation(Signal* signal,
   Uint32 noOfPKAttribsStored = 0;
   Uint32 noOfNonPKAttribsStored = 0;
   Uint32 attrLength = 0;
-  Uint32 pkAttrLength = 0;
   char attrNameRequested[MAX_ATTR_NAME_SIZE];
   Uint32 attrIdRequested;
 
@@ -1222,8 +1221,6 @@ DbUtil::prepareOperation(Signal* signal,
 	break;
       }
       attrLength += len;
-      if (attrDesc.AttributeKeyFlag)
-	pkAttrLength += len;
 
       if (operationType == UtilPrepareReq::Read) {
 	AttributeHeader::init(rsInfoIt.data, 
@@ -1274,14 +1271,7 @@ DbUtil::prepareOperation(Signal* signal,
   prepOpPtr.p->tckey.tableId = tableDesc.TableId;
   prepOpPtr.p->tckey.tableSchemaVersion = tableDesc.TableVersion;
   prepOpPtr.p->noOfKeyAttr = tableDesc.NoOfKeyAttr;
-  prepOpPtr.p->keyLen = tableDesc.KeyLength; // Total no of words in PK
-  if (prepOpPtr.p->keyLen > TcKeyReq::MaxKeyInfo) {
-    jam();
-    prepOpPtr.p->tckeyLenInBytes = (static_len + TcKeyReq::MaxKeyInfo) * 4;
-  } else {
-    jam();
-    prepOpPtr.p->tckeyLenInBytes = (static_len + prepOpPtr.p->keyLen) * 4;
-  }
+  prepOpPtr.p->tckeyLen = static_len;
   prepOpPtr.p->keyDataPos = static_len;  // Start of keyInfo[] in tckeyreq
   
   Uint32 requestInfo = 0;
@@ -1391,14 +1381,13 @@ DbUtil::hardcodedPrepare() {
   /**
    * Prepare SequenceCurrVal (READ)
    */
+  Uint32 keyLen = 1;
   {
     PreparedOperationPtr ptr;
     ndbrequire(c_preparedOperationPool.seizeId(ptr, 0));
-    ptr.p->keyLen = 1;
     ptr.p->tckey.attrLen = 1;
     ptr.p->rsLen = 3;
-    ptr.p->tckeyLenInBytes = (TcKeyReq::StaticLength +
-                              ptr.p->keyLen + ptr.p->tckey.attrLen) * 4;
+    ptr.p->tckeyLen = TcKeyReq::StaticLength + keyLen + ptr.p->tckey.attrLen;
     ptr.p->keyDataPos = TcKeyReq::StaticLength; 
     ptr.p->tckey.tableId = 0;
     Uint32 requestInfo = 0;
@@ -1424,9 +1413,8 @@ DbUtil::hardcodedPrepare() {
   {
     PreparedOperationPtr ptr;
     ndbrequire(c_preparedOperationPool.seizeId(ptr, 1));
-    ptr.p->keyLen = 1;
     ptr.p->rsLen = 3;
-    ptr.p->tckeyLenInBytes = (TcKeyReq::StaticLength + ptr.p->keyLen + 5) * 4;
+    ptr.p->tckeyLen = TcKeyReq::StaticLength + keyLen + 5;
     ptr.p->keyDataPos = TcKeyReq::StaticLength; 
     ptr.p->tckey.attrLen = 11;
     ptr.p->tckey.tableId = 0;
@@ -1480,11 +1468,9 @@ DbUtil::hardcodedPrepare() {
   {
     PreparedOperationPtr ptr;
     ndbrequire(c_preparedOperationPool.seizeId(ptr, 2));
-    ptr.p->keyLen = 1;
     ptr.p->tckey.attrLen = 5;
     ptr.p->rsLen = 0;
-    ptr.p->tckeyLenInBytes = (TcKeyReq::StaticLength +
-                              ptr.p->keyLen + ptr.p->tckey.attrLen) * 4;
+    ptr.p->tckeyLen = TcKeyReq::StaticLength + keyLen + ptr.p->tckey.attrLen;
     ptr.p->keyDataPos = TcKeyReq::StaticLength;
     ptr.p->tckey.tableId = 0;
     Uint32 requestInfo = 0;
@@ -1530,7 +1516,7 @@ DbUtil::execUTIL_SEQUENCE_REQ(Signal* si
   ndbrequire(transPtr.p->operations.seize(opPtr));
   
   ndbrequire(opPtr.p->rs.seize(prepOp->rsLen));
-  ndbrequire(opPtr.p->keyInfo.seize(prepOp->keyLen));
+  ndbrequire(opPtr.p->keyInfo.seize(1));
 
   transPtr.p->gsn = GSN_UTIL_SEQUENCE_REQ;
   transPtr.p->clientRef = signal->senderBlockRef();
@@ -1779,7 +1765,7 @@ DbUtil::execUTIL_EXECUTE_REQ(Signal* sig
   ndbrequire(transPtr.p->operations.seize(opPtr));
   opPtr.p->prepOp   = prepOpPtr.p;
   opPtr.p->prepOp_i = prepOpPtr.i;
-  
+
 #if 0 //def EVENT_DEBUG
   printf("opPtr.p->rs.seize( %u )\n", prepOpPtr.p->rsLen);
 #endif
@@ -1854,13 +1840,6 @@ DbUtil::execUTIL_EXECUTE_REQ(Signal* sig
     return;
   }
 
-  // quick hack for hash index build
-  if (TcKeyReq::getOperationType(prepOpPtr.p->tckey.requestInfo) != ZREAD){
-    prepOpPtr.p->tckey.attrLen =
-      prepOpPtr.p->attrInfo.getSize() + opPtr.p->attrInfo.getSize();
-    TcKeyReq::setKeyLength(prepOpPtr.p->tckey.requestInfo, keyInfo->getSize());
-  }
-
 #if 0
   const Uint32 l1 = prepOpPtr.p->tckey.attrLen;
   const Uint32 l2 = 
@@ -1944,7 +1923,7 @@ DbUtil::runOperation(Signal* signal, Tra
   
   TcKeyReq * tcKey = (TcKeyReq*)signal->getDataPtrSend();
   //ndbout << "*** 6 ***"<< endl; pop->print();
-  memcpy(tcKey, &pop->tckey, pop->tckeyLenInBytes);
+  memcpy(tcKey, &pop->tckey, 4*pop->tckeyLen);
   //ndbout << "*** 6b ***"<< endl; 
   //printTCKEYREQ(stdout, signal->getDataPtrSend(), 
   //              pop->tckeyLenInBytes >> 2, 0);
@@ -1961,7 +1940,12 @@ DbUtil::runOperation(Signal* signal, Tra
   printf("DbUtil::runOperation: ATTRINFO\n");
   op->attrInfo.print(stdout);
 #endif
-
+  
+  Uint32 attrLen = pop->attrInfo.getSize() + op->attrInfo.getSize();
+  Uint32 keyLen = op->keyInfo.getSize();
+  tcKey->attrLen = attrLen + TcKeyReq::getAIInTcKeyReq(tcKey->requestInfo);
+  TcKeyReq::setKeyLength(tcKey->requestInfo, keyLen);
+  
   /**
    * Key Info
    */
@@ -1975,12 +1959,13 @@ DbUtil::runOperation(Signal* signal, Tra
   //ndbout << "*** 7 ***" << endl;
   //printTCKEYREQ(stdout, signal->getDataPtrSend(), 
   //		pop->tckeyLenInBytes >> 2, 0);
-
+  
 #if 0 //def EVENT_DEBUG
-    printf("DbUtil::runOperation: sendSignal(DBTC_REF, GSN_TCKEYREQ, signal, %d , JBB)\n",  pop->tckeyLenInBytes >> 2);
-    printTCKEYREQ(stdout, signal->getDataPtr(), pop->tckeyLenInBytes >> 2,0);
+  printf("DbUtil::runOperation: sendSignal(DBTC_REF, GSN_TCKEYREQ, signal, %d , JBB)\n",  pop->tckeyLenInBytes >> 2);
+  printTCKEYREQ(stdout, signal->getDataPtr(), pop->tckeyLenInBytes >> 2,0);
 #endif
-  sendSignal(DBTC_REF, GSN_TCKEYREQ, signal, pop->tckeyLenInBytes >> 2, JBB);
+  Uint32 sigLen = pop->tckeyLen + (keyLen > 8 ? 8 : keyLen);
+  sendSignal(DBTC_REF, GSN_TCKEYREQ, signal, sigLen, JBB);
   
   /**
   * More than 8 words of key info not implemented

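The DbUtil refactor replaces the precomputed byte length with a word count and derives the send length on the fly: the static TCKEYREQ part plus at most 8 words of inline key, any remainder going out in follow-up KEYINFO signals (which, per the comment above, is not implemented here). A sketch of the computation; the static length value is illustrative:

  #include <algorithm>
  #include <cassert>
  #include <cstdint>

  static const uint32_t TCKEYREQ_STATIC_LEN = 8;  // illustrative word count
  static const uint32_t MAX_INLINE_KEY      = 8;  // key words carried inline

  // Signal length in words: static part plus the inline key, capped, as in
  //   Uint32 sigLen = pop->tckeyLen + (keyLen > 8 ? 8 : keyLen);
  static uint32_t tckeyreq_length(uint32_t static_len, uint32_t key_len) {
    return static_len + std::min(key_len, MAX_INLINE_KEY);
  }

  int main() {
    assert(tckeyreq_length(TCKEYREQ_STATIC_LEN, 1) == 9);
    assert(tckeyreq_length(TCKEYREQ_STATIC_LEN, 20) == 16);  // 12 words left over
  }
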
=== modified file 'storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp'
--- a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp	2006-12-23 19:20:40 +0000
+++ b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp	2009-02-27 13:18:49 +0000
@@ -209,7 +209,6 @@ public:
     }
 
     /*** Various Operation Info ***/
-    Uint32    keyLen;          // Length of primary key (fixed size is assumed)
     Uint32    rsLen;           // Size of result set
     Uint32    noOfKeyAttr;     // Number of key attributes
     Uint32    noOfAttr;        // Number of attributes
@@ -231,7 +230,7 @@ public:
     AttrMappingBuffer    attrMapping;
 
     /*** First signal in tckeyreq train ***/
-    Uint32    tckeyLenInBytes;    // TcKeyReq total signal length (in bytes)
+    Uint32    tckeyLen;           // TcKeyReq total signal length
     Uint32    keyDataPos;         // Where to store keydata[] in tckey signal
                                   // (in #words from base in tckey signal)
     TcKeyReq  tckey;              // Signaldata for first signal in train
@@ -252,11 +251,10 @@ public:
     
     void print() const {
       ndbout << "[-PreparedOperation-" << endl
-	     << " keyLen: " << keyLen
 	     << ", rsLen: " << rsLen
 	     << ", noOfKeyAttr: " << noOfKeyAttr 
 	     << ", noOfAttr: " << noOfAttr 
-	     << ", tckeyLenInBytes: " << tckeyLenInBytes 
+	     << ", tckeyLen: " << tckeyLen 
 	     << ", keyDataPos: " << keyDataPos << endl
 	     << "-AttrMapping- (AttrId, KeyPos)-pairs "
 	     << "(Pos=3fff if non-key attr):" << endl;

=== modified file 'storage/ndb/src/kernel/blocks/lgman.cpp'
--- a/storage/ndb/src/kernel/blocks/lgman.cpp	2008-12-20 19:48:44 +0000
+++ b/storage/ndb/src/kernel/blocks/lgman.cpp	2009-03-13 07:51:21 +0000
@@ -120,7 +120,8 @@ Lgman::execREAD_CONFIG_REQ(Signal* signa
   m_log_waiter_pool.wo_pool_init(RT_LGMAN_LOG_WAITER, pc);
   m_file_pool.init(RT_LGMAN_FILE, pc);
   m_logfile_group_pool.init(RT_LGMAN_FILEGROUP, pc);
-  m_data_buffer_pool.setSize(10);
+  // 10 -> 150M
+  m_data_buffer_pool.setSize(40);
 
   ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
   conf->senderRef = reference();
@@ -191,11 +192,13 @@ Lgman::execCONTINUEB(Signal* signal){
     Ptr<Logfile_group> ptr;
     if(ptrI != RNIL)
     {
+      jam();
       m_logfile_group_pool.getPtr(ptr, ptrI);
       find_log_head(signal, ptr);
     }
     else
     {
+      jam();
       init_run_undo_log(signal);
     }
     return;
@@ -547,20 +550,20 @@ Lgman::execCREATE_FILE_REQ(Signal* signa
       break;
     }
     
-    if(ERROR_INSERTED(15000) ||
-       (sizeof(void*) == 4 && req->file_size_hi & 0xFFFFFFFF))
+    if (ERROR_INSERTED(15000) ||
+        (sizeof(void*) == 4 && req->file_size_hi & 0xFFFFFFFF))
     {
       jam();
-      if(signal->getNoOfSections())
-        releaseSections(signal);
-
-      CreateFileImplRef* ref= (CreateFileImplRef*)signal->getDataPtr();
-      ref->senderData = senderData;
-      ref->senderRef = reference();
-      ref->errorCode = CreateFileImplRef::FileSizeTooLarge;
-      sendSignal(senderRef, GSN_CREATE_FILE_REF, signal,
-                 CreateFileImplRef::SignalLength, JBB);
-      return;
+      err = CreateFileImplRef::FileSizeTooLarge;
+      break;
+    }
+    
+    Uint64 sz = (Uint64(req->file_size_hi) << 32) + req->file_size_lo;
+    if (sz < 1024*1024)
+    {
+      jam();
+      err = CreateFileImplRef::FileSizeTooSmall;
+      break;
     }
 
     new (file_ptr.p) Undofile(req, ptr.i);
@@ -571,6 +574,9 @@ Lgman::execCREATE_FILE_REQ(Signal* signa
     open_file(signal, file_ptr, req->requestInfo);
     return;
   } while(0);
+
+  if(signal->getNoOfSections())
+    releaseSections(signal);
   
   CreateFileImplRef* ref= (CreateFileImplRef*)signal->getDataPtr();
   ref->senderData = senderData;
@@ -924,8 +930,18 @@ Lgman::alloc_logbuffer_memory(Ptr<Logfil
 	Buffer_idx range;
 	range.m_ptr_i= ptrI;
 	range.m_idx = cnt;
-	
-	ndbrequire(map.append((Uint32*)&range, 2));
+        
+	if (map.append((Uint32*)&range, 2) == false)
+        {
+          /**
+           * Failed to append page-range...
+           *   jump out of alloc routine
+           */
+          jam();
+          m_ctx.m_mm.release_pages(RG_DISK_OPERATIONS, 
+                                   range.m_ptr_i, range.m_idx);
+          break;
+        }
 	pages -= range.m_idx;
       }
       else
@@ -2481,6 +2497,8 @@ Lgman::init_run_undo_log(Signal* signal)
   Logfile_group_list& list= m_logfile_group_list;
   Logfile_group_list tmp(m_logfile_group_pool);
 
+  bool found_any = false;
+
   list.first(group);
   while(!group.isNull())
   {
@@ -2488,6 +2506,18 @@ Lgman::init_run_undo_log(Signal* signal)
     list.next(group);
     list.remove(ptr);
 
+    if (ptr.p->m_state & Logfile_group::LG_ONLINE)
+    {
+      /**
+       * Group is already online, i.e. it has no logfiles to run undo on
+       */
+      jam();
+      tmp.addLast(ptr);
+      continue;
+    }
+    
+    found_any = true;
+
     {
       /**
        * Init buffer pointers
@@ -2530,6 +2560,17 @@ Lgman::init_run_undo_log(Signal* signal)
   }
   list = tmp;
 
+  if (found_any == false)
+  {
+    /**
+     * No logfilegroup had any logfiles
+     */
+    jam();
+    signal->theData[0] = reference();
+    sendSignal(DBLQH_REF, GSN_START_RECCONF, signal, 1, JBB);
+    return;
+  }
+  
   execute_undo_record(signal);
 }
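
The new CREATE_FILE_REQ validation above reassembles the two 32-bit signal
words into a 64-bit size before range-checking it. As a minimal standalone
sketch of the pattern (the function and enum names here are illustrative,
not from the patch):

  #include <cstdint>

  enum class FileSizeCheck { Ok, TooSmall, TooLarge };

  FileSizeCheck check_file_size(uint32_t size_hi, uint32_t size_lo)
  {
    // A 32-bit build cannot handle any non-zero high word (FileSizeTooLarge).
    if (sizeof(void*) == 4 && size_hi != 0)
      return FileSizeCheck::TooLarge;
    // Recombine the halves exactly as the patch does with file_size_hi/_lo.
    uint64_t sz = (uint64_t(size_hi) << 32) + size_lo;
    // The patch rejects undo files smaller than 1 MB (FileSizeTooSmall).
    if (sz < 1024 * 1024)
      return FileSizeCheck::TooSmall;
    return FileSizeCheck::Ok;
  }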
 

=== modified file 'storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp'
--- a/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp	2008-08-11 10:41:11 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp	2009-02-17 07:52:13 +0000
@@ -189,6 +189,10 @@ private:
   void execDIH_RESTARTREF(Signal* signal);
   void execCREATE_TABLE_REF(Signal* signal);
   void execCREATE_TABLE_CONF(Signal* signal);
+  void execCREATE_FILEGROUP_REF(Signal* signal);
+  void execCREATE_FILEGROUP_CONF(Signal* signal);
+  void execCREATE_FILE_REF(Signal* signal);
+  void execCREATE_FILE_CONF(Signal* signal);
   void execNDB_STTORRY(Signal* signal);
   void execNDB_STARTCONF(Signal* signal);
   void execREAD_NODESREQ(Signal* signal);
@@ -230,6 +234,7 @@ private:
   void systemErrorLab(Signal* signal, int line);
 
   void createSystableLab(Signal* signal, unsigned index);
+  void createDDObjects(Signal*, unsigned index);
   void crSystab7Lab(Signal* signal);
   void crSystab8Lab(Signal* signal);
   void crSystab9Lab(Signal* signal);

=== modified file 'storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp	2007-02-14 05:37:40 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp	2009-02-17 07:52:13 +0000
@@ -77,6 +77,10 @@ Ndbcntr::Ndbcntr(Block_context& ctx):
   addRecSignal(GSN_DIH_RESTARTREF, &Ndbcntr::execDIH_RESTARTREF);
   addRecSignal(GSN_CREATE_TABLE_REF, &Ndbcntr::execCREATE_TABLE_REF);
   addRecSignal(GSN_CREATE_TABLE_CONF, &Ndbcntr::execCREATE_TABLE_CONF);
+  addRecSignal(GSN_CREATE_FILEGROUP_REF, &Ndbcntr::execCREATE_FILEGROUP_REF);
+  addRecSignal(GSN_CREATE_FILEGROUP_CONF, &Ndbcntr::execCREATE_FILEGROUP_CONF);
+  addRecSignal(GSN_CREATE_FILE_REF, &Ndbcntr::execCREATE_FILE_REF);
+  addRecSignal(GSN_CREATE_FILE_CONF, &Ndbcntr::execCREATE_FILE_CONF);
   addRecSignal(GSN_NDB_STTORRY, &Ndbcntr::execNDB_STTORRY);
   addRecSignal(GSN_NDB_STARTCONF, &Ndbcntr::execNDB_STARTCONF);
   addRecSignal(GSN_READ_NODESREQ, &Ndbcntr::execREAD_NODESREQ);

=== modified file 'storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp	2008-04-25 10:59:17 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp	2009-02-23 11:26:17 +0000
@@ -50,6 +50,8 @@
 #include <NdbOut.hpp>
 #include <NdbTick.h>
 
+#include <signaldata/CreateFilegroup.hpp>
+
 // used during shutdown for reporting current startphase
 // accessed from Emulator.cpp, NdbShutdown()
 Uint32 g_currentStartPhase;
@@ -248,6 +250,131 @@ void Ndbcntr::execSYSTEM_ERROR(Signal* s
   return;
 }//Ndbcntr::execSYSTEM_ERROR()
 
+
+struct ddentry
+{
+  Uint32 type;
+  const char * name;
+  Uint64 size;
+};
+
+/**
+ * f_dd[] = {
+ * { DictTabInfo::LogfileGroup, "DEFAULT-LG", 32*1024*1024 },
+ * { DictTabInfo::Undofile, "undofile.dat", 64*1024*1024 },
+ * { DictTabInfo::Tablespace, "DEFAULT-TS", 1024*1024 },
+ * { DictTabInfo::Datafile, "datafile.dat", 64*1024*1024 },
+ * { ~0, 0, 0 }
+ * };
+ */
+Vector<ddentry> f_dd;
+
+Uint64
+parse_size(const char * src)
+{
+  Uint64 num = 0;
+  char * endptr = 0;
+  num = strtoll(src, &endptr, 10);
+
+  if (endptr)
+  {
+    switch(* endptr){
+    case 'k':
+    case 'K':
+      num *= 1024;
+      break;
+    case 'm':
+    case 'M':
+      num *= 1024;
+      num *= 1024;
+      break;
+    case 'g':
+    case 'G':
+      num *= 1024;
+      num *= 1024;
+      num *= 1024;
+      break;
+    }
+  }
+  return num;
+}
+
+static
+int
+parse_spec(Vector<ddentry> & dst,
+           const char * src,
+           Uint32 type)
+{
+  const char * key;
+  Uint32 filetype;
+
+  struct ddentry group;
+  if (type == DictTabInfo::LogfileGroup)
+  {
+    key = "undo_buffer_size=";
+    group.size = 64*1024*1024;
+    group.name = "DEFAULT-LG";
+    group.type = type;
+    filetype = DictTabInfo::Undofile;
+  }
+  else
+  {
+    key = "extent_size=";
+    group.size = 1024*1024;
+    group.name = "DEFAULT-TS";
+    group.type = type;
+    filetype = DictTabInfo::Datafile;
+  }
+  size_t keylen = strlen(key);
+
+  BaseString arg(src);
+  Vector<BaseString> list;
+  arg.split(list, ";");
+
+  bool first = true;
+  for (Uint32 i = 0; i<list.size(); i++)
+  {
+    list[i].trim();
+    if (strncasecmp(list[i].c_str(), "name=", sizeof("name=")-1) == 0)
+    {
+      group.name= strdup(list[i].c_str() + sizeof("name=")-1);
+    }
+    else if (strncasecmp(list[i].c_str(), key, keylen) == 0)
+    {
+      group.size = parse_size(list[i].c_str() + keylen);
+    }
+    else
+    {
+      /**
+       * interpret as filespec
+       */
+      struct ddentry entry;
+      const char * path = list[i].c_str();
+      char * sizeptr = const_cast<char*>(strchr(path, ':'));
+      if (sizeptr == 0)
+      {
+        return -1;
+      }
+      * sizeptr = 0;
+
+      entry.name = strdup(path);
+      entry.size = parse_size(sizeptr + 1);
+      entry.type = filetype;
+
+      if (first)
+      {
+        /**
+         * push group as well
+         */
+        first = false;
+        dst.push_back(group);
+      }
+      dst.push_back(entry);
+    }
+  }
+  return 0;
+}
+
 void 
 Ndbcntr::execREAD_CONFIG_REQ(Signal* signal)
 {
@@ -262,6 +389,50 @@ Ndbcntr::execREAD_CONFIG_REQ(Signal* sig
     m_ctx.m_config.getOwnConfigIterator();
   ndbrequire(p != 0);
 
+  Uint32 dl = 0;
+  ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &dl);
+  if (dl == 0)
+  {
+    const char * lgspec = 0;
+    char buf[1024];
+    if (!ndb_mgm_get_string_parameter(p, CFG_DB_DD_LOGFILEGROUP_SPEC, &lgspec))
+    {
+      jam();
+
+      if (parse_spec(f_dd, lgspec, DictTabInfo::LogfileGroup))
+      {
+        BaseString::snprintf(buf, sizeof(buf),
+                             "Unable to parse InitalLogfileGroup: %s", lgspec);
+        progError(__LINE__, NDBD_EXIT_INVALID_CONFIG, buf);
+      }
+    }
+
+    const char * tsspec = 0;
+    if (!ndb_mgm_get_string_parameter(p, CFG_DB_DD_TABLEPACE_SPEC, &tsspec))
+    {
+      if (f_dd.size() == 0)
+      {
+        warningEvent("InitalTablespace specified, "
+                     "but InitalLogfileGroup is not!");
+        warningEvent("Ignoring InitalTablespace: %s",
+                     tsspec);
+      }
+      else
+      {
+        if (parse_spec(f_dd, tsspec, DictTabInfo::Tablespace))
+        {
+          BaseString::snprintf(buf, sizeof(buf),
+                               "Unable to parse InitalTablespace: %s", tsspec);
+          progError(__LINE__, NDBD_EXIT_INVALID_CONFIG, buf);
+        }
+      }
+    }
+  }
+
+  struct ddentry empty;
+  empty.type = ~0;
+  f_dd.push_back(empty);
+
   ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
   conf->senderRef = reference();
   conf->senderData = senderData;
@@ -289,14 +460,6 @@ void Ndbcntr::execSTTOR(Signal* signal) 
     break;
   case ZSTART_PHASE_1:
     jam();
-    {
-      Uint32 db_watchdog_interval = 0;
-      const ndb_mgm_configuration_iterator * p = 
-        m_ctx.m_config.getOwnConfigIterator();
-      ndb_mgm_get_int_parameter(p, CFG_DB_WATCHDOG_INTERVAL, &db_watchdog_interval);
-      ndbrequire(db_watchdog_interval);
-      update_watch_dog_timer(db_watchdog_interval);
-    }
     startPhase1Lab(signal);
     break;
   case ZSTART_PHASE_2:
@@ -1685,11 +1848,169 @@ void Ndbcntr::systemErrorLab(Signal* sig
 /*       |  :  |   :             |                   v                       */
 /*       | 2048|   0             |                   v                       */
 /*---------------------------------------------------------------------------*/
+void
+Ndbcntr::createDDObjects(Signal * signal, unsigned index)
+{
+  const ndb_mgm_configuration_iterator * p =
+    m_ctx.m_config.getOwnConfigIterator();
+  ndbrequire(p != 0);
+
+  Uint32 propPage[256];
+  LinearWriter w(propPage, 256);
+
+  const ddentry* entry = &f_dd[index];
+
+  switch(entry->type){
+  case DictTabInfo::LogfileGroup:
+  case DictTabInfo::Tablespace:
+  {
+    jam();
+
+    DictFilegroupInfo::Filegroup fg; fg.init();
+    // pass the name as an argument, not as the format string
+    BaseString::snprintf(fg.FilegroupName, sizeof(fg.FilegroupName),
+                         "%s", entry->name);
+    fg.FilegroupType = entry->type;
+    if (entry->type == DictTabInfo::LogfileGroup)
+    {
+      jam();
+      fg.LF_UndoBufferSize = Uint32(entry->size);
+    }
+    else
+    {
+      jam();
+      fg.TS_ExtentSize = Uint32(entry->size);
+      fg.TS_LogfileGroupId = RNIL;
+      fg.TS_LogfileGroupVersion = RNIL;
+    }
+
+    SimpleProperties::UnpackStatus s;
+    s = SimpleProperties::pack(w,
+                               &fg,
+                               DictFilegroupInfo::Mapping,
+                               DictFilegroupInfo::MappingSize, true);
+
+
+    Uint32 length = w.getWordsUsed();
+    LinearSectionPtr ptr[3];
+    ptr[0].p = &propPage[0];
+    ptr[0].sz = length;
+
+    CreateFilegroupReq * req = (CreateFilegroupReq*)signal->getDataPtrSend();
+    req->senderRef = reference();
+    req->senderData = index;
+    req->objType = entry->type;
+    sendSignal(DBDICT_REF, GSN_CREATE_FILEGROUP_REQ, signal,
+               CreateFilegroupReq::SignalLength, JBB, ptr, 1);
+    return;
+  }
+  case DictTabInfo::Undofile:
+  case DictTabInfo::Datafile:
+  {
+    jam();
+    Uint32 propPage[256];
+    LinearWriter w(propPage, 256);
+    DictFilegroupInfo::File f; f.init();
+    // pass the name as an argument, not as the format string
+    BaseString::snprintf(f.FileName, sizeof(f.FileName), "%s", entry->name);
+    f.FileType = entry->type;
+    f.FilegroupId = RNIL;
+    f.FilegroupVersion = RNIL;
+    f.FileSizeHi = Uint32(entry->size >> 32);
+    f.FileSizeLo = Uint32(entry->size);
+
+    SimpleProperties::UnpackStatus s;
+    s = SimpleProperties::pack(w,
+                               &f,
+                               DictFilegroupInfo::FileMapping,
+                               DictFilegroupInfo::FileMappingSize, true);
+
+    Uint32 length = w.getWordsUsed();
+    LinearSectionPtr ptr[3];
+    ptr[0].p = &propPage[0];
+    ptr[0].sz = length;
+
+    CreateFileReq * req = (CreateFileReq*)signal->getDataPtrSend();
+    req->senderRef = reference();
+    req->senderData = index;
+    req->objType = entry->type;
+    req->requestInfo = CreateFileReq::ForceCreateFile;
+    sendSignal(DBDICT_REF, GSN_CREATE_FILE_REQ, signal,
+               CreateFileReq::SignalLength, JBB, ptr, 1);
+    return;
+  }
+  default:
+    break;
+  }
+
+  startInsertTransactions(signal);
+}
+
+void
+Ndbcntr::execCREATE_FILEGROUP_REF(Signal* signal)
+{
+  jamEntry();
+  CreateFilegroupRef* ref = (CreateFilegroupRef*)signal->getDataPtr();
+  char buf[1024];
+
+  const ddentry* entry = &f_dd[ref->senderData];
+
+  if (entry->type == DictTabInfo::LogfileGroup)
+  {
+    BaseString::snprintf(buf, sizeof(buf), "create logfilegroup err %u",
+                         ref->errorCode);
+  }
+  else if (entry->type == DictTabInfo::Tablespace)
+  {
+    BaseString::snprintf(buf, sizeof(buf), "create tablespace err %u",
+                         ref->errorCode);
+  }
+  progError(__LINE__, NDBD_EXIT_INVALID_CONFIG, buf);
+}
+
+void
+Ndbcntr::execCREATE_FILEGROUP_CONF(Signal* signal)
+{
+  jamEntry();
+  CreateFilegroupConf* conf = (CreateFilegroupConf*)signal->getDataPtr();
+  createDDObjects(signal, conf->senderData + 1);
+}
+
+void
+Ndbcntr::execCREATE_FILE_REF(Signal* signal)
+{
+  jamEntry();
+  CreateFileRef* ref = (CreateFileRef*)signal->getDataPtr();
+  char buf[1024];
+
+  const ddentry* entry = &f_dd[ref->senderData];
+
+  if (entry->type == DictTabInfo::Undofile)
+  {
+    BaseString::snprintf(buf, sizeof(buf), "create undofile %s err %u",
+                         entry->name,
+                         ref->errorCode);
+  }
+  else if (entry->type == DictTabInfo::Datafile)
+  {
+    BaseString::snprintf(buf, sizeof(buf), "create datafile %s err %u",
+                         entry->name,
+                         ref->errorCode);
+  }
+  progError(__LINE__, NDBD_EXIT_INVALID_CONFIG, buf);
+}
+
+void
+Ndbcntr::execCREATE_FILE_CONF(Signal* signal)
+{
+  jamEntry();
+  CreateFileConf* conf = (CreateFileConf*)signal->getDataPtr();
+  createDDObjects(signal, conf->senderData + 1);
+}
+
 void Ndbcntr::createSystableLab(Signal* signal, unsigned index)
 {
   if (index >= g_sysTableCount) {
     ndbassert(index == g_sysTableCount);
-    startInsertTransactions(signal);
+    createDDObjects(signal, 0);
     return;
   }
   const SysTable& table = *g_sysTableList[index];
@@ -2780,25 +3101,30 @@ void Ndbcntr::execSTART_ORD(Signal* sign
 
 #define CLEAR_DX 13
 #define CLEAR_LCP 3
+#define CLEAR_DD 2
+// CLEAR_DD: the FileSystemPathDataFiles and FileSystemPathUndoFiles directories
 
 void
 Ndbcntr::clearFilesystem(Signal* signal)
 {
-  const Uint32 lcp = c_fsRemoveCount >= CLEAR_DX;
-  
+  jam();
   FsRemoveReq * req  = (FsRemoveReq *)signal->getDataPtrSend();
   req->userReference = reference();
   req->userPointer   = 0;
   req->directory     = 1;
   req->ownDirectory  = 1;
 
-  if (lcp == 0)
+  const Uint32 DX = CLEAR_DX;
+  const Uint32 LCP = CLEAR_DX + CLEAR_LCP;
+  const Uint32 DD = CLEAR_DX + CLEAR_LCP + CLEAR_DD;
+
+  if (c_fsRemoveCount < DX)
   {
     FsOpenReq::setVersion(req->fileNumber, 3);
     FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL); // Can be any...
     FsOpenReq::v1_setDisk(req->fileNumber, c_fsRemoveCount);
   }
-  else
+  else if (c_fsRemoveCount < LCP)
   {
     FsOpenReq::setVersion(req->fileNumber, 5);
     FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_DATA);
@@ -2806,6 +3132,19 @@ Ndbcntr::clearFilesystem(Signal* signal)
     FsOpenReq::v5_setTableId(req->fileNumber, 0);
     FsOpenReq::v5_setFragmentId(req->fileNumber, 0);
   }
+  else if (c_fsRemoveCount < DD)
+  {
+    req->ownDirectory  = 0;
+    FsOpenReq::setVersion(req->fileNumber, 6);
+    FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_DATA);
+    FsOpenReq::v5_setLcpNo(req->fileNumber,
+                           FsOpenReq::BP_DD_DF + c_fsRemoveCount - LCP);
+  }
+  else
+  {
+    ndbrequire(false);
+  }
+
   sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal, 
              FsRemoveReq::SignalLength, JBA);
   c_fsRemoveCount++;
@@ -2814,12 +3153,12 @@ Ndbcntr::clearFilesystem(Signal* signal)
 void
 Ndbcntr::execFSREMOVECONF(Signal* signal){
   jamEntry();
-  if(c_fsRemoveCount == CLEAR_DX + CLEAR_LCP){
+  if(c_fsRemoveCount == CLEAR_DX + CLEAR_LCP + CLEAR_DD){
     jam();
     sendSttorry(signal);
   } else {
     jam();
-    ndbrequire(c_fsRemoveCount < CLEAR_DX + CLEAR_LCP);
+    ndbrequire(c_fsRemoveCount < CLEAR_DX + CLEAR_LCP + CLEAR_DD);
     clearFilesystem(signal);
   }//if
 }
@@ -3132,3 +3471,5 @@ UpgradeStartup::execCNTR_MASTER_REPLY(Si
   block.progError(__LINE__,NDBD_EXIT_NDBREQUIRE,
 		  "UpgradeStartup::execCNTR_MASTER_REPLY");
 }
+
+template class Vector<ddentry>;
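
To make the parse_size()/parse_spec() input format above concrete, a hedged
example (the parameter value is invented) of what an InitialLogfileGroup
string produces in f_dd:

  // InitialLogfileGroup = name=LG1;undo_buffer_size=64M;undo1.dat:128M;undo2.dat:128M
  //
  // parse_spec() splits on ';' and pushes, in order:
  //   { DictTabInfo::LogfileGroup, "LG1",       64*1024*1024  }  // group entry, before the first file
  //   { DictTabInfo::Undofile,     "undo1.dat", 128*1024*1024 }
  //   { DictTabInfo::Undofile,     "undo2.dat", 128*1024*1024 }
  //
  // A file item without a ':' size separator makes parse_spec() return -1,
  // which execREAD_CONFIG_REQ() turns into NDBD_EXIT_INVALID_CONFIG.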

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp	2006-12-23 19:20:40 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp	2009-02-20 08:46:10 +0000
@@ -46,7 +46,7 @@ Filename::~Filename(){
 }
 
 void 
-Filename::set(Filename::NameSpec& spec,
+Filename::set(const BaseString basepath[],
 	      BlockReference blockReference, 
 	      const Uint32 filenumber[4], bool dir) 
 {
@@ -59,14 +59,14 @@ Filename::set(Filename::NameSpec& spec,
   if (version == 2)
   {
     sz = BaseString::snprintf(theName, sizeof(theName), "%s", 
-         spec.backup_path.c_str());
-    m_base_name = theName + spec.backup_path.length();
+                              basepath[FsOpenReq::BP_BACKUP].c_str());
+    m_base_name = theName + basepath[FsOpenReq::BP_BACKUP].length();
   }
   else
   {
     sz = BaseString::snprintf(theName, sizeof(theName), "%s", 
-         spec.fs_path.c_str());
-    m_base_name = theName + spec.fs_path.length();
+                              basepath[FsOpenReq::BP_FS].c_str());
+    m_base_name = theName + basepath[FsOpenReq::BP_FS].length();
   }
   
   switch(version){
@@ -120,14 +120,14 @@ Filename::set(Filename::NameSpec& spec,
     const Uint32 nodeId = FsOpenReq::v2_getNodeId(filenumber);
     const Uint32 count = FsOpenReq::v2_getCount(filenumber);
     
-    BaseString::snprintf(buf, sizeof(buf), "BACKUP%sBACKUP-%d%s",
+    BaseString::snprintf(buf, sizeof(buf), "BACKUP%sBACKUP-%u%s",
 	     DIR_SEPARATOR, seq, DIR_SEPARATOR); 
     strcat(theName, buf);
     if(count == 0xffffffff) {
-      BaseString::snprintf(buf, sizeof(buf), "BACKUP-%d.%d",
+      BaseString::snprintf(buf, sizeof(buf), "BACKUP-%u.%d",
 	       seq, nodeId); strcat(theName, buf);
     } else {
-      BaseString::snprintf(buf, sizeof(buf), "BACKUP-%d-%d.%d",
+      BaseString::snprintf(buf, sizeof(buf), "BACKUP-%u-%d.%d",
 	       seq, count, nodeId); strcat(theName, buf);
     }
     break;
@@ -153,6 +153,13 @@ Filename::set(Filename::NameSpec& spec,
     strcat(theName, buf);
     break;
   }
+  case 6:
+  {
+    Uint32 bp = FsOpenReq::v5_getLcpNo(filenumber);
+    sz = BaseString::snprintf(theName, sizeof(theName), "%s",
+                              basepath[bp].c_str());
+    break;
+  }
   default:
     ERROR_SET(ecError, NDBD_EXIT_AFS_PARAMETER,"","Wrong version");
   }
@@ -173,7 +180,7 @@ Filename::set(Filename::NameSpec& spec,
 }
 
 void 
-Filename::set(Filename::NameSpec& spec,
+Filename::set(const BaseString & basepath,
 	      SegmentedSectionPtr ptr, class SectionSegmentPool& pool)
 {
   char buf[PATH_MAX];
@@ -185,7 +192,8 @@ Filename::set(Filename::NameSpec& spec,
   }
   else 
   {
-    snprintf(theName, sizeof(theName), "%s%s", spec.fs_path.c_str(), buf);
-    m_base_name = theName + spec.fs_path.length();
+    snprintf(theName, sizeof(theName), "%s%s",
+             basepath.c_str(), buf);
+    m_base_name = theName + basepath.length();
   }
 }
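
Note how the new version-6 names are resolved: the sender reuses the v5
"LCP number" field to carry a base-path index, and Filename::set() reads
it back. Both fragments are from this patch:

  // Sender (Ndbcntr::clearFilesystem):
  FsOpenReq::setVersion(req->fileNumber, 6);
  FsOpenReq::v5_setLcpNo(req->fileNumber, FsOpenReq::BP_DD_DF + c_fsRemoveCount - LCP);

  // Receiver (Filename::set, case 6): the same field selects the base path.
  Uint32 bp = FsOpenReq::v5_getLcpNo(filenumber);
  // theName is then built from basepath[bp].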

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/Filename.hpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/Filename.hpp	2006-12-23 19:20:40 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Filename.hpp	2009-02-17 07:52:13 +0000
@@ -62,19 +62,12 @@ public:
   Filename();
   ~Filename();
 
-  struct NameSpec {
-    NameSpec(BaseString& f, BaseString&b) :
-      fs_path(f), backup_path(b) {}
-    BaseString& fs_path;
-    BaseString& backup_path;
-  };
-  
-  void set(NameSpec& spec, 
+  void set(const BaseString basepath[],
 	   BlockReference, const Uint32 fileno[4], bool = false);
-  void set(NameSpec& spec, 
+  void set(const BaseString & basepath,
 	   SegmentedSectionPtr ptr, class SectionSegmentPool&);
   
-  const char* c_str() const;     // Complete name including dirname
+  const char* c_str() const;         // Complete name including dirname
   const char* get_base_name() const; // Exclude fs (or backup) path
 private:
   char theName[PATH_MAX];

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp	2008-10-24 11:00:37 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp	2009-02-17 08:14:33 +0000
@@ -88,6 +88,53 @@ Ndbfs::~Ndbfs()
     delete theRequestPool;
 }
 
+static
+bool
+do_mkdir(const char * path)
+{
+#ifdef NDB_WIN32
+  return CreateDirectory(path, 0);
+#else
+  return ::mkdir(path, S_IRUSR | S_IWUSR | S_IXUSR | S_IXGRP | S_IRGRP) == 0;
+#endif
+}
+
+static
+void
+add_path(BaseString& dst, const char * add)
+{
+  const char * tmp = dst.c_str();
+  unsigned len = dst.length();
+  unsigned dslen = (unsigned)strlen(DIR_SEPARATOR);
+
+  if (len > dslen && strcmp(tmp+(len - dslen), DIR_SEPARATOR) != 0)
+    dst.append(DIR_SEPARATOR);
+  dst.append(add);
+}
+
+static
+bool
+validate_path(BaseString & dst,
+              const char * path)
+{
+  char buf2[PATH_MAX];
+  memset(buf2, 0,sizeof(buf2));
+#ifdef NDB_WIN32
+  CreateDirectory(path, 0);
+  char* szFilePart;
+  if(!GetFullPathName(path, sizeof(buf2), buf2, &szFilePart) ||
+     (GetFileAttributes(buf2) & FILE_ATTRIBUTE_READONLY))
+    return false;
+#else
+  if (::realpath(path, buf2) == NULL ||
+      ::access(buf2, W_OK) != 0)
+    return false;
+#endif
+  dst.assign(buf2);
+  add_path(dst, "");
+  return true;
+}
+
 void 
 Ndbfs::execREAD_CONFIG_REQ(Signal* signal)
 {
@@ -99,12 +146,67 @@ Ndbfs::execREAD_CONFIG_REQ(Signal* signa
   const ndb_mgm_configuration_iterator * p = 
     m_ctx.m_config.getOwnConfigIterator();
   ndbrequire(p != 0);
-  theFileSystemPath.assfmt("%sndb_%u_fs%s", m_ctx.m_config.fileSystemPath(),
-			   getOwnNodeId(), DIR_SEPARATOR);
-  theBackupFilePath.assign(m_ctx.m_config.backupFilePath());
+  BaseString tmp;
+  tmp.assfmt("ndb_%u_fs%s", getOwnNodeId(), DIR_SEPARATOR);
+  m_base_path[FsOpenReq::BP_FS].assfmt("%s%s",
+                                       m_ctx.m_config.fileSystemPath(),
+                                       tmp.c_str());
+  m_base_path[FsOpenReq::BP_BACKUP].assign(m_ctx.m_config.backupFilePath());
 
   theRequestPool = new Pool<Request>;
 
+  const char * ddpath = 0;
+  ndb_mgm_get_string_parameter(p, CFG_DB_DD_FILESYSTEM_PATH, &ddpath);
+
+  {
+    const char * datapath = 0;
+    ndb_mgm_get_string_parameter(p, CFG_DB_DD_DATAFILE_PATH, &datapath);
+    if (datapath == 0)
+    {
+      if (ddpath)
+        datapath = ddpath;
+      else
+        datapath = m_ctx.m_config.fileSystemPath();
+    }
+
+    BaseString path;
+    add_path(path, datapath);
+    do_mkdir(path.c_str());
+    add_path(path, tmp.c_str());
+    do_mkdir(path.c_str());
+    if (!validate_path(m_base_path[FsOpenReq::BP_DD_DF], path.c_str()))
+    {
+      ERROR_SET(fatal, NDBD_EXIT_AFS_INVALIDPATH,
+                m_base_path[FsOpenReq::BP_DD_DF].c_str(),
+                "FileSystemPathDataFiles");
+    }
+  }
+
+  {
+    const char * undopath = 0;
+    ndb_mgm_get_string_parameter(p, CFG_DB_DD_UNDOFILE_PATH, &undopath);
+    if (undopath == 0)
+    {
+      if (ddpath)
+        undopath = ddpath;
+      else
+        undopath = m_ctx.m_config.fileSystemPath();
+    }
+
+    BaseString path;
+    add_path(path, undopath);
+    do_mkdir(path.c_str());
+    add_path(path, tmp.c_str());
+    do_mkdir(path.c_str());
+
+    if (!validate_path(m_base_path[FsOpenReq::BP_DD_UF], path.c_str()))
+    {
+      ERROR_SET(fatal, NDBD_EXIT_AFS_INVALIDPATH,
+                m_base_path[FsOpenReq::BP_DD_UF].c_str(),
+                "FileSystemPathUndoFiles");
+    }
+  }
+
   m_maxFiles = 0;
   ndb_mgm_get_int_parameter(p, CFG_DB_MAX_OPEN_FILES, &m_maxFiles);
   Uint32 noIdleFiles = 27;
@@ -141,14 +243,7 @@ Ndbfs::execSTTOR(Signal* signal)
   if(signal->theData[1] == 0){ // StartPhase 0
     jam();
     
-    {
-#ifdef NDB_WIN32
-      CreateDirectory(theFileSystemPath.c_str(), 0);
-#else
-      mkdir(theFileSystemPath.c_str(),
-	    S_IRUSR | S_IWUSR | S_IXUSR | S_IXGRP | S_IRGRP);
-#endif
-    }      
+    do_mkdir(m_base_path[FsOpenReq::BP_FS].c_str());
     
     cownref = NDBFS_REF;
     // close all open files
@@ -182,7 +277,6 @@ Ndbfs::execFSOPENREQ(Signal* signal)
   const BlockReference userRef = fsOpenReq->userReference;
   AsyncFile* file = getIdleFile();
   ndbrequire(file != NULL);
-  Filename::NameSpec spec(theFileSystemPath, theBackupFilePath);
 
   Uint32 userPointer = fsOpenReq->userPointer;
   
@@ -208,12 +302,28 @@ Ndbfs::execFSOPENREQ(Signal* signal)
   
   if(signal->getNoOfSections() == 0){
     jam();
-    file->theFileName.set(spec, userRef, fsOpenReq->fileNumber);
+    file->theFileName.set(m_base_path, userRef, fsOpenReq->fileNumber);
   } else {
     jam();
     SegmentedSectionPtr ptr;
     signal->getSection(ptr, FsOpenReq::FILENAME);
-    file->theFileName.set(spec, ptr, g_sectionSegmentPool);
+    // Quick-and-dirty: the base-path choice should really be an argument to FSOPENREQ
+    if (refToMain(userRef) == TSMAN)
+    {
+      file->theFileName.set(m_base_path[FsOpenReq::BP_DD_DF],
+                            ptr, g_sectionSegmentPool);
+    }
+    else if (refToMain(userRef) == LGMAN)
+    {
+      file->theFileName.set(m_base_path[FsOpenReq::BP_DD_UF],
+                            ptr, g_sectionSegmentPool);
+    }
+    else
+    {
+      file->theFileName.set(m_base_path[FsOpenReq::BP_FS],
+                            ptr, g_sectionSegmentPool);
+    }
+
     releaseSections(signal);
   }
   file->reportTo(&theFromThreads);
@@ -245,10 +355,11 @@ Ndbfs::execFSREMOVEREQ(Signal* signal)
   AsyncFile* file = getIdleFile();
   ndbrequire(file != NULL);
 
-  Filename::NameSpec spec(theFileSystemPath, theBackupFilePath);
-  file->theFileName.set(spec, userRef, req->fileNumber, req->directory);
+  Uint32 version = FsOpenReq::getVersion(req->fileNumber);
+  Uint32 bp = FsOpenReq::v5_getLcpNo(req->fileNumber);
+  file->theFileName.set(m_base_path, userRef, req->fileNumber, req->directory);
   file->reportTo(&theFromThreads);
-  
+
   Request* request = theRequestPool->get();
   request->action = Request::rmrf;
   request->par.rmrf.directory = req->directory;
@@ -258,7 +369,16 @@ Ndbfs::execFSREMOVEREQ(Signal* signal)
   request->file = file;
   request->theTrace = signal->getTrace();
   
+  if (version == 6)
+  {
+    if (m_base_path[FsOpenReq::BP_FS] == m_base_path[bp])
+      goto ignore;
+  }
+
   ndbrequire(forward(file, request));
+  return;
+ignore:
+  report(request, signal);
 }
 
 /*
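
The add_path() helper above appends DIR_SEPARATOR only when the destination
does not already end with one, so calling it with an empty string merely
guarantees a trailing separator. A small usage sketch (paths invented):

  BaseString p;
  add_path(p, "/var/lib/ndb");  // p == "/var/lib/ndb"
  add_path(p, "ndb_2_fs/");     // p == "/var/lib/ndb/ndb_2_fs/"
  add_path(p, "");              // already ends with the separator: no change

validate_path() relies on exactly this when it normalizes the
realpath()/GetFullPathName() result.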

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp	2006-12-23 19:20:40 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp	2009-02-17 07:52:13 +0000
@@ -21,7 +21,7 @@
 #include "Pool.hpp"
 #include "AsyncFile.hpp"
 #include "OpenFiles.hpp"
-
+#include <signaldata/FsOpenReq.hpp>
 
 
 // Because one NDB Signal request can result in multiple requests to
@@ -81,8 +81,7 @@ private:
   Vector<AsyncFile*> theIdleFiles; // List of idle AsyncFiles
   OpenFiles theOpenFiles;          // List of open AsyncFiles
 
-  BaseString theFileSystemPath;
-  BaseString theBackupFilePath;
+  BaseString m_base_path[FsOpenReq::BP_MAX];
   
   // Statistics variables
   Uint32 m_maxOpenedFiles;

=== modified file 'storage/ndb/src/kernel/blocks/print_file.cpp'
--- a/storage/ndb/src/kernel/blocks/print_file.cpp	2006-12-31 00:04:59 +0000
+++ b/storage/ndb/src/kernel/blocks/print_file.cpp	2009-02-18 21:56:20 +0000
@@ -193,7 +193,7 @@ print_extent_page(int count, void* ptr, 
   int no = count * per_page;
   
   const int max = count < g_df_zero.m_extent_pages ? 
-    per_page : g_df_zero.m_extent_count % per_page;
+    per_page : g_df_zero.m_extent_count - (g_df_zero.m_extent_pages - 1) * per_page;
 
   File_formats::Datafile::Extent_page * page = 
     (File_formats::Datafile::Extent_page*)ptr;
@@ -201,7 +201,7 @@ print_extent_page(int count, void* ptr, 
   ndbout << "Extent page: " << count
 	 << ", lsn = [ " 
 	 << page->m_page_header.m_page_lsn_hi << " " 
-	 << page->m_page_header.m_page_lsn_lo << "]" 
+	 << page->m_page_header.m_page_lsn_lo << "] " 
 	 << endl;
   for(int i = 0; i<max; i++)
   {
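
As a worked check of the corrected last-page arithmetic (the same formula the
tsman.cpp hunks below use): with per_page = 100 extent headers per page,
m_extent_count = 250 and m_extent_pages = 3, the last page holds
250 - (3 - 1) * 100 = 50 headers. The old form, m_extent_count % per_page,
fails on exact multiples: 200 extents on 2 pages yields 0 where the last page
really holds 100.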

=== modified file 'storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp'
--- a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp	2008-11-13 13:15:56 +0000
+++ b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp	2009-02-18 14:29:58 +0000
@@ -156,6 +156,8 @@ public:
     QmgrState sendPresToStatus;
     FailState failState;
     BlockReference blockRef;
+    Uint64 m_secret;
+    Uint64 m_alloc_timeout;
 
     NodeRec() { }
   }; /* p2c: size = 52 bytes */
@@ -308,7 +310,7 @@ private:
   void electionWon(Signal* signal);
   void cmInfoconf010Lab(Signal* signal);
   
-  void apiHbHandlingLab(Signal* signal);
+  void apiHbHandlingLab(Signal* signal, Uint64 now);
   void timerHandlingLab(Signal* signal);
   void hbReceivedLab(Signal* signal);
   void sendCmRegrefLab(Signal* signal, BlockReference ref, 

=== modified file 'storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp'
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp	2008-11-13 13:15:56 +0000
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp	2009-02-18 14:29:58 +0000
@@ -27,6 +27,10 @@ void Qmgr::initData() 
 
   // Records with constant sizes
   nodeRec = new NodeRec[MAX_NODES];
+  for (Uint32 i = 0; i<MAX_NODES; i++)
+  {
+    nodeRec[i].m_secret = 0;
+  }
 
   cnoCommitFailedNodes = 0;
   c_maxDynamicId = 0;

=== modified file 'storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2009-01-29 10:56:52 +0000
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2009-03-15 10:03:02 +0000
@@ -436,9 +436,14 @@ void Qmgr::execCONNECT_REP(Signal* signa
     infoEvent("Discarding CONNECT_REP(%d)", nodeId);
     return;
   }
-  
+
   c_connectedNodes.set(nodeId);
+
   NodeRecPtr nodePtr;
+  nodePtr.i = nodeId;
+  ptrCheckGuard(nodePtr, MAX_NODES, nodeRec);
+  nodePtr.p->m_secret = 0;
+
   nodePtr.i = getOwnNodeId();
   ptrCheckGuard(nodePtr, MAX_NODES, nodeRec);
   NodeInfo nodeInfo = getNodeInfo(nodeId);
@@ -2357,7 +2362,7 @@ void Qmgr::timerHandlingLab(Signal* sign
   {
     jam();
     hb_api_timer.reset();
-    apiHbHandlingLab(signal);
+    apiHbHandlingLab(signal, TcurrentTime);
   }
 
   if (cactivateApiCheck != 0) {
@@ -2463,7 +2468,7 @@ void Qmgr::checkHeartbeat(Signal* signal
   }//if
 }//Qmgr::checkHeartbeat()
 
-void Qmgr::apiHbHandlingLab(Signal* signal) 
+void Qmgr::apiHbHandlingLab(Signal* signal, Uint64 now)
 {
   NodeRecPtr TnodePtr;
 
@@ -2508,6 +2513,14 @@ void Qmgr::apiHbHandlingLab(Signal* sign
         api_failed(signal, nodeId);
       }//if
     }//if
+    else if (TnodePtr.p->phase == ZAPI_INACTIVE &&
+             TnodePtr.p->m_secret != 0 && now > TnodePtr.p->m_alloc_timeout)
+    {
+      jam();
+      TnodePtr.p->m_secret = 0;
+      warningEvent("Releasing node id allocation for node %u",
+                   TnodePtr.i);
+    }
   }//for
   return;
 }//Qmgr::apiHbHandlingLab()
@@ -2542,6 +2555,7 @@ void Qmgr::checkStartInterface(Signal* s
 	 * IS COMPLETE.
 	 *-------------------------------------------------------------------*/
         nodePtr.p->failState = NORMAL;
+        nodePtr.p->m_secret = 0;
         Uint32 type = getNodeInfo(nodePtr.i).m_type;
         switch(type){
         case NodeInfo::DB:
@@ -2838,6 +2852,7 @@ void Qmgr::node_failed(Signal* signal, U
    *-----------------------------------------------------------------------*/
   failedNodePtr.i = aFailedNode;
   ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec);
+  failedNodePtr.p->m_secret = 0; // Not yet Uint64(rand()) << 32 + rand();
 
   ndbrequire(getNodeInfo(failedNodePtr.i).getType() == NodeInfo::DB);
   
@@ -2908,7 +2923,8 @@ Qmgr::api_failed(Signal* signal, Uint32 
    *-----------------------------------------------------------------------*/
   failedNodePtr.i = nodeId;
   ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec);
-  
+  failedNodePtr.p->m_secret = 0; // Not yet Uint64(rand()) << 32 + rand();
+
   if (failedNodePtr.p->phase == ZFAIL_CLOSING)
   {
     /**
@@ -5304,10 +5320,17 @@ Qmgr::execAPI_BROADCAST_REP(Signal* sign
     if (nodePtr.p->phase == ZAPI_ACTIVE && 
 	getNodeInfo(nodePtr.i).m_version >= api.minVersion)
     {
+      jam();
       mask.set(nodePtr.i);
     }
   }
   
+  if (mask.isclear())
+  {
+    jam();
+    return;
+  }
+
   NodeReceiverGroup rg(API_CLUSTERMGR, mask);
   sendSignal(rg, api.gsn, signal, len, JBB); // forward sections
 }
@@ -5325,22 +5348,37 @@ void
 Qmgr::execALLOC_NODEID_REQ(Signal * signal)
 {
   jamEntry();
-  const AllocNodeIdReq * req = (AllocNodeIdReq*)signal->getDataPtr();
-  Uint32 senderRef = req->senderRef;
-  Uint32 nodeId = req->nodeId;
-  Uint32 nodeType = req->nodeType;
+  AllocNodeIdReq req = *(AllocNodeIdReq*)signal->getDataPtr();
   Uint32 error = 0;
 
-  if (refToBlock(senderRef) != QMGR) // request from management server
+  NodeRecPtr nodePtr;
+  nodePtr.i = req.nodeId;
+  ptrAss(nodePtr, nodeRec);
+
+  if (refToBlock(req.senderRef) != QMGR) // request from management server
   {
     /* master */
 
     if (getOwnNodeId() != cpresident)
+    {
+      jam();
       error = AllocNodeIdRef::NotMaster;
+    }
     else if (!opAllocNodeIdReq.m_tracker.done())
+    {
+      jam();
       error = AllocNodeIdRef::Busy;
-    else if (c_connectedNodes.get(nodeId))
+    }
+    else if (c_connectedNodes.get(req.nodeId))
+    {
+      jam();
       error = AllocNodeIdRef::NodeConnected;
+    }
+    else if (nodePtr.p->m_secret != 0)
+    {
+      jam();
+      error = AllocNodeIdRef::NodeReserved;
+    }
 
     if (error)
     {
@@ -5349,60 +5387,99 @@ Qmgr::execALLOC_NODEID_REQ(Signal * sign
       ref->senderRef = reference();
       ref->errorCode = error;
       ref->masterRef = numberToRef(QMGR, cpresident);
-      sendSignal(senderRef, GSN_ALLOC_NODEID_REF, signal,
+      ref->senderData = req.senderData;
+      ref->nodeId = req.nodeId;
+      sendSignal(req.senderRef, GSN_ALLOC_NODEID_REF, signal,
                  AllocNodeIdRef::SignalLength, JBB);
       return;
     }
 
-    if (ERROR_INSERTED(934) && nodeId != getOwnNodeId())
+    if (ERROR_INSERTED(934) && req.nodeId != getOwnNodeId())
     {
       CRASH_INSERTION(934);
     }
+
+    /**
+     * generate secret
+     */
+    Uint64 now = NdbTick_CurrentMillisecond();
+    Uint32 secret_hi = now >> 24;
+    Uint32 secret_lo = Uint32(now << 8) + getOwnNodeId();
+    req.secret_hi = secret_hi;
+    req.secret_lo = secret_lo;
+
+    if (req.timeout > 60000)
+      req.timeout = 60000;
+
+    nodePtr.p->m_secret = (Uint64(secret_hi) << 32) + secret_lo;
+    nodePtr.p->m_alloc_timeout = now + req.timeout;
     
-    opAllocNodeIdReq.m_req = *req;
+    opAllocNodeIdReq.m_req = req;
     opAllocNodeIdReq.m_error = 0;
-    opAllocNodeIdReq.m_connectCount = getNodeInfo(refToNode(senderRef)).m_connectCount;
+    opAllocNodeIdReq.m_connectCount =
+      getNodeInfo(refToNode(req.senderRef)).m_connectCount;
 
     jam();
-    AllocNodeIdReq * req = (AllocNodeIdReq*)signal->getDataPtrSend();
-    req->senderRef = reference();
+    AllocNodeIdReq * req2 = (AllocNodeIdReq*)signal->getDataPtrSend();
+    * req2 = req;
+    req2->senderRef = reference();
     NodeReceiverGroup rg(QMGR, c_clusterNodes);
     RequestTracker & p = opAllocNodeIdReq.m_tracker;
     p.init<AllocNodeIdRef>(c_counterMgr, rg, GSN_ALLOC_NODEID_REF, 0);
 
     sendSignal(rg, GSN_ALLOC_NODEID_REQ, signal,
-               AllocNodeIdReq::SignalLength, JBB);
+               AllocNodeIdReq::SignalLengthQMGR, JBB);
     return;
   }
 
   /* participant */
-
-  if (c_connectedNodes.get(nodeId))
+  if (c_connectedNodes.get(req.nodeId))
+  {
+    jam();
     error = AllocNodeIdRef::NodeConnected;
-  else
+  }
+  else if (req.nodeType != getNodeInfo(req.nodeId).m_type)
   {
-    NodeRecPtr nodePtr;
-    nodePtr.i = nodeId;
-    ptrAss(nodePtr, nodeRec);
-    if (nodeType != getNodeInfo(nodeId).m_type)
-      error = AllocNodeIdRef::NodeTypeMismatch;
-    else if (nodePtr.p->failState != NORMAL)
-      error = AllocNodeIdRef::NodeFailureHandlingNotCompleted;
+    jam();
+    error = AllocNodeIdRef::NodeTypeMismatch;
   }
+  else if (nodePtr.p->failState != NORMAL)
+  {
+    jam();
+    error = AllocNodeIdRef::NodeFailureHandlingNotCompleted;
+  }
+#if 0
+  /**
+   * For now only make "time/secret" based reservation on master
+   *   as we otherwise also need to clear it on failure + handle
+   *   master failure
+   */
+  else if (nodePtr.p->m_secret != 0)
+  {
+    jam();
+    error = AllocNodeIdRef::NodeReserved;
+  }
+#endif
 
   if (error)
   {
+    jam();
     AllocNodeIdRef * ref = (AllocNodeIdRef*)signal->getDataPtrSend();
     ref->senderRef = reference();
     ref->errorCode = error;
-    sendSignal(senderRef, GSN_ALLOC_NODEID_REF, signal,
+    ref->senderData = req.senderData;
+    ref->nodeId = req.nodeId;
+    ref->masterRef = numberToRef(QMGR, cpresident);
+    sendSignal(req.senderRef, GSN_ALLOC_NODEID_REF, signal,
                AllocNodeIdRef::SignalLength, JBB);
     return;
   }
 
   AllocNodeIdConf * conf = (AllocNodeIdConf*)signal->getDataPtrSend();
   conf->senderRef = reference();
-  sendSignal(senderRef, GSN_ALLOC_NODEID_CONF, signal,
+  conf->secret_hi = req.secret_hi;
+  conf->secret_lo = req.secret_lo;
+  sendSignal(req.senderRef, GSN_ALLOC_NODEID_CONF, signal,
              AllocNodeIdConf::SignalLength, JBB);
 }
 
@@ -5415,6 +5492,22 @@ Qmgr::execALLOC_NODEID_CONF(Signal * sig
   const AllocNodeIdConf * conf = (AllocNodeIdConf*)signal->getDataPtr();
   opAllocNodeIdReq.m_tracker.reportConf(c_counterMgr,
                                         refToNode(conf->senderRef));
+
+  if (signal->getLength() >= AllocNodeIdConf::SignalLength)
+  {
+    jam();
+    if (opAllocNodeIdReq.m_req.secret_hi != conf->secret_hi ||
+        opAllocNodeIdReq.m_req.secret_lo != conf->secret_lo)
+    {
+      jam();
+      if (opAllocNodeIdReq.m_error == 0)
+      {
+        jam();
+        opAllocNodeIdReq.m_error = AllocNodeIdRef::Undefined;
+      }
+    }
+  }
+
   completeAllocNodeIdReq(signal);
 }
 
@@ -5428,15 +5521,20 @@ Qmgr::execALLOC_NODEID_REF(Signal * sign
   const AllocNodeIdRef * ref = (AllocNodeIdRef*)signal->getDataPtr();
   if (ref->errorCode == AllocNodeIdRef::NF_FakeErrorREF)
   {
+    jam();
     opAllocNodeIdReq.m_tracker.ignoreRef(c_counterMgr,
                                          refToNode(ref->senderRef));    
   }
   else
   {
+    jam();
     opAllocNodeIdReq.m_tracker.reportRef(c_counterMgr,
                                          refToNode(ref->senderRef));
     if (opAllocNodeIdReq.m_error == 0)
+    {
+      jam();
       opAllocNodeIdReq.m_error = ref->errorCode;
+    }
   }
   completeAllocNodeIdReq(signal);
 }
@@ -5463,6 +5561,17 @@ Qmgr::completeAllocNodeIdReq(Signal *sig
   if (opAllocNodeIdReq.m_tracker.hasRef())
   {
     jam();
+
+    {
+      /**
+       * Clear reservation
+       */
+      NodeRecPtr nodePtr;
+      nodePtr.i = opAllocNodeIdReq.m_req.nodeId;
+      ptrAss(nodePtr, nodeRec);
+      nodePtr.p->m_secret = 0;
+    }
+
     AllocNodeIdRef * ref = (AllocNodeIdRef*)signal->getDataPtrSend();
     ref->senderRef = reference();
     ref->senderData = opAllocNodeIdReq.m_req.senderData;
@@ -5474,12 +5583,15 @@ Qmgr::completeAllocNodeIdReq(Signal *sig
                AllocNodeIdRef::SignalLength, JBB);
     return;
   }
+
   jam();
+
   AllocNodeIdConf * conf = (AllocNodeIdConf*)signal->getDataPtrSend();
   conf->senderRef = reference();
   conf->senderData = opAllocNodeIdReq.m_req.senderData;
   conf->nodeId = opAllocNodeIdReq.m_req.nodeId;
-  ndbassert(AllocNodeIdConf::SignalLength == 3);
+  conf->secret_lo = opAllocNodeIdReq.m_req.secret_lo;
+  conf->secret_hi = opAllocNodeIdReq.m_req.secret_hi;
   sendSignal(opAllocNodeIdReq.m_req.senderRef, GSN_ALLOC_NODEID_CONF, signal,
              AllocNodeIdConf::SignalLength, JBB);
 }
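
A hedged reconstruction of the node-id reservation secret built in
execALLOC_NODEID_REQ() above (the layout is inferred from the assignments,
not an authoritative spec):

  #include <cstdint>

  // now_ms is NdbTick_CurrentMillisecond() at reservation time.
  uint64_t make_alloc_secret(uint64_t now_ms, uint32_t own_node_id)
  {
    uint32_t hi = uint32_t(now_ms >> 24);              // timestamp bits 24..55
    uint32_t lo = uint32_t(now_ms << 8) + own_node_id; // bits 0..23 plus node id in the low byte
    return (uint64_t(hi) << 32) + lo;                  // value stored in nodePtr.p->m_secret
  }

The reservation expires at now + min(req.timeout, 60000) ms;
apiHbHandlingLab() clears m_secret once that deadline has passed.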

=== modified file 'storage/ndb/src/kernel/blocks/trix/Trix.cpp'
--- a/storage/ndb/src/kernel/blocks/trix/Trix.cpp	2008-09-26 12:55:06 +0000
+++ b/storage/ndb/src/kernel/blocks/trix/Trix.cpp	2009-02-27 13:18:49 +0000
@@ -34,6 +34,17 @@
 
 #define CONSTRAINT_VIOLATION 893
 
+static
+bool
+check_timeout(Uint32 errCode)
+{
+  switch(errCode){
+  case 266:
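+    // 266: "Time-out in NDB, probably caused by deadlock" - retriable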
+    return true;
+  }
+  return false;
+}
+
 #define DEBUG(x) { ndbout << "TRIX::" << x << endl; }
 
 /**
@@ -603,9 +614,20 @@ void Trix::execUTIL_EXECUTE_REF(Signal* 
   subRecPtr.p = subRec;
   ndbrequire(utilExecuteRef->errorCode == UtilExecuteRef::TCError);
   if(utilExecuteRef->TCErrorCode == CONSTRAINT_VIOLATION)
+  {
+    jam();
     buildFailed(signal, subRecPtr, BuildIndxRef::IndexNotUnique);
+  }
+  else if (check_timeout(utilExecuteRef->TCErrorCode))
+  {
+    jam();
+    buildFailed(signal, subRecPtr, BuildIndxRef::DeadlockError);
+  }
   else
+  {
+    jam();
     buildFailed(signal, subRecPtr, BuildIndxRef::InternalError);
+  }
 }
 
 void Trix::execSUB_CREATE_CONF(Signal* signal)

=== modified file 'storage/ndb/src/kernel/blocks/tsman.cpp'
--- a/storage/ndb/src/kernel/blocks/tsman.cpp	2008-08-18 05:43:50 +0000
+++ b/storage/ndb/src/kernel/blocks/tsman.cpp	2009-03-12 10:45:04 +0000
@@ -835,7 +835,7 @@ Tsman::execFSWRITEREQ(Signal* signal)
     }
     if (page_no == extent_pages)
     {
-      Uint32 last = extents % per_page; 
+      Uint32 last = extents - ((extent_pages - 1) * per_page);
       page->get_header(last - 1, size)->m_next_free_extent = RNIL;
     }
   }
@@ -1133,6 +1133,8 @@ Tsman::load_extent_page_callback(Signal*
   Ptr<Tablespace> ts_ptr;
   m_tablespace_pool.getPtr(ts_ptr, ptr.p->m_tablespace_ptr_i);
   if (getNodeState().startLevel >= NodeState::SL_STARTED ||
+      (getNodeState().startLevel == NodeState::SL_STARTING &&
+       getNodeState().starting.restartType == NodeState::ST_INITIAL_START) ||
       (getNodeState().getNodeRestartInProgress() &&
        getNodeState().starting.restartType == NodeState::ST_INITIAL_NODE_RESTART))
   {
@@ -1237,7 +1239,8 @@ Tsman::scan_extent_headers(Signal* signa
        * Last extent header page...
        *   set correct no of extent headers
        */
-      extents= (datapages / size) % per_page;
+      Uint32 total_extents = datapages / size;
+      extents= total_extents - (pages - 1)*per_page;
     }
     for(Uint32 j = 0; j<extents; j++)
     {

=== modified file 'storage/ndb/src/kernel/vm/DataBuffer.hpp'
--- a/storage/ndb/src/kernel/vm/DataBuffer.hpp	2006-12-23 19:20:40 +0000
+++ b/storage/ndb/src/kernel/vm/DataBuffer.hpp	2009-02-19 10:01:01 +0000
@@ -325,7 +325,6 @@ DataBuffer<sz>::seize(Uint32 n){
    */
   Uint32 free = thePool.getNoOfFree() * sz + rest;
   if(n > free){
-    release();
     return false;
   }
     

=== modified file 'storage/ndb/src/mgmclient/CommandInterpreter.cpp'
--- a/storage/ndb/src/mgmclient/CommandInterpreter.cpp	2008-12-03 13:49:39 +0000
+++ b/storage/ndb/src/mgmclient/CommandInterpreter.cpp	2009-02-20 08:46:10 +0000
@@ -744,7 +744,7 @@ printLogEvent(struct ndb_logevent* event
 #undef  EVENT
 #define EVENT BackupStarted
   case NDB_LE_BackupStarted:
-      ndbout_c("Node %u: Backup %d started from node %d",
+      ndbout_c("Node %u: Backup %u started from node %d",
                R, Q(backup_id), Q(starting_node));
       break;
 #undef EVENT
@@ -784,7 +784,7 @@ printLogEvent(struct ndb_logevent* event
 #undef  EVENT
 #define EVENT BackupAborted
     case NDB_LE_BackupAborted:
-      ndbout_c("Node %u: Backup %d started from %d has been aborted. Error: %d",
+      ndbout_c("Node %u: Backup %u started from %d has been aborted. Error: %d",
                R, Q(backup_id), Q(starting_node), Q(error));
       break;
     /** 

=== modified file 'storage/ndb/src/mgmsrv/ConfigInfo.cpp'
--- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp	2008-12-09 18:59:54 +0000
+++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp	2009-03-12 10:45:04 +0000
@@ -1454,6 +1454,61 @@ const ConfigInfo::ParamInfo ConfigInfo::
     "false",
     "true"},
 
+  {
+    CFG_DB_DD_FILESYSTEM_PATH,
+    "FileSystemPathDD",
+    DB_TOKEN,
+    "Path to directory where the "DB_TOKEN_PRINT" node stores its disk-data/undo-files",
+    ConfigInfo::CI_USED,
+    false,
+    ConfigInfo::CI_STRING,
+    UNDEFINED,
+    0, 0 },
+
+  {
+    CFG_DB_DD_DATAFILE_PATH,
+    "FileSystemPathDataFiles",
+    DB_TOKEN,
+    "Path to directory where the "DB_TOKEN_PRINT" node stores its disk-data-files",
+    ConfigInfo::CI_USED,
+    false,
+    ConfigInfo::CI_STRING,
+    UNDEFINED,
+    0, 0 },
+
+  {
+    CFG_DB_DD_UNDOFILE_PATH,
+    "FileSystemPathUndoFiles",
+    DB_TOKEN,
+    "Path to directory where the "DB_TOKEN_PRINT" node stores its disk-undo-files",
+    ConfigInfo::CI_USED,
+    false,
+    ConfigInfo::CI_STRING,
+    UNDEFINED,
+    0, 0 },
+
+  {
+    CFG_DB_DD_LOGFILEGROUP_SPEC,
+    "InitialLogfileGroup",
+    DB_TOKEN,
+    "Logfile group that will be created during initial start",
+    ConfigInfo::CI_USED,
+    false,
+    ConfigInfo::CI_STRING,
+    UNDEFINED,
+    0, 0 },
+
+  {
+    CFG_DB_DD_TABLEPACE_SPEC,
+    "InitialTablespace",
+    DB_TOKEN,
+    "Tablespace that will be created during initial start",
+    ConfigInfo::CI_USED,
+    false,
+    ConfigInfo::CI_STRING,
+    UNDEFINED,
+    0, 0 },
+
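
For reference, the five parameters added above might appear in config.ini
roughly as follows (paths and sizes are invented examples):

  [ndbd default]
  FileSystemPathDD=/data/ndb            # base for both disk-data and undo files
  FileSystemPathDataFiles=/data/ndb/df  # overrides FileSystemPathDD for datafiles
  FileSystemPathUndoFiles=/data/ndb/uf  # overrides FileSystemPathDD for undo files
  InitialLogfileGroup=name=DEFAULT-LG;undo_buffer_size=64M;undofile.dat:128M
  InitialTablespace=name=DEFAULT-TS;extent_size=1M;datafile.dat:256M

(Per the Ndbfs changes above, the per-kind paths fall back to
FileSystemPathDD and then to FileSystemPath.)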
   /***************************************************************************
    * API
    ***************************************************************************/

=== modified file 'storage/ndb/src/mgmsrv/MgmtSrvr.cpp'
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2009-01-08 14:35:49 +0000
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2009-02-23 10:44:03 +0000
@@ -496,7 +496,7 @@ MgmtSrvr::MgmtSrvr(SocketServer *socket_
     int error_code;
 
     if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM,
-		       0, 0, error_code, error_string)){
+		       0, 0, error_code, error_string, 20)){
       ndbout << "Unable to obtain requested nodeid: "
 	     << error_string.c_str() << endl;
       require(false);
@@ -2130,7 +2130,9 @@ MgmtSrvr::get_connected_nodes(NodeBitmas
 }
 
 int
-MgmtSrvr::alloc_node_id_req(NodeId free_node_id, enum ndb_mgm_node_type type)
+MgmtSrvr::alloc_node_id_req(NodeId free_node_id,
+                            enum ndb_mgm_node_type type,
+                            Uint32 timeout_ms)
 {
   SignalSender ss(theFacade);
   ss.lock(); // lock will be released on exit
@@ -2144,6 +2146,7 @@ MgmtSrvr::alloc_node_id_req(NodeId free_
   req->senderData = 19;
   req->nodeId = free_node_id;
   req->nodeType = type;
+  req->timeout = timeout_ms;
 
   int do_send = 1;
   NodeId nodeId = 0;
@@ -2227,141 +2230,165 @@ MgmtSrvr::alloc_node_id_req(NodeId free_
   return 0;
 }
 
-bool
-MgmtSrvr::alloc_node_id(NodeId * nodeId, 
-			enum ndb_mgm_node_type type,
-			struct sockaddr *client_addr, 
-			SOCKET_SIZE_TYPE *client_addr_len,
-			int &error_code, BaseString &error_string,
-                        int log_event)
+int
+MgmtSrvr::match_hostname(const struct sockaddr *clnt_addr,
+                         const char *config_hostname) const
 {
-  DBUG_ENTER("MgmtSrvr::alloc_node_id");
-  DBUG_PRINT("enter", ("nodeid: %d  type: %d  client_addr: 0x%ld",
-		       *nodeId, type, (long) client_addr));
-  if (g_no_nodeid_checks) {
-    if (*nodeId == 0) {
-      error_string.appfmt("no-nodeid-checks set in management server.\n"
-			  "node id must be set explicitly in connectstring");
-      error_code = NDB_MGM_ALLOCID_CONFIG_MISMATCH;
-      DBUG_RETURN(false);
+  struct in_addr config_addr= {0};
+  if (clnt_addr)
+  {
+    const struct in_addr *clnt_in_addr = &((sockaddr_in*)clnt_addr)->sin_addr;
+
+    if (Ndb_getInAddr(&config_addr, config_hostname) != 0
+        || memcmp(&config_addr, clnt_in_addr, sizeof(config_addr)) != 0)
+    {
+      struct in_addr tmp_addr;
+      if (Ndb_getInAddr(&tmp_addr, "localhost") != 0
+          || memcmp(&tmp_addr, clnt_in_addr, sizeof(config_addr)) != 0)
+      {
+        // not localhost
+#if 0
+        ndbout << "MgmtSrvr::getFreeNodeId compare failed for \""
+               << config_hostname
+               << "\" id=" << tmp << endl;
+#endif
+        return -1;
+      }
+
+      // connecting through localhost
+      // check if config_hostname is local
+      if (!SocketServer::tryBind(0, config_hostname))
+        return -1;
     }
-    DBUG_RETURN(true);
   }
-  Guard g(m_node_id_mutex);
-  int no_mgm= 0;
-  NodeBitmask connected_nodes(m_reserved_nodes);
-  get_connected_nodes(connected_nodes);
+  else
   {
-    for(Uint32 i = 0; i < MAX_NODES; i++)
-      if (getNodeType(i) == NDB_MGM_NODE_TYPE_MGM)
-	no_mgm++;
+    if (!SocketServer::tryBind(0, config_hostname))
+      return -1;
   }
-  bool found_matching_id= false;
-  bool found_matching_type= false;
-  bool found_free_node= false;
-  unsigned id_found= 0;
-  const char *config_hostname= 0;
-  struct in_addr config_addr= {0};
-  int r_config_addr= -1;
-  unsigned type_c= 0;
+  return 0;
+}
+
+int
+MgmtSrvr::find_node_type(unsigned node_id, enum ndb_mgm_node_type type,
+                         const struct sockaddr *client_addr,
+                         NodeBitmask &nodes,
+                         NodeBitmask &exact_nodes,
+                         Vector<struct nodeid_and_host> &nodes_info,
+                         int &error_code, BaseString &error_string)
+{
+  const char *found_config_hostname= 0;
+  unsigned type_c= (unsigned)type;
+
+  Guard g(m_configMutex);
 
-  if(NdbMutex_Lock(m_configMutex))
-  {
-    // should not happen
-    error_string.appfmt("unable to lock configuration mutex");
-    error_code = NDB_MGM_ALLOCID_ERROR;
-    DBUG_RETURN(false);
-  }
   ndb_mgm_configuration_iterator
     iter(* _config->m_configValues, CFG_SECTION_NODE);
-  for(iter.first(); iter.valid(); iter.next()) {
-    unsigned tmp= 0;
-    if(iter.get(CFG_NODE_ID, &tmp)) require(false);
-    if (*nodeId && *nodeId != tmp)
-      continue;
-    found_matching_id= true;
-    if(iter.get(CFG_TYPE_OF_SECTION, &type_c)) require(false);
-    if(type_c != (unsigned)type)
-      continue;
-    found_matching_type= true;
-    if (connected_nodes.get(tmp))
+  for(iter.first(); iter.valid(); iter.next())
+  {
+    unsigned id;
+    if (iter.get(CFG_NODE_ID, &id))
+      require(false);
+    if (node_id && node_id != id)
       continue;
-    found_free_node= true;
-    if(iter.get(CFG_NODE_HOST, &config_hostname)) require(false);
-    if (config_hostname && config_hostname[0] == 0)
-      config_hostname= 0;
-    else if (client_addr) {
-      // check hostname compatability
-      const void *tmp_in= &(((sockaddr_in*)client_addr)->sin_addr);
-      if((r_config_addr= Ndb_getInAddr(&config_addr, config_hostname)) != 0
-	 || memcmp(&config_addr, tmp_in, sizeof(config_addr)) != 0) {
-	struct in_addr tmp_addr;
-	if(Ndb_getInAddr(&tmp_addr, "localhost") != 0
-	   || memcmp(&tmp_addr, tmp_in, sizeof(config_addr)) != 0) {
-	  // not localhost
-#if 0
-	  ndbout << "MgmtSrvr::getFreeNodeId compare failed for \""
-		 << config_hostname
-		 << "\" id=" << tmp << endl;
-#endif
-	  continue;
-	}
-	// connecting through localhost
-	// check if config_hostname is local
-	if (!SocketServer::tryBind(0,config_hostname)) {
-	  continue;
-	}
-      }
-    } else { // client_addr == 0
-      if (!SocketServer::tryBind(0,config_hostname)) {
-	continue;
+    if (iter.get(CFG_TYPE_OF_SECTION, &type_c))
+      require(false);
+    if (type_c != (unsigned)type)
+    {
+      if (!node_id)
+        continue;
+      goto error;
+    }
+    const char *config_hostname= 0;
+    if (iter.get(CFG_NODE_HOST, &config_hostname))
+      require(false);
+    if (config_hostname == 0 || config_hostname[0] == 0)
+    {
+      config_hostname= "";
+    }
+    else
+    {
+      found_config_hostname= config_hostname;
+      if (match_hostname(client_addr, config_hostname))
+      {
+        if (!node_id)
+          continue;
+        goto error;
       }
+      exact_nodes.set(id);
     }
-    if (*nodeId != 0 ||
-	type != NDB_MGM_NODE_TYPE_MGM ||
-	no_mgm == 1) { // any match is ok
-
-      if (config_hostname == 0 &&
-	  *nodeId == 0 &&
-	  type != NDB_MGM_NODE_TYPE_MGM)
-      {
-	if (!id_found) // only set if not set earlier
-	  id_found= tmp;
-	continue; /* continue looking for a nodeid with specified
-		   * hostname
-		   */
-      }
-      assert(id_found == 0);
-      id_found= tmp;
-      break;
-    }
-    if (id_found) { // mgmt server may only have one match
-      error_string.appfmt("Ambiguous node id's %d and %d.\n"
-			  "Suggest specifying node id in connectstring,\n"
-			  "or specifying unique host names in config file.",
-			  id_found, tmp);
-      NdbMutex_Unlock(m_configMutex);
-      error_code = NDB_MGM_ALLOCID_CONFIG_MISMATCH;
-      DBUG_RETURN(false);
+    nodes.set(id);
+    struct nodeid_and_host a= {id, config_hostname};
+    nodes_info.push_back(a);
+    if (node_id)
+      break;
+  }
+  if (nodes_info.size() != 0)
+  {
+    return 0;
+  }
+
+ error:
+  /*
+    lock on m_configMutex is held because found_config_hostname may
+    reference memory inside the config structure
+  */
+  error_code= NDB_MGM_ALLOCID_CONFIG_MISMATCH;
+  if (node_id)
+  {
+    if (type_c != (unsigned) type)
+    {
+      BaseString type_string, type_c_string;
+      const char *alias, *str;
+      alias= ndb_mgm_get_node_type_alias_string(type, &str);
+      type_string.assfmt("%s(%s)", alias, str);
+      alias= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)type_c,
+                                                &str);
+      type_c_string.assfmt("%s(%s)", alias, str);
+      error_string.appfmt("Id %d configured as %s, connect attempted as %s.",
+                          node_id, type_c_string.c_str(),
+                          type_string.c_str());
+      return -1;
     }
-    if (config_hostname == 0) {
-      error_string.appfmt("Ambiguity for node id %d.\n"
-			  "Suggest specifying node id in connectstring,\n"
-			  "or specifying unique host names in config file,\n"
-			  "or specifying just one mgmt server in config file.",
-			  tmp);
-      NdbMutex_Unlock(m_configMutex);
-      error_code = NDB_MGM_ALLOCID_CONFIG_MISMATCH;
-      DBUG_RETURN(false);
+    if (found_config_hostname)
+    {
+      struct in_addr config_addr= {0};
+      int r_config_addr= Ndb_getInAddr(&config_addr, found_config_hostname);
+      error_string.appfmt("Connection with id %d done from wrong host ip %s,",
+                          node_id, inet_ntoa(((struct sockaddr_in *)
+                                              (client_addr))->sin_addr));
+      error_string.appfmt(" expected %s(%s).", found_config_hostname,
+                          r_config_addr ?
+                          "lookup failed" : inet_ntoa(config_addr));
+      return -1;
     }
-    id_found= tmp; // mgmt server matched, check for more matches
+    error_string.appfmt("No node defined with id=%d in config file.", node_id);
+    return -1;
   }
-  NdbMutex_Unlock(m_configMutex);
 
-  if (id_found && client_addr != 0)
+  // node_id == 0 and nodes_info.size() == 0
+  if (found_config_hostname)
   {
-    int res = alloc_node_id_req(id_found, type);
-    unsigned save_id_found = id_found;
+    error_string.appfmt("Connection done from wrong host ip %s.",
+                        (client_addr)?
+                        inet_ntoa(((struct sockaddr_in *)
+                                   (client_addr))->sin_addr):"");
+    return -1;
+  }
+
+  error_string.append("No nodes defined in config file.");
+  return -1;
+}
+
+int
+MgmtSrvr::try_alloc(unsigned id, const char *config_hostname,
+                    enum ndb_mgm_node_type type,
+                    const struct sockaddr *client_addr,
+                    Uint32 timeout_ms)
+{
+  if (client_addr != 0)
+  {
+    int res = alloc_node_id_req(id, type, timeout_ms);
     switch (res)
     {
     case 0:
@@ -2372,155 +2399,176 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId,
       break;
     default:
       // something wrong
-      id_found = 0;
-      break;
-
-    }
-    if (id_found == 0)
-    {
-      char buf[128];
-      ndb_error_string(res, buf, sizeof(buf));
-      error_string.appfmt("Cluster refused allocation of id %d. Error: %d (%s).",
-			  save_id_found, res, buf);
-      g_eventLogger->warning("Cluster refused allocation of id %d. "
-                             "Connection from ip %s. "
-                             "Returned error string \"%s\"", save_id_found,
-                             inet_ntoa(((struct sockaddr_in *)
-                                        (client_addr))->sin_addr),
-                             error_string.c_str());
-      DBUG_RETURN(false);
+      return -1;
     }
   }
 
-  if (id_found)
+  DBUG_PRINT("info", ("allocating node id %d",id));
   {
-    *nodeId= id_found;
-    DBUG_PRINT("info", ("allocating node id %d",*nodeId));
+    int r= 0;
+    if (client_addr)
+    {
+      m_connect_address[id]= ((struct sockaddr_in *)client_addr)->sin_addr;
+    }
+    else if (config_hostname)
+    {
+      r= Ndb_getInAddr(&(m_connect_address[id]), config_hostname);
+    }
+    else
     {
-      int r= 0;
-      if (client_addr)
-	m_connect_address[id_found]=
-	  ((struct sockaddr_in *)client_addr)->sin_addr;
-      else if (config_hostname)
-	r= Ndb_getInAddr(&(m_connect_address[id_found]), config_hostname);
-      else {
-	char name[256];
-	r= gethostname(name, sizeof(name));
-	if (r == 0) {
-	  name[sizeof(name)-1]= 0;
-	  r= Ndb_getInAddr(&(m_connect_address[id_found]), name);
-	}
-      }
-      if (r)
-	m_connect_address[id_found].s_addr= 0;
-    }
-    m_reserved_nodes.set(id_found);
-    if (theFacade && id_found != theFacade->ownId())
-    {
-      /**
-       * Make sure we're ready to accept connections from this node
-       */
-      theFacade->lock_mutex();
-      theFacade->doConnect(id_found);
-      theFacade->unlock_mutex();
+      char name[256];
+      r= gethostname(name, sizeof(name));
+      if (r == 0)
+      {
+        name[sizeof(name)-1]= 0;
+        r= Ndb_getInAddr(&(m_connect_address[id]), name);
+      }
+    }
+    if (r)
+    {
+      m_connect_address[id].s_addr= 0;
     }
+  }
+  m_reserved_nodes.set(id);
+  if (theFacade && id != theFacade->ownId())
+  {
+    /**
+     * Make sure we're ready to accept connections from this node
+     */
+    theFacade->lock_mutex();
+    theFacade->doConnect(id);
+    theFacade->unlock_mutex();
+  }
     
-    char tmp_str[128];
-    m_reserved_nodes.getText(tmp_str);
-    g_eventLogger->info("Mgmt server state: nodeid %d reserved for ip %s, "
-                        "m_reserved_nodes %s.",
-                        id_found, get_connect_address(id_found), tmp_str);
+  char tmp_str[128];
+  m_reserved_nodes.getText(tmp_str);
+  g_eventLogger->info("Mgmt server state: nodeid %d reserved for ip %s, "
+                      "m_reserved_nodes %s.",
+                      id, get_connect_address(id), tmp_str);
+
+  return 0;
+}
+
+bool
+MgmtSrvr::alloc_node_id(NodeId * nodeId,
+			enum ndb_mgm_node_type type,
+			const struct sockaddr *client_addr,
+			SOCKET_SIZE_TYPE *client_addr_len,
+			int &error_code, BaseString &error_string,
+                        int log_event,
+                        int timeout_s)
+{
+  DBUG_ENTER("MgmtSrvr::alloc_node_id");
+  DBUG_PRINT("enter", ("nodeid: %d  type: %d  client_addr: 0x%ld",
+		       *nodeId, type, (long) client_addr));
+
+  if (g_no_nodeid_checks) {
+    if (*nodeId == 0) {
+      error_string.appfmt("no-nodeid-checks set in management server. "
+			  "node id must be set explicitly in connectstring");
+      error_code = NDB_MGM_ALLOCID_CONFIG_MISMATCH;
+      DBUG_RETURN(false);
+    }
     DBUG_RETURN(true);
   }
 
-  if (found_matching_type && !found_free_node) {
-    // we have a temporary error which might be due to that 
-    // we have got the latest connect status from db-nodes.  Force update.
-    updateStatus();
-  }
+  Uint32 timeout_ms = Uint32(1000 * timeout_s);
+
+  Guard g(m_node_id_mutex);
+
+  NodeBitmask connected_nodes;
+  get_connected_nodes(connected_nodes);
+
+  NodeBitmask nodes, exact_nodes;
+  Vector<struct nodeid_and_host> nodes_info;
+
+  /* find all nodes with correct type */
+  if (find_node_type(*nodeId, type, client_addr, nodes, exact_nodes, nodes_info,
+                     error_code, error_string))
+    goto error;
 
-  BaseString type_string, type_c_string;
+  // nodes_info.size() == 0 handled inside find_node_type
+  DBUG_ASSERT(nodes_info.size() != 0);
+
+  if (type == NDB_MGM_NODE_TYPE_MGM && nodes_info.size() > 1)
   {
-    const char *alias, *str;
-    alias= ndb_mgm_get_node_type_alias_string(type, &str);
-    type_string.assfmt("%s(%s)", alias, str);
-    alias= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)type_c,
-					      &str);
-    type_c_string.assfmt("%s(%s)", alias, str);
+    // mgmt server may only have one match
+    error_string.appfmt("Ambiguous node id's %d and %d. "
+                        "Suggest specifying node id in connectstring, "
+                        "or specifying unique host names in config file.",
+                        nodes_info[0].id, nodes_info[1].id);
+    error_code= NDB_MGM_ALLOCID_CONFIG_MISMATCH;
+    goto error;
   }
 
-  if (*nodeId == 0)
+  /* remove connected and reserved nodes from possible nodes to allocate */
+  nodes.bitANDC(connected_nodes);
+  nodes.bitANDC(m_reserved_nodes);
+
+  /* first try all nodes with exact match of hostname */
+  for (Uint32 i = 0; i < nodes_info.size(); i++)
   {
-    if (found_matching_id)
+    unsigned id= nodes_info[i].id;
+    if (!nodes.get(id))
+      continue;
+
+    if (!exact_nodes.get(id))
+      continue;
+
+    const char *config_hostname= nodes_info[i].host.c_str();
+    if (!try_alloc(id, config_hostname, type, client_addr, timeout_ms))
     {
-      if (found_matching_type)
-      {
-	if (found_free_node)
-        {
-	  error_string.appfmt("Connection done from wrong host ip %s.",
-			      (client_addr)?
-                              inet_ntoa(((struct sockaddr_in *)
-					 (client_addr))->sin_addr):"");
-          error_code = NDB_MGM_ALLOCID_ERROR;
-        }
-	else
-        {
-	  error_string.appfmt("No free node id found for %s.",
-			      type_string.c_str());
-          error_code = NDB_MGM_ALLOCID_ERROR;
-        }
-      }
-      else
-      {
-	error_string.appfmt("No %s node defined in config file.",
-			    type_string.c_str());
-        error_code = NDB_MGM_ALLOCID_CONFIG_MISMATCH;
-      }
+      // success
+      *nodeId= id;
+      DBUG_RETURN(true);
     }
-    else
+  }
+
+  /* now try the open nodes (no hostname configured) */
+  for (Uint32 i = 0; i < nodes_info.size(); i++)
+  {
+    unsigned id= nodes_info[i].id;
+    if (!nodes.get(id))
+      continue;
+
+    /**
+     * exact node tried in loop above
+     */
+    if (exact_nodes.get(id))
+      continue;
+
+    if (!try_alloc(id, NULL, type, client_addr, timeout_ms))
     {
-      error_string.append("No nodes defined in config file.");
-      error_code = NDB_MGM_ALLOCID_CONFIG_MISMATCH;
+      // success
+      *nodeId= id;
+      DBUG_RETURN(true);
     }
   }
+
+  /*
+    there are nodes of the correct type available, but
+    allocation failed for some reason
+  */
+  if (*nodeId)
+  {
+    error_string.appfmt("Id %d already allocated by another node.",
+                        *nodeId);
+  }
   else
   {
-    if (found_matching_id)
-    {
-      if (found_matching_type)
-      {
-	if (found_free_node)
-        {
-	  // have to split these into two since inet_ntoa overwrites itself
-	  error_string.appfmt("Connection with id %d done from wrong host ip %s,",
-			      *nodeId, inet_ntoa(((struct sockaddr_in *)
-						  (client_addr))->sin_addr));
-	  error_string.appfmt(" expected %s(%s).", config_hostname,
-			      r_config_addr ?
-			      "lookup failed" : inet_ntoa(config_addr));
-          error_code = NDB_MGM_ALLOCID_CONFIG_MISMATCH;
-	}
-        else
-        {
-	  error_string.appfmt("Id %d already allocated by another node.",
-			      *nodeId);
-          error_code = NDB_MGM_ALLOCID_ERROR;
-        }
-      }
-      else
-      {
-	error_string.appfmt("Id %d configured as %s, connect attempted as %s.",
-			    *nodeId, type_c_string.c_str(),
-			    type_string.c_str());
-        error_code = NDB_MGM_ALLOCID_CONFIG_MISMATCH;
-      }
-    }
-    else
-    {
-      error_string.appfmt("No node defined with id=%d in config file.",
-			  *nodeId);
-      error_code = NDB_MGM_ALLOCID_CONFIG_MISMATCH;
-    }
+    const char *alias, *str;
+    alias= ndb_mgm_get_node_type_alias_string(type, &str);
+    error_string.appfmt("No free node id found for %s(%s).",
+                        alias, str);
+  }
+  error_code = NDB_MGM_ALLOCID_ERROR;
+
+ error:
+  if (error_code != NDB_MGM_ALLOCID_CONFIG_MISMATCH)
+  {
+    // we have a temporary error, which might be because our connect
+    // status for the db-nodes is stale.  Force an update.
+    updateStatus();
   }
 
   if (log_event || error_code == NDB_MGM_ALLOCID_CONFIG_MISMATCH)
@@ -2534,27 +2582,35 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId,
 			   : "<none>",
 			   error_string.c_str());
 
-    NodeBitmask connected_nodes2;
-    get_connected_nodes(connected_nodes2);
     BaseString tmp_connected, tmp_not_connected;
     for(Uint32 i = 0; i < MAX_NODES; i++)
     {
-      if (connected_nodes2.get(i))
+      if (connected_nodes.get(i))
       {
-	if (!m_reserved_nodes.get(i))
-	  tmp_connected.appfmt(" %d", i);
+        if (!m_reserved_nodes.get(i))
+        {
+          tmp_connected.appfmt("%d ", i);
+        }
       }
       else if (m_reserved_nodes.get(i))
       {
-	tmp_not_connected.appfmt(" %d", i);
+        tmp_not_connected.appfmt("%d ", i);
       }
     }
+
     if (tmp_connected.length() > 0)
-      g_eventLogger->info("Mgmt server state: node id's %s connected but not reserved", 
-			  tmp_connected.c_str());
+    {
+      g_eventLogger->info
+        ("Mgmt server state: node id's %sconnected but not reserved",
+         tmp_connected.c_str());
+    }
+
     if (tmp_not_connected.length() > 0)
-      g_eventLogger->info("Mgmt server state: node id's %s not connected but reserved",
-			  tmp_not_connected.c_str());
+    {
+      g_eventLogger->info
+        ("Mgmt server state: node id's %snot connected but reserved",
+         tmp_not_connected.c_str());
+    }
   }
   DBUG_RETURN(false);
 }
@@ -3091,3 +3147,4 @@ template class MutexVector<unsigned shor
 template class MutexVector<Ndb_mgmd_event_service::Event_listener>;
 template class Vector<EventSubscribeReq>;
 template class MutexVector<EventSubscribeReq>;
+template class Vector<MgmtSrvr::nodeid_and_host>;

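The rewritten alloc_node_id() above is now a filter-then-try pipeline: find_node_type() collects the candidate slots, connected and already-reserved ids are masked out with bitANDC(), and try_alloc() is attempted first on slots whose configured hostname matched the client exactly, then on the open slots. A minimal standalone sketch of that two-pass selection, with illustrative stand-ins (Candidate, try_alloc_stub) rather than the real NDB types:

    #include <bitset>
    #include <vector>

    struct Candidate { unsigned id; bool exact_host_match; };

    // Stand-in for MgmtSrvr::try_alloc(); returns 0 on success.
    static int try_alloc_stub(unsigned id) { (void)id; return 0; }

    static int pick_node_id(const std::vector<Candidate>& cands,
                            const std::bitset<256>& free_ids)
    {
      // pass 1: slots whose configured hostname matched the client
      for (size_t i = 0; i < cands.size(); i++)
        if (free_ids[cands[i].id] && cands[i].exact_host_match &&
            try_alloc_stub(cands[i].id) == 0)
          return (int)cands[i].id;
      // pass 2: open slots (no hostname configured)
      for (size_t i = 0; i < cands.size(); i++)
        if (free_ids[cands[i].id] && !cands[i].exact_host_match &&
            try_alloc_stub(cands[i].id) == 0)
          return (int)cands[i].id;
      return -1; // maps to the "No free node id found" error path
    }
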
=== modified file 'storage/ndb/src/mgmsrv/MgmtSrvr.hpp'
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp	2009-01-08 14:35:49 +0000
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp	2009-02-18 14:29:58 +0000
@@ -399,10 +399,11 @@ public:
    */
   bool getNextNodeId(NodeId * _nodeId, enum ndb_mgm_node_type type) const ;
   bool alloc_node_id(NodeId * _nodeId, enum ndb_mgm_node_type type,
-		     struct sockaddr *client_addr,
+		     const struct sockaddr *client_addr,
                      SOCKET_SIZE_TYPE *client_addr_len,
 		     int &error_code, BaseString &error_string,
-                     int log_event = 1);
+                     int log_event = 1,
+                     int timeout_s = 20);
   
   /**
    *
@@ -491,7 +492,9 @@ private:
    */
   int getBlockNumber(const BaseString &blockName);
 
-  int alloc_node_id_req(NodeId free_node_id, enum ndb_mgm_node_type type);
+  int alloc_node_id_req(NodeId free_node_id,
+                        enum ndb_mgm_node_type type,
+                        Uint32 timeout_ms);
 
   int check_nodes_starting();
   int check_nodes_stopping();
@@ -643,6 +646,21 @@ private:
   Config *_props;
 
   ConfigRetriever *m_config_retriever;
+
+  struct nodeid_and_host
+  {
+    unsigned id;
+    BaseString host;
+  };
+  int find_node_type(unsigned node_id, enum ndb_mgm_node_type type,
+                     const struct sockaddr *client_addr,
+                     NodeBitmask &nodes,
+                     NodeBitmask &exact_nodes,
+                     Vector<nodeid_and_host> &nodes_info,
+                     int &error_code, BaseString &error_string);
+  int match_hostname(const struct sockaddr *, const char *) const;
+  int try_alloc(unsigned id,  const char *, enum ndb_mgm_node_type type,
+                const struct sockaddr *client_addr, Uint32 timeout_ms);
 };
 
 inline

=== modified file 'storage/ndb/src/mgmsrv/Services.cpp'
--- a/storage/ndb/src/mgmsrv/Services.cpp	2009-01-23 11:03:00 +0000
+++ b/storage/ndb/src/mgmsrv/Services.cpp	2009-03-12 10:45:04 +0000
@@ -509,7 +509,8 @@ MgmApiSession::get_nodeid(Parser_t::Cont
     while (!m_mgmsrv.alloc_node_id(&tmp, (enum ndb_mgm_node_type)nodetype, 
                                    (struct sockaddr*)&addr, &addrlen,
                                    error_code, error_string,
-                                   tick == 0 ? 0 : log_event))
+                                   tick == 0 ? 0 : log_event,
+                                   timeout))
     {
       /* NDB_MGM_ALLOCID_CONFIG_MISMATCH is a non retriable error */
       if (tick == 0 && error_code != NDB_MGM_ALLOCID_CONFIG_MISMATCH)

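With the timeout now passed from get_nodeid() down into alloc_node_id(), the retry loop's key property is unchanged: NDB_MGM_ALLOCID_CONFIG_MISMATCH aborts immediately, anything else is treated as temporary and retried. A self-contained sketch of that shape (alloc_attempt() and the enum are stand-ins, not mgmapi calls):

    enum AllocResult { ALLOC_OK, ALLOC_TEMPORARY, ALLOC_CONFIG_MISMATCH };

    static AllocResult alloc_attempt() { return ALLOC_TEMPORARY; } // stub

    static bool alloc_with_retry(unsigned max_ticks)
    {
      for (unsigned tick = 0; tick < max_ticks; tick++)
      {
        AllocResult r = alloc_attempt();
        if (r == ALLOC_OK)
          return true;
        if (r == ALLOC_CONFIG_MISMATCH)
          return false; // non-retriable: waiting cannot fix the config
        // temporary error: retry on the next tick
      }
      return false;
    }
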
=== modified file 'storage/ndb/src/ndbapi/Ndb.cpp'
--- a/storage/ndb/src/ndbapi/Ndb.cpp	2008-12-09 18:59:54 +0000
+++ b/storage/ndb/src/ndbapi/Ndb.cpp	2009-02-16 12:05:36 +0000
@@ -1408,7 +1408,10 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* 
   if (initAutoIncrement() == -1)
     goto error_handler;
 
-  tConnection = this->startTransaction();
+  // Start transaction with table id as hint
+  tConnection = this->startTransaction(table,
+                                       (const char *) &aTableId,
+                                       sizeof(Uint32));
   if (tConnection == NULL)
     goto error_handler;
 

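The Ndb.cpp hunk switches to the three-argument startTransaction() overload so that the transaction coordinator is picked using the table id as a key hint, presumably keeping the autoincrement lookup local to the node storing that SYSTAB_0 row. A hedged usage sketch (ndb and table assumed initialised elsewhere via NdbApi.hpp):

    Uint32 keyVal = 7; // e.g. the table id, as in opTupleIdOnNdb() above
    NdbTransaction* trans =
      ndb->startTransaction(table, (const char*) &keyVal, sizeof(Uint32));
    if (trans == NULL)
    {
      // inspect ndb->getNdbError() and bail out
    }
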
=== modified file 'storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2008-08-08 09:40:47 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2009-03-19 13:28:34 +0000
@@ -501,7 +501,7 @@ NdbTableImpl::init(){
   m_min_rows = 0;
   m_max_rows = 0;
   m_tablespace_name.clear();
-  m_tablespace_id = ~0;
+  m_tablespace_id = RNIL;
   m_tablespace_version = ~0;
   m_single_user_mode = 0;
 }
@@ -1413,7 +1413,7 @@ NdbDictionaryImpl::fetchGlobalTableImplR
                                  m_ndb.usingFullyQualifiedNames());
     else
       m_error.code = 4000;
-    if (impl != 0 && (obj.init(*impl)))
+    if (impl != 0 && (obj.init(this, *impl)))
     {
       delete impl;
       impl = 0;
@@ -2813,7 +2813,7 @@ NdbDictInterface::serializeTableDesc(Ndb
 
   const char *tablespace_name= impl.m_tablespace_name.c_str();
 loop:
-  if(impl.m_tablespace_id != ~(Uint32)0)
+  if(impl.m_tablespace_version != ~(Uint32)0)
   {
     tmpTab->TablespaceId = impl.m_tablespace_id;
     tmpTab->TablespaceVersion = impl.m_tablespace_version;
@@ -3950,7 +3950,7 @@ NdbDictionaryImpl::getEvent(const char *
   DBUG_PRINT("info",("table %s", ev->getTableName()));
   if (tab == NULL)
   {
-    tab= fetchGlobalTableImplRef(InitTable(this, ev->getTableName()));
+    tab= fetchGlobalTableImplRef(InitTable(ev->getTableName()));
     if (tab == 0)
     {
       DBUG_PRINT("error",("unable to find table %s", ev->getTableName()));
@@ -3964,7 +3964,7 @@ NdbDictionaryImpl::getEvent(const char *
     {
       DBUG_PRINT("info", ("mismatch on verison in cache"));
       releaseTableGlobal(*tab, 1);
-      tab= fetchGlobalTableImplRef(InitTable(this, ev->getTableName()));
+      tab= fetchGlobalTableImplRef(InitTable(ev->getTableName()));
       if (tab == 0)
       {
         DBUG_PRINT("error",("unable to find table %s", ev->getTableName()));
@@ -5066,7 +5066,7 @@ NdbFilegroupImpl::NdbFilegroupImpl(NdbDi
 {
   m_extent_size = 0;
   m_undo_buffer_size = 0;
-  m_logfile_group_id = ~0;
+  m_logfile_group_id = RNIL;
   m_logfile_group_version = ~0;
 }
 
@@ -5147,7 +5147,7 @@ NdbFileImpl::NdbFileImpl(NdbDictionary::
 {
   m_size = 0;
   m_free = 0;
-  m_filegroup_id = ~0;
+  m_filegroup_id = RNIL;
   m_filegroup_version = ~0;
 }
 
@@ -5311,34 +5311,160 @@ cmp_ndbrec_attr(const void *a, const voi
     return 1;
 }
 
+struct BitRange{
+  Uint64 start; /* First occupied bit */
+  Uint64 end; /* Last occupied bit */
+};
+
+static int
+cmp_bitrange(const void* a, const void* b)
+{
+  /* Sort them by start bit */
+  const BitRange& brA= *(const BitRange*)a;
+  const BitRange& brB= *(const BitRange*)b;
+
+  if (brA.start < brB.start)
+    return -1;
+  else if (brA.start == brB.start)
+    return 0;
+  else
+    return 1;
+}
+
+bool
+NdbDictionaryImpl::validateRecordSpec(const NdbDictionary::RecordSpecification *recSpec,
+                                      Uint32 length,
+                                      Uint32 flags) 
+{
+  /* We check that there's no overlap between any of the data values
+   * or Null bits
+   */
+  
+  /* Column data + NULL bits with at least 1 non nullable PK */
+  const Uint32 MaxRecordElements= (2* NDB_MAX_ATTRIBUTES_IN_TABLE) - 1;
+  Uint32 numElements= 0;
+  BitRange bitRanges[ MaxRecordElements ];
+
+  if (length > NDB_MAX_ATTRIBUTES_IN_TABLE)
+  {
+    m_error.code= 4548;
+    return false;
+  }
+  
+  /* Populate bitRanges array with ranges of bits occupied by 
+   * data values and null bits
+   */
+  for (Uint32 rs=0; rs < length; rs++)
+  {
+    const NdbDictionary::Column* col= recSpec[rs].column;
+    Uint64 elementByteOffset= recSpec[rs].offset;
+    Uint64 elementByteLength= col->getSizeInBytes();
+    Uint64 nullLength= col->getNullable() ? 1 : 0;
+
+    /* Blobs 'data' just occupies the size of an NdbBlob ptr */
+    const NdbDictionary::Column::Type type= col->getType();
+    const bool isBlob= 
+      (type == NdbDictionary::Column::Blob) || 
+      (type == NdbDictionary::Column::Text);
+
+    if (isBlob)
+    {
+      elementByteLength= sizeof(NdbBlob*);
+    }
+    
+    if ((type == NdbDictionary::Column::Bit) &&
+        (flags & NdbDictionary::RecMysqldBitfield))
+    {
+      /* MySQLD Bit format puts 'fractional' part of bit types 
+       * in with the null bits - so there's 1 optional Null 
+       * bit followed by n (max 7) databits, at position 
+       * given by the nullbit offsets.  Then the rest of
+       * the bytes go at the normal offset position.
+       */
+      Uint32 bitLength= col->getLength();
+      Uint32 fractionalBits= bitLength % 8;
+      nullLength+= fractionalBits;
+      elementByteLength= bitLength / 8;
+    }
+
+    /* Does the element itself have any bytes?
+     * (MySQLD bit format may have all data as 'null bits')
+     */
+    if (elementByteLength)
+    {
+      bitRanges[numElements].start= 8 * elementByteOffset;
+      bitRanges[numElements].end= (8 * (elementByteOffset + elementByteLength)) - 1;
+      
+      numElements++;
+    }
+
+    if (nullLength)
+    {
+      bitRanges[numElements].start= 
+        (8* recSpec[rs].nullbit_byte_offset) + 
+        recSpec[rs].nullbit_bit_in_byte;
+      bitRanges[numElements].end= bitRanges[numElements].start + 
+        (nullLength -1);
+
+      numElements++;
+    }
+  }
+  
+  /* Now sort the 'elements' by start bit */
+  qsort(bitRanges,
+        numElements,
+        sizeof(BitRange),
+        cmp_bitrange);
+
+  Uint64 endOfPreviousRange= bitRanges[0].end;
+
+  /* Now check that there are no overlaps */
+  for (Uint32 rangeNum= 1; rangeNum < numElements; rangeNum++)
+  {
+    if (unlikely((bitRanges[rangeNum].start <= endOfPreviousRange)))
+    {
+      /* Oops, this range overlaps with previous one */
+      m_error.code= 4547;
+      return false;
+    }
+    endOfPreviousRange= bitRanges[rangeNum].end;
+  }
+
+  /* All relevant ranges are distinct */
+  return true;
+}
+
 
 /* ndb_set_record_specification
  * This procedure sets the contents of the passed RecordSpecification
  * for the given column in the given table.
  * The column is placed at the storageOffset given, and a new
  * storageOffset, beyond the end of this column, is returned.
- * Null bits are stored at the start of the row, in attrid position.
- * Note that non nullable columns must therefore still have 
- * space reserved.
- * The caller must ensure that sufficient space is reserved before the 
- * offset of the first column.
+ * Null bits are stored at the start of the row in consecutive positions.
+ * The caller must ensure that enough space exists for all of the nullable
+ * columns, before the first bit of data.
  * The new storageOffset is returned.
  */
 static Uint32
 ndb_set_record_specification(Uint32 storageOffset,
                              Uint32 field_num,
+                             Uint32& nullableColNum,
                              NdbDictionary::RecordSpecification *spec,
                              NdbColumnImpl *col)
 {
   spec->column= col->m_facade;
 
   spec->offset= storageOffset;
-  Uint32 nextOffset= storageOffset + spec->column->getSizeInBytes();
-
+  /* For Blobs we just need the NdbBlob* */
+  const Uint32 sizeOfElement= col->getBlobType() ? 
+    sizeof(NdbBlob*) :
+    spec->column->getSizeInBytes();
+  
   if (spec->column->getNullable())
   {
-    spec->nullbit_byte_offset= (field_num >> 3);
-    spec->nullbit_bit_in_byte= (field_num & 7);
+    spec->nullbit_byte_offset= (nullableColNum >> 3);
+    spec->nullbit_bit_in_byte= (nullableColNum & 7);
+    nullableColNum ++;
   }
   else
   {
@@ -5346,7 +5472,7 @@ ndb_set_record_specification(Uint32 stor
     spec->nullbit_bit_in_byte= 0;
   }
 
-  return nextOffset;
+  return storageOffset + sizeOfElement;
 }
 
 
@@ -5406,7 +5532,26 @@ NdbDictionaryImpl::createDefaultNdbRecor
   /* Determine number of nullable columns */
   for (i=0; i<numCols; i++)
   {
-    if (tableOrIndex->m_columns[i]->m_nullable)
+    /* As the Index NdbRecord is built using Columns from the base table,
+     * it will get/set Null according to their Nullability.
+     * If this is an index, then we need to take the 'Nullability' from
+     * the base table column objects - unique index table column objects
+     * will not be nullable as they are part of the key.
+     */
+    const NdbColumnImpl* col= NULL;
+    
+    if (isIndex)
+    {
+      Uint32 baseTableColNum= 
+        tableOrIndex->m_index->m_columns[i]->m_keyInfoPos;
+      col= baseTableForIndex->m_columns[baseTableColNum];
+    }
+    else
+    {
+      col= tableOrIndex->m_columns[i];
+    }
+    
+    if (col->m_nullable)
       nullableCols ++;
   }
 
@@ -5424,6 +5569,8 @@ NdbDictionaryImpl::createDefaultNdbRecor
     return -1;
   }
   
+  Uint32 nullableColNum= 0;
+
   /* Build record specification array for this table. */
   for (i= 0; i < numCols; i++)
   {
@@ -5480,7 +5627,8 @@ NdbDictionaryImpl::createDefaultNdbRecor
     }
 
     offset= ndb_set_record_specification(offset, 
-                                         i, 
+                                         i,
+                                         nullableColNum,
                                          &spec[i], 
                                          col);
   }
@@ -5626,6 +5774,12 @@ NdbDictionaryImpl::createRecord(const Nd
     return NULL;
   }
 
+  if (!validateRecordSpec(recSpec, length, flags))
+  {
+    /* Error set in call */
+    return NULL;
+  }
+
   isIndex= (table->m_indexType==NdbDictionary::Object::OrderedIndex ||
             table->m_indexType==NdbDictionary::Object::UniqueHashIndex);
 
@@ -6140,7 +6294,7 @@ NdbDictionaryImpl::setNull(const NdbReco
       assert(attrIdIndex < (int)record->noOfColumns);
       NdbRecord::Attr attr= record->columns[attrIdIndex];
       
-      if (record->flags & NdbRecord::IsNullable)
+      if (attr.flags & NdbRecord::IsNullable)
       {
         if (value)
           *(row + attr.nullbit_byte_offset) |= 
@@ -6491,7 +6645,7 @@ NdbDictInterface::get_filegroup(NdbFileg
 		     DICT_WAITFOR_TIMEOUT, 100);
   if (r)
   {
-    dst.m_id = -1;
+    dst.m_id = RNIL;
     dst.m_version = ~0;
     
     DBUG_PRINT("info", ("get_filegroup failed dictSignal"));

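The heart of the new validateRecordSpec() above is a standard sort-and-sweep interval check: every data field and null bit becomes a [start, end] bit range, the ranges are sorted by start, and one linear pass flags any overlap. The same idea in self-contained form:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct Range { unsigned long long start, end; };

    static bool by_start(const Range& a, const Range& b)
    { return a.start < b.start; }

    static bool ranges_distinct(std::vector<Range> r)
    {
      std::sort(r.begin(), r.end(), by_start);
      for (size_t i = 1; i < r.size(); i++)
        if (r[i].start <= r[i - 1].end) // starts inside the previous range
          return false;                 // -> error 4547 in the real code
      return true;
    }

    int main()
    {
      Range a = {0, 31}, b = {32, 63}, c = {16, 47};
      std::vector<Range> ok, bad;
      ok.push_back(a);  ok.push_back(b);
      bad.push_back(a); bad.push_back(c);
      assert(ranges_distinct(ok) && !ranges_distinct(bad));
      return 0;
    }
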
=== modified file 'storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp	2008-11-11 11:40:42 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp	2009-03-16 23:49:57 +0000
@@ -627,15 +627,12 @@ class NdbDictionaryImpl;
 class GlobalCacheInitObject
 {
 public:
-  NdbDictionaryImpl *m_dict;
   const BaseString &m_name;
-  GlobalCacheInitObject(NdbDictionaryImpl *dict,
-                        const BaseString &name) :
-    m_dict(dict),
+  GlobalCacheInitObject(const BaseString &name) :
     m_name(name)
   {}
   virtual ~GlobalCacheInitObject() {}
-  virtual int init(NdbTableImpl &tab) const = 0;
+  virtual int init(NdbDictionaryImpl *dict, NdbTableImpl &tab) const = 0;
 };
 
 class NdbDictionaryImpl : public NdbDictionary::Dictionary {
@@ -744,6 +741,10 @@ public:
   int createDefaultNdbRecord(NdbTableImpl* tableOrIndex,
                              const NdbTableImpl* baseTableForIndex);
 
+  bool validateRecordSpec(const NdbDictionary::RecordSpecification *recSpec,
+                          Uint32 length,
+                          Uint32 flags);
+
   NdbRecord *createRecord(const NdbTableImpl *table,
                           const NdbDictionary::RecordSpecification *recSpec,
                           Uint32 length,
@@ -1109,15 +1110,14 @@ NdbDictionaryImpl::getImpl(const NdbDict
 class InitTable : public GlobalCacheInitObject
 {
 public:
-  InitTable(NdbDictionaryImpl *dict,
-            const BaseString &name) :
-    GlobalCacheInitObject(dict, name)
+  InitTable(const BaseString &name) :
+    GlobalCacheInitObject(name)
   {}
-  int init(NdbTableImpl &tab) const
+  int init(NdbDictionaryImpl *dict, NdbTableImpl &tab) const
   {
-    int res= m_dict->getBlobTables(tab);
+    int res= dict->getBlobTables(tab);
     if (res == 0)
-      res= m_dict->createDefaultNdbRecord(&tab, NULL);
+      res= dict->createDefaultNdbRecord(&tab, NULL);
     
     return res;
   }
@@ -1128,7 +1128,7 @@ NdbTableImpl *
 NdbDictionaryImpl::getTableGlobal(const char * table_name)
 {
   const BaseString internal_tabname(m_ndb.internalize_table_name(table_name));
-  return fetchGlobalTableImplRef(InitTable(this, internal_tabname));
+  return fetchGlobalTableImplRef(InitTable(internal_tabname));
 }
 
 inline
@@ -1167,7 +1167,7 @@ NdbDictionaryImpl::get_local_table_info(
   if (info == 0)
   {
     NdbTableImpl *tab=
-      fetchGlobalTableImplRef(InitTable(this, internalTableName));
+      fetchGlobalTableImplRef(InitTable(internalTableName));
     if (tab)
     {
       info= Ndb_local_table_info::create(tab, m_local_table_data_size);
@@ -1189,12 +1189,12 @@ public:
   InitIndex(const BaseString &internal_indexname,
 	    const char *index_name,
 	    const NdbTableImpl &prim) :
-    GlobalCacheInitObject(0, internal_indexname),
+    GlobalCacheInitObject(internal_indexname),
     m_index_name(index_name),
     m_prim(prim)
     {}
   
-  int init(NdbTableImpl &tab) const {
+  int init(NdbDictionaryImpl *dict, NdbTableImpl &tab) const {
     DBUG_ENTER("InitIndex::init");
     DBUG_ASSERT(tab.m_indexType != NdbDictionary::Object::TypeUndefined);
     /**
@@ -1210,7 +1210,7 @@ public:
       tab.m_index = idx;
 
       /* Finally, create default NdbRecord for this index */
-      DBUG_RETURN(m_dict->createDefaultNdbRecord(&tab, &m_prim));
+      DBUG_RETURN(dict->createDefaultNdbRecord(&tab, &m_prim));
     }
     DBUG_RETURN(1);
   }

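The header change above is a small ownership refactor: GlobalCacheInitObject no longer stores an NdbDictionaryImpl* taken at construction; the dictionary is supplied when init() is invoked, which also removes the dummy 0 pointer InitIndex previously had to pass. The shape of the pattern, with stand-in types:

    struct Dict;   // stands in for NdbDictionaryImpl
    struct Table;  // stands in for NdbTableImpl

    struct InitObject
    {
      virtual ~InitObject() {}
      // before: init(Table&) plus a Dict* member fixed at construction;
      // after:  the caller passes the Dict at call time, so the functor
      //         carries no dictionary state of its own.
      virtual int init(Dict* dict, Table& tab) const = 0;
    };
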
=== modified file 'storage/ndb/src/ndbapi/NdbOperationExec.cpp'
--- a/storage/ndb/src/ndbapi/NdbOperationExec.cpp	2008-11-11 11:40:42 +0000
+++ b/storage/ndb/src/ndbapi/NdbOperationExec.cpp	2009-03-17 00:51:41 +0000
@@ -623,6 +623,7 @@ NdbOperation::buildSignalsNdbRecord(Uint
   const char *key_row= m_key_row;
   const NdbRecord *attr_rec= m_attribute_record;
   const char *updRow;
+  const bool isScanTakeover= (key_rec == NULL);
 
   TcKeyReq *tcKeyReq= CAST_PTR(TcKeyReq, theTCREQ->getDataPtrSend());
   Uint32 hdrSize= fillTcKeyReqHdr(tcKeyReq, aTC_ConnectPtr, aTransId);
@@ -630,7 +631,7 @@ NdbOperation::buildSignalsNdbRecord(Uint
   remain= TcKeyReq::MaxKeyInfo;
 
   /* Fill in keyinfo (in TCKEYREQ signal, spilling into KEYINFO signals). */
-  if (!key_rec)
+  if (isScanTakeover)
   {
     /* This means that key_row contains the KEYINFO20 data. */
     /* i.e. lock takeover */
@@ -869,18 +870,72 @@ NdbOperation::buildSignalsNdbRecord(Uint
 
       if (likely(!(col->flags & (NdbRecord::IsBlob|NdbRecord::IsMysqldBitfield))))
       {
-        if (col->is_null(updRow))
-          length= 0;
-        else if (!col->get_var_length(updRow, length))
+        if (( ! (col->flags & NdbRecord::IsKey)) ||
+            ( isScanTakeover ) )
+        {
+          /* Normal path where we get data from the attr row 
+           * Always get ATTRINFO data from the attr row for ScanTakeover
+           * Update as there's no key row
+           * This allows scan-takeover update to update pk within
+           * collation rules
+           */
+          if (col->is_null(updRow))
+            length= 0;
+          else if (!col->get_var_length(updRow, length))
+          {
+            /* Hm, corrupt varchar length. */
+            setErrorCodeAbort(4209);
+            return -1;
+          }
+          data= &updRow[col->offset];
+        }
+        else
         {
-          /* Hm, corrupt varchar length. */
-          setErrorCodeAbort(4209);
-          return -1;
+          /* For Insert/Write where user provides PK columns,
+           * take them from the key record row to avoid sending different
+           * values in KeyInfo and AttrInfo
+           * Need the correct Attr struct from the key
+           * record
+           */
+          assert(key_rec != 0); /* Not scan takeover */
+          assert(key_rec->m_attrId_indexes_length > attrId);
+          int keyColIdx= key_rec->m_attrId_indexes[attrId];
+          assert(keyColIdx != -1);
+          col= &key_rec->columns[keyColIdx];
+          assert(col->attrId == attrId);
+          assert(col->flags & NdbRecord::IsKey);
+          
+          /* Now get the data and length from the key row 
+           * Any issues with key nullness should've been 
+           * caught above
+           */
+          assert(!col->is_null(key_row));
+          length= 0;
+          
+          bool len_ok;
+          
+          if (col->flags & NdbRecord::IsMysqldShrinkVarchar)
+          {
+            /* Used to support special varchar format for mysqld keys. 
+             * Ideally we'd avoid doing this shrink twice...
+             */
+            len_ok= col->shrink_varchar(key_row, length, buf);
+            data= buf;
+          }
+          else
+          {
+            len_ok= col->get_var_length(key_row, length);
+            data= &key_row[col->offset];
+          }
+          
+          /* Should have 'seen' any length issues when generating keyinfo above */
+          assert(len_ok); 
         }
-        data= &updRow[col->offset];
       }
       else
       {
+        /* Blob or MySQLD bitfield handling */
+        assert(! (col->flags & NdbRecord::IsKey));
         if (likely(col->flags & NdbRecord::IsMysqldBitfield))
         {
           /* Mysqld format bitfield. */

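The NdbOperationExec.cpp change closes a consistency hole: for NdbRecord insert/write, primary-key bytes in AttrInfo are now always copied from the key row, so KeyInfo and AttrInfo can never disagree, while scan-takeover updates keep reading from the attribute row so a collation-equivalent PK rewrite remains possible. A simplified sketch of the attrId-to-key-column lookup the new branch relies on (the types are stand-ins, not the real NdbRecord layout):

    #include <cassert>
    #include <vector>

    struct Attr { unsigned attrId; bool isKey; };
    struct Rec
    {
      std::vector<Attr> columns;
      std::vector<int>  attrId_indexes; // attrId -> index in columns, or -1
    };

    static const Attr* key_column_for(const Rec& key_rec, unsigned attrId)
    {
      assert(attrId < key_rec.attrId_indexes.size());
      int idx = key_rec.attrId_indexes[attrId];
      if (idx < 0)
        return 0;
      const Attr* col = &key_rec.columns[idx];
      assert(col->attrId == attrId && col->isKey);
      return col; // data and length are then read from the key row
    }
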
=== modified file 'storage/ndb/src/ndbapi/NdbReceiver.cpp'
--- a/storage/ndb/src/ndbapi/NdbReceiver.cpp	2008-02-19 15:00:29 +0000
+++ b/storage/ndb/src/ndbapi/NdbReceiver.cpp	2009-03-16 15:08:09 +0000
@@ -85,6 +85,7 @@ NdbReceiver::init(ReceiverType type, boo
 
 void
 NdbReceiver::release(){
+  theMagicNumber = 0;
   NdbRecAttr* tRecAttr = theFirstRecAttr;
   while (tRecAttr != NULL)
   {

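The one-line NdbReceiver change is a poison-on-release guard: zeroing theMagicNumber in release() makes any later use of a stale receiver detectable by the usual magic-number check. Sketch of the pattern (the magic constant here is made up):

    struct Receiver
    {
      unsigned theMagicNumber;
      Receiver() : theMagicNumber(0xABCD1234) {} // illustrative magic
      bool checkMagicNumber() const { return theMagicNumber == 0xABCD1234; }
      void release()
      {
        theMagicNumber = 0; // poison first, then free attached resources
        // ... free RecAttr chain etc ...
      }
    };
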
=== modified file 'storage/ndb/src/ndbapi/NdbScanOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp	2009-02-10 08:24:37 +0000
+++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp	2009-03-19 13:28:34 +0000
@@ -611,14 +611,12 @@ NdbIndexScanOperation::setBound(const Nd
     return -1;
   }
 
-  if (((bound.low_key == NULL) && (bound.high_key == NULL)) ||
-      ((bound.low_key_count == 0) && (bound.high_key_count == 0)))
-  {
-    /* IndexBound passed has no bound information */
-    setErrorCodeAbort(4541);
-    return -1;
-  }
-
+  /* Has the user supplied an open range (no bounds)? */
+  const bool openRange= (((bound.low_key == NULL) && 
+                          (bound.high_key == NULL)) ||
+                         ((bound.low_key_count == 0) && 
+                          (bound.high_key_count == 0)));
+  
   m_num_bounds++;
 
   if (unlikely((m_num_bounds > 1) &&
@@ -670,28 +668,68 @@ NdbIndexScanOperation::setBound(const Nd
     return -1;
   }
 
-  for (j= 0; j<key_count; j++)
+  if (likely(!openRange))
   {
-    Uint32 bound_type;
-    /* If key is part of lower bound */
-    if (bound.low_key && j<bound.low_key_count)
-    {
-      /* Inclusive if defined, or matching rows can include this value */
-      bound_type= bound.low_inclusive  || j+1 < bound.low_key_count ?
-        BoundLE : BoundLT;
-      ndbrecord_insert_bound(key_record, key_record->key_indexes[j],
-                             bound.low_key, bound_type);
-    }
-    /* If key is part of upper bound */
-    if (bound.high_key && j<bound.high_key_count)
-    {
-      /* Inclusive if defined, or matching rows can include this value */
-      bound_type= bound.high_inclusive  || j+1 < bound.high_key_count ?
-        BoundGE : BoundGT;
-      ndbrecord_insert_bound(key_record, key_record->key_indexes[j],
-                             bound.high_key, bound_type);
+    /* If the low and high key pointers are the same and the key counts
+     * are the same, we send a single EQ bound to save bandwidth.
+     * An EQ bound is not sent if:
+     *   - the high and low key counts differ, or
+     *   - the high and low keys are equal but use different pointers.
+     * This could be improved in future with another setBound() variant.
+     */
+    const bool isEqRange= 
+      (bound.low_key == bound.high_key) &&
+      (bound.low_key_count == bound.high_key_count) &&
+      (bound.low_inclusive && bound.high_inclusive); // Does this matter?
+    
+    if (isEqRange)
+    {
+      /* Using BoundEQ will result in bound being sent only once */
+      for (j= 0; j<key_count; j++)
+      {
+        ndbrecord_insert_bound(key_record, key_record->key_indexes[j],
+                               bound.low_key, BoundEQ);
+      }
+    }
+    else
+    {
+      /* Distinct upper and lower bounds, must specify them independently */
+      /* Note :  Protocol allows individual columns to be specified as EQ
+       * or some prefix of columns.  This is not currently supported from
+       * NDBAPI.
+       */
+      for (j= 0; j<key_count; j++)
+      {
+        Uint32 bound_type;
+        /* If key is part of lower bound */
+        if (bound.low_key && j<bound.low_key_count)
+        {
+          /* Inclusive if defined, or matching rows can include this value */
+          bound_type= bound.low_inclusive  || j+1 < bound.low_key_count ?
+            BoundLE : BoundLT;
+          ndbrecord_insert_bound(key_record, key_record->key_indexes[j],
+                                 bound.low_key, bound_type);
+        }
+        /* If key is part of upper bound */
+        if (bound.high_key && j<bound.high_key_count)
+        {
+          /* Inclusive if defined, or matching rows can include this value */
+          bound_type= bound.high_inclusive  || j+1 < bound.high_key_count ?
+            BoundGE : BoundGT;
+          ndbrecord_insert_bound(key_record, key_record->key_indexes[j],
+                                 bound.high_key, bound_type);
+        }
+      }
     }
   }
+  else
+  {
+    /* Open range - all rows must be returned.
+     * To encode this, we'll request all rows where the first
+     * key column value is >= NULL
+     */
+    insert_open_bound(key_record);
+  }
 
   /* Set the length of this bound
    * Length = bound end - bound start
@@ -730,8 +768,11 @@ NdbIndexScanOperation::setBound(const Nd
                                     bound.low_key,
                                     bound.high_key,
                                     distkey_min))
+    {
+      assert(! openRange);
       setDistKeyFromRange(key_record, m_attribute_record,
                           bound.low_key, distkey_min);
+    }
   }
   return 0;
 } // ::setBound();
@@ -3001,6 +3042,33 @@ NdbIndexScanOperation::getCurrentKeySize
 }
 
 
+int
+NdbIndexScanOperation::insert_open_bound(const NdbRecord* key_record)
+{
+  /* We want to insert an open bound into a scan
+   * This is done by requesting all rows with first key column
+   * >= NULL (so, confusingly, bound is <= NULL)
+   * Sending this as bound info for an open bound allows us to 
+   * also send the range number etc so that MRR scans can include
+   * open ranges.
+   * Note that MRR scans with open ranges are an inefficient use of
+   * MRR.  Really the application should realise that all rows are
+   * being processed and only fetch them once.
+   */
+  const NdbRecord::Attr *column= &key_record->columns[0];
+  
+  /* Create NULL attribute header. */
+  AttributeHeader ah(column->index_attrId, 0);
+  
+  Uint32 buf[2] = { NdbIndexScanOperation::BoundLE, ah.m_value };
+  insertBOUNDS(buf, 2);
+  
+  theTupKeyLen+= 2;
+  
+  return 0;
+}
+
+
 /* IndexScan readTuples - part of old scan API
  * This call does the minimum amount of validation and state
  * storage possible.  Most of the scan initialisation is done

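Two behavioural points in the setBound() rewrite above are easy to miss: an open range (no bounds at all) no longer raises error 4541 but is encoded as 'first key column >= NULL', and a range whose low and high sides share the same row pointer and key count collapses into a single BoundEQ. The EQ detection in isolation, on a cut-down IndexBound with just the fields involved:

    struct IndexBound
    {
      const char* low_key;   unsigned low_key_count;  bool low_inclusive;
      const char* high_key;  unsigned high_key_count; bool high_inclusive;
    };

    static bool is_eq_range(const IndexBound& b)
    {
      return b.low_key == b.high_key &&             // same row pointer
             b.low_key_count == b.high_key_count && // same prefix length
             b.low_inclusive && b.high_inclusive;   // exclusive is never EQ
    }
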
=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2009-02-09 13:34:12 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2009-03-19 13:28:34 +0000
@@ -288,6 +288,7 @@ ErrorBundle ErrorCodes[] = {
   { 4348, DMEC, IE, "Inconsistency detected at alter index" },
   { 4349, DMEC, IE, "Inconsistency detected at index usage" },
   { 4350, DMEC, IE, "Transaction already aborted" },
+  { 4351, DMEC, TO, "Timeout/deadlock during index build" },
 
   /**
    * Application error
@@ -432,7 +433,8 @@ ErrorBundle ErrorCodes[] = {
   { 1513, DMEC, IE, "Filegroup not online" },
   { 1514, DMEC, SE, "Currently there is a limit of one logfile group" },
   { 1515, DMEC, SE, "Currently there is a 4G limit of one undo/data-file in 32-bit host" },
-  
+  { 1516, DMEC, SE, "File to small" },
+
   { 773,  DMEC, SE, "Out of string memory, please modify StringMemory config parameter" },
   { 775,  DMEC, SE, "Create file is not supported when Diskless=1" },
   { 776,  DMEC, AE, "Index created on temporary table must itself be temporary" },
@@ -587,7 +589,10 @@ ErrorBundle ErrorCodes[] = {
   { 4538, DMEC, AE, "NdbInterpretedCode instruction requires that table is set" },
   { 4539, DMEC, AE, "NdbInterpretedCode not supported for operation type" },
   { 4540, DMEC, AE, "Attempt to pass an Index column to createRecord.  Use base table columns only" },
-  { 4541, DMEC, AE, "IndexBound has no bound information" },
+  { 4541, DMEC, AE, "IndexBound has no bound information" }, // No longer generated
+  /* 4542-4546 used in later releases */
+  { 4547, DMEC, AE, "RecordSpecification has overlapping offsets" },
+  { 4548, DMEC, AE, "RecordSpecification has too many elements" },
 
   { 4200, DMEC, AE, "Status Error when defining an operation" },
   { 4201, DMEC, AE, "Variable Arrays not yet supported" },

=== modified file 'storage/ndb/test/include/HugoOperations.hpp'
--- a/storage/ndb/test/include/HugoOperations.hpp	2008-11-17 09:26:25 +0000
+++ b/storage/ndb/test/include/HugoOperations.hpp	2009-03-16 12:37:05 +0000
@@ -53,12 +53,14 @@ public:  
   int pkReadRecord(Ndb*,
                    int record,
                    int numRecords = 1,
-                   NdbOperation::LockMode lm = NdbOperation::LM_Read);
+                   NdbOperation::LockMode lm = NdbOperation::LM_Read,
+                   NdbOperation::LockMode * lmused = 0);
   
   int pkReadRandRecord(Ndb*,
                        int records,
                        int numRecords = 1,
-                       NdbOperation::LockMode lm = NdbOperation::LM_Read);
+                       NdbOperation::LockMode lm = NdbOperation::LM_Read,
+                       NdbOperation::LockMode * lmused = 0);
   
   int pkUpdateRecord(Ndb*,
 		     int recordNo,

=== modified file 'storage/ndb/test/ndbapi/bench/mainAsyncGenerator.cpp'
--- a/storage/ndb/test/ndbapi/bench/mainAsyncGenerator.cpp	2008-09-30 08:18:41 +0000
+++ b/storage/ndb/test/ndbapi/bench/mainAsyncGenerator.cpp	2009-03-16 23:49:57 +0000
@@ -411,21 +411,41 @@ NDB_COMMAND(DbAsyncGenerator, "DbAsyncGe
     cols[1].offset= offsetof(TransactionData, permission);
     cols[1].nullbit_byte_offset= 0;
     cols[1].nullbit_bit_in_byte=  0;
-    cols[2].column= tab->getColumn((int) IND_GROUP_ALLOW_INSERT);
-    cols[2].offset= offsetof(TransactionData, permission);
-    cols[2].nullbit_byte_offset= 0;
-    cols[2].nullbit_bit_in_byte=  0;
-    cols[3].column= tab->getColumn((int) IND_GROUP_ALLOW_DELETE);
-    cols[3].offset= offsetof(TransactionData, permission);
-    cols[3].nullbit_byte_offset= 0;
-    cols[3].nullbit_bit_in_byte=  0;
 
-    ndbRecordSharedDataPtr->groupTableNdbRecord=
-      dict->createRecord(tab, cols, 4, sizeof(cols[0]), 0);
+    ndbRecordSharedDataPtr->groupTableAllowReadNdbRecord=
+      dict->createRecord(tab, cols, 2, sizeof(cols[0]), 0);
+
+    if (ndbRecordSharedDataPtr->groupTableAllowReadNdbRecord == NULL)
+    {
+      ndbout << "Error creating record 2.1: " << dict->getNdbError() << endl;
+      return -1;
+    }
+
+    cols[1].column= tab->getColumn((int) IND_GROUP_ALLOW_INSERT);
+    cols[1].offset= offsetof(TransactionData, permission);
+    cols[1].nullbit_byte_offset= 0;
+    cols[1].nullbit_bit_in_byte=  0;
+
+    ndbRecordSharedDataPtr->groupTableAllowInsertNdbRecord=
+      dict->createRecord(tab, cols, 2, sizeof(cols[0]), 0);
+
+    if (ndbRecordSharedDataPtr->groupTableAllowInsertNdbRecord == NULL)
+    {
+      ndbout << "Error creating record 2.2: " << dict->getNdbError() << endl;
+      return -1;
+    }
+
+    cols[1].column= tab->getColumn((int) IND_GROUP_ALLOW_DELETE);
+    cols[1].offset= offsetof(TransactionData, permission);
+    cols[1].nullbit_byte_offset= 0;
+    cols[1].nullbit_bit_in_byte=  0;
+
+    ndbRecordSharedDataPtr->groupTableAllowDeleteNdbRecord=
+      dict->createRecord(tab, cols, 2, sizeof(cols[0]), 0);
 
-    if (ndbRecordSharedDataPtr->groupTableNdbRecord == NULL)
+    if (ndbRecordSharedDataPtr->groupTableAllowDeleteNdbRecord == NULL)
     {
-      ndbout << "Error creating record 2: " << dict->getNdbError() << endl;
+      ndbout << "Error creating record 2.3: " << dict->getNdbError() << endl;
       return -1;
     }
 

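The generator change above splits one four-column NdbRecord into three two-column ones by reusing a single RecordSpecification array and swapping the second entry before each createRecord() call. A hedged sketch of that pattern (dict, tab, the column names and permissionOffset are assumed/hypothetical):

    NdbDictionary::RecordSpecification cols[2];
    // cols[0] (the key column) is filled in once, as in the generator above.
    const char* permCols[3] = { "ALLOW_READ", "ALLOW_INSERT", "ALLOW_DELETE" };
    const NdbRecord* records[3];
    for (int i = 0; i < 3; i++)
    {
      cols[1].column = tab->getColumn(permCols[i]); // hypothetical names
      cols[1].offset = permissionOffset;            // same destination field
      cols[1].nullbit_byte_offset = 0;
      cols[1].nullbit_bit_in_byte = 0;
      records[i] = dict->createRecord(tab, cols, 2, sizeof(cols[0]), 0);
      if (records[i] == NULL)
        return -1; // see dict->getNdbError(), as in the generator above
    }
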
=== modified file 'storage/ndb/test/ndbapi/bench/ndb_async2.cpp'
--- a/storage/ndb/test/ndbapi/bench/ndb_async2.cpp	2008-09-30 08:18:41 +0000
+++ b/storage/ndb/test/ndbapi/bench/ndb_async2.cpp	2009-03-16 23:49:57 +0000
@@ -345,7 +345,7 @@ T3_Callback_1(int result, NdbConnection 
   {
     char* rowPtr= (char*) &td->transactionData;
     const NdbRecord* record= td->ndbRecordSharedData->
-      groupTableNdbRecord;
+      groupTableAllowReadNdbRecord;
     Uint32 m=0;
     unsigned char* mask= (unsigned char*) &m;
     
@@ -673,7 +673,7 @@ T4_Callback_1(int result, NdbConnection 
   {
     char* rowPtr= (char*) &td->transactionData;
     const NdbRecord* record= td->ndbRecordSharedData->
-      groupTableNdbRecord;
+      groupTableAllowInsertNdbRecord;
     Uint32 m=0;
     unsigned char* mask= (unsigned char*) &m;
 
@@ -1011,7 +1011,7 @@ T5_Callback_1(int result, NdbConnection 
   {
     char* rowPtr= (char*) &td->transactionData;
     const NdbRecord* record= td->ndbRecordSharedData->
-      groupTableNdbRecord;
+      groupTableAllowDeleteNdbRecord;
     Uint32 m=0;
     unsigned char* mask= (unsigned char*) &m;
 

=== modified file 'storage/ndb/test/ndbapi/bench/testData.h'
--- a/storage/ndb/test/ndbapi/bench/testData.h	2008-09-30 08:18:41 +0000
+++ b/storage/ndb/test/ndbapi/bench/testData.h	2009-03-16 23:49:57 +0000
@@ -122,7 +122,9 @@ typedef struct {
 
 typedef struct {
   const struct NdbRecord* subscriberTableNdbRecord;
-  const struct NdbRecord* groupTableNdbRecord;
+  const struct NdbRecord* groupTableAllowReadNdbRecord;
+  const struct NdbRecord* groupTableAllowInsertNdbRecord;
+  const struct NdbRecord* groupTableAllowDeleteNdbRecord;
   const struct NdbRecord* sessionTableNdbRecord;
   const struct NdbInterpretedCode* incrServerReadsProg;
   const struct NdbInterpretedCode* incrServerInsertsProg;

=== modified file 'storage/ndb/test/ndbapi/testBlobs.cpp'
--- a/storage/ndb/test/ndbapi/testBlobs.cpp	2008-05-23 10:20:10 +0000
+++ b/storage/ndb/test/ndbapi/testBlobs.cpp	2009-03-16 23:49:57 +0000
@@ -203,7 +203,9 @@ static unsigned g_pk1_offset= 0;
 static unsigned g_pk2_offset= 0;
 static unsigned g_pk3_offset= 0;
 static unsigned g_blob1_offset= 0;
+static unsigned g_blob1_null_offset= 0;
 static unsigned g_blob2_offset= 0;
+static unsigned g_blob2_null_offset= 0;
 static unsigned g_rowsize= 0;
 static const char* g_tsName= "DEFAULT-TS";
 static Uint32 g_batchSize= 0;
@@ -345,7 +347,9 @@ initConstants()
   g_pk3_offset= g_pk2_offset + g_opt.m_pk2chr.m_totlen;
   g_blob1_offset= g_pk3_offset + 2;
   g_blob2_offset= g_blob1_offset + sizeof(NdbBlob *);
-  g_rowsize= g_blob2_offset + sizeof(NdbBlob *);
+  g_blob1_null_offset= g_blob2_offset + sizeof(NdbBlob *);
+  g_blob2_null_offset= g_blob1_null_offset + 1;
+  g_rowsize= g_blob2_null_offset + 1;
 }
 
 static int
@@ -580,6 +584,8 @@ createTable(int storageType)
   spec[0].offset= g_pk1_offset;
   spec[numpks].column= dict_table->getColumn("BL1");
   spec[numpks].offset= g_blob1_offset;
+  spec[numpks].nullbit_byte_offset= g_blob1_null_offset;
+  spec[numpks].nullbit_bit_in_byte= 0;
   if (g_opt.m_pk2chr.m_len != 0)
   {
     spec[1].column= dict_table->getColumn("PK2");
@@ -591,6 +597,8 @@ createTable(int storageType)
   {
     spec[numpks+1].column= dict_table->getColumn("BL2");
     spec[numpks+1].offset= g_blob2_offset;
+    spec[numpks+1].nullbit_byte_offset= g_blob2_null_offset;
+    spec[numpks+1].nullbit_bit_in_byte= 0;
   }
   CHK((g_key_record= g_dic->createRecord(dict_table, &spec[0], numpks,
                                          sizeof(spec[0]))) != 0);

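The testBlobs fix above matters because of the new validateRecordSpec(): a nullable blob column now needs its own null-bit position, distinct from every data byte, or createRecord() fails with error 4547 (overlapping offsets). The essential shape of one such RecordSpecification entry, using the offsets defined in the test:

    NdbDictionary::RecordSpecification spec; // one entry shown
    spec.column = dict_table->getColumn("BL1");      // the blob column
    spec.offset = g_blob1_offset;                    // holds an NdbBlob*
    spec.nullbit_byte_offset = g_blob1_null_offset;  // a byte past all data
    spec.nullbit_bit_in_byte = 0;
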
=== modified file 'storage/ndb/test/ndbapi/testIndex.cpp'
--- a/storage/ndb/test/ndbapi/testIndex.cpp	2008-08-21 22:05:42 +0000
+++ b/storage/ndb/test/ndbapi/testIndex.cpp	2009-02-27 13:18:49 +0000
@@ -199,7 +199,10 @@ int create_index(NDBT_Context* ctx, int 
     ndbout << "FAILED!" << endl;
     const NdbError err = pNdb->getDictionary()->getNdbError();
     ERR(err);
-    if(err.classification == NdbError::ApplicationError)
+    if (err.classification == NdbError::ApplicationError)
+      return SKIP_INDEX;
+
+    if (err.status == NdbError::TemporaryError)
       return SKIP_INDEX;
     
     return NDBT_FAILED;

=== modified file 'storage/ndb/test/ndbapi/testNdbApi.cpp'
--- a/storage/ndb/test/ndbapi/testNdbApi.cpp	2008-11-08 21:06:51 +0000
+++ b/storage/ndb/test/ndbapi/testNdbApi.cpp	2009-03-18 07:14:50 +0000
@@ -22,6 +22,7 @@
 #include <Vector.hpp>
 #include <random.h>
 #include <NdbTick.h>
+#include <my_sys.h>
 
 #define MAX_NDB_OBJECTS 32678
 
@@ -1982,6 +1983,600 @@ simpleReadAbortOnError(NDBT_Context* ctx
 }
 
 
+int
+testNdbRecordPkAmbiguity(NDBT_Context* ctx, NDBT_Step* step)
+{
+  /* NdbRecord Insert and Write can take 2 record and row ptrs
+   * In all cases, the AttrInfo sent to TC for PK columns
+   * should be the same as the KeyInfo sent to TC to avoid
+   * inconsistency
+   * Approach :
+   *   1) Use Insert/Write to insert tuple with different 
+   *      values for pks in attr row
+   *   2) Read back all data, including PKs
+   *   3) Verify all values.
+   */
+  Ndb* pNdb = GETNDB(step);
+  const NdbDictionary::Table* pTab= ctx->getTab();
+  const NdbRecord* tabRec= pTab->getDefaultRecord();
+  const Uint32 sizeOfTabRec= NdbDictionary::getRecordRowLength(tabRec);
+  char keyRowBuf[ NDB_MAX_TUPLE_SIZE_IN_WORDS << 2 ];
+  char attrRowBuf[ NDB_MAX_TUPLE_SIZE_IN_WORDS << 2 ];
+  bzero(keyRowBuf, sizeof(keyRowBuf));
+  bzero(attrRowBuf, sizeof(attrRowBuf));
+
+  HugoCalculator calc(*pTab);
+
+  const int numRecords= 100;
+
+  for (int optype=0; optype < 2; optype++)
+  {
+    /* First, let's calculate the correct Hugo values for this row */
+
+    for (int record=0; record < numRecords; record++)
+    {
+      int updates= 0;
+      for (int col=0; col<pTab->getNoOfColumns(); col++)
+      {
+        char* valPtr= NdbDictionary::getValuePtr(tabRec,
+                                                 keyRowBuf,
+                                                 col);
+        CHECK(valPtr != NULL);
+        
+        int len= pTab->getColumn(col)->getSizeInBytes();
+        Uint32 real_len;
+        bool isNull= (calc.calcValue(record, col, updates, valPtr,
+                                     len, &real_len) == NULL);
+        if (pTab->getColumn(col)->getNullable())
+        {
+          NdbDictionary::setNull(tabRec,
+                                 keyRowBuf,
+                                 col,
+                                 isNull);
+        }
+      }
+      
+      /* Now copy the values to the Attr record */
+      memcpy(attrRowBuf, keyRowBuf, sizeOfTabRec);
+      
+      Uint32 mippleAttempts= 3;
+      
+      while (memcmp(keyRowBuf, attrRowBuf, sizeOfTabRec) == 0)
+      {
+        /* Now doctor the PK values in the Attr record */
+        for (int col=0; col<pTab->getNoOfColumns(); col++)
+        {
+          if (pTab->getColumn(col)->getPrimaryKey())
+          {
+            char* valPtr= NdbDictionary::getValuePtr(tabRec,
+                                                     attrRowBuf,
+                                                     col);
+            CHECK(valPtr != NULL);
+            
+            int len= pTab->getColumn(col)->getSizeInBytes();
+            Uint32 real_len;
+            /* We use the PK value for some other record */
+            int badRecord= record + (rand() % 1000);
+            bool isNull= (calc.calcValue(badRecord, col, updates, valPtr,
+                                         len, &real_len) == NULL);
+            CHECK(! isNull);
+          }
+        }
+        
+        /* Can try to get variance only a limited number of times */
+        CHECK(mippleAttempts-- != 0);
+      }
+      
+      /* Ok, now have key and attr records with different values for
+       * PK cols, let's try to insert
+       */
+      NdbTransaction* trans=pNdb->startTransaction();
+      CHECK(trans != 0);
+      
+      const NdbOperation* op= NULL;
+      if (optype == 0)
+      {
+        // ndbout << "Using insertTuple" << endl;
+        op= trans->insertTuple(tabRec,
+                               keyRowBuf,
+                               tabRec,
+                               attrRowBuf);
+      }
+      else
+      {
+        // ndbout << "Using writeTuple" << endl;
+        op= trans->writeTuple(tabRec,
+                              keyRowBuf,
+                              tabRec,
+                              attrRowBuf);
+      }
+      CHECK(op != 0);
+      
+      CHECK(trans->execute(Commit) == 0);
+      trans->close();
+      
+      /* Now read back */
+      memset(attrRowBuf, 0, sizeOfTabRec);
+      
+      Uint32 pkVal= 0;
+      pkVal= *(Uint32*) NdbDictionary::getValuePtr(tabRec,
+                                                   keyRowBuf,
+                                                   0);
+
+      trans= pNdb->startTransaction();
+      op= trans->readTuple(tabRec,
+                           keyRowBuf,
+                           tabRec,
+                           attrRowBuf);
+      CHECK(op != 0);
+      CHECK(trans->execute(Commit) == 0);
+      CHECK(trans->getNdbError().code == 0);
+      trans->close();
+      
+      /* Verify the values read back */
+      for (int col=0; col<pTab->getNoOfColumns(); col++)
+      {
+        const char* valPtr= NdbDictionary::getValuePtr(tabRec,
+                                                       attrRowBuf,
+                                                       col);
+        CHECK(valPtr != NULL);
+        
+        char calcBuff[ NDB_MAX_TUPLE_SIZE_IN_WORDS << 2 ];
+        int len= pTab->getColumn(col)->getSizeInBytes();
+        Uint32 real_len;
+        bool isNull= (calc.calcValue(record, col, updates, calcBuff,
+                                     len, &real_len) == NULL);
+        bool colIsNullable= pTab->getColumn(col)->getNullable();
+        if (isNull)
+        {
+          CHECK(colIsNullable);
+          if (!NdbDictionary::isNull(tabRec,
+                                     attrRowBuf,
+                                     col))
+          {
+            ndbout << "Error, col " << col 
+                   << " (pk=" <<  pTab->getColumn(col)->getPrimaryKey()
+                   << ") should be Null, but is not" << endl;
+            return NDBT_FAILED;
+          }
+        }
+        else
+        {
+          if (colIsNullable)
+          {
+            if (NdbDictionary::isNull(tabRec,
+                                      attrRowBuf,
+                                      col))
+            {
+              ndbout << "Error, col " << col 
+                     << " (pk=" << pTab->getColumn(col)->getPrimaryKey()
+                     << ") should be non-Null but is null" << endl;
+              return NDBT_FAILED;
+            };
+          }
+          
+          /* Compare actual data read back */
+          if( memcmp(calcBuff, valPtr, real_len) != 0 )
+          {
+            ndbout << "Error, col " << col 
+                   << " (pk=" << pTab->getColumn(col)->getPrimaryKey()
+                   << ") should be equal, but isn't for record "
+                   << record << endl;
+            ndbout << "Expected :";
+            for (Uint32 i=0; i < real_len; i++)
+            {
+              ndbout_c("%x ", calcBuff[i]);
+            }
+            ndbout << endl << "Received :";
+            for (Uint32 i=0; i < real_len; i++)
+            {
+              ndbout_c("%x ", valPtr[i]);
+            }
+            ndbout << endl;
+            
+            return NDBT_FAILED;
+          }
+        }
+      }
+      
+      /* Now delete the tuple */
+      trans= pNdb->startTransaction();
+      op= trans->deleteTuple(tabRec,
+                             keyRowBuf,
+                             tabRec);
+      CHECK(op != 0);
+      CHECK(trans->execute(Commit) == 0);
+      
+      trans->close();
+    }
+  }
+
+  return NDBT_OK;
+  
+}
+
+int
+testNdbRecordPKUpdate(NDBT_Context* ctx, NDBT_Step* step)
+{
+  /* In general, we should be able to update primary key
+   * values.  We cannot *change* them, but for cases where
+   * a collation maps several discrete values to a single
+   * normalised value, it should be possible to modify
+   * the discrete value of the key, as the normalised 
+   * key value is unchanged.
+   * Rather than testing with such a collation here, we 
+   * cop out and test for errors with a 'null' change.
+   */
+  Ndb* pNdb = GETNDB(step);
+  const NdbDictionary::Table* pTab= ctx->getTab();
+  const NdbRecord* tabRec= pTab->getDefaultRecord();
+  char rowBuf[ NDB_MAX_TUPLE_SIZE_IN_WORDS << 2 ];
+  char badKeyRowBuf[ NDB_MAX_TUPLE_SIZE_IN_WORDS << 2 ];
+
+  HugoCalculator calc(*pTab);
+
+  const int numRecords= 100;
+
+  /* First, let's calculate the correct Hugo values for this row */
+  for (int record=0; record < numRecords; record++)
+  {
+    int updates= 0;
+    for (int col=0; col<pTab->getNoOfColumns(); col++)
+    {
+      char* valPtr= NdbDictionary::getValuePtr(tabRec,
+                                               rowBuf,
+                                               col);
+      CHECK(valPtr != NULL);
+      
+      int len= pTab->getColumn(col)->getSizeInBytes();
+      Uint32 real_len;
+      bool isNull= (calc.calcValue(record, col, updates, valPtr,
+                                   len, &real_len) == NULL);
+      if (pTab->getColumn(col)->getNullable())
+      {
+        NdbDictionary::setNull(tabRec,
+                               rowBuf,
+                               col,
+                               isNull);
+      }      
+    }
+
+    /* Create similar row, but with different id col (different
+     * PK from p.o.v. of PK column update
+     */
+    memcpy(badKeyRowBuf, rowBuf, NDB_MAX_TUPLE_SIZE_IN_WORDS << 2);
+    for (int col=0; col<pTab->getNoOfColumns(); col++)
+    {
+      if (calc.isIdCol(col))
+      {
+        char* valPtr= NdbDictionary::getValuePtr(tabRec,
+                                                 badKeyRowBuf,
+                                                 col);
+        Uint32 badId= record+333;
+        memcpy(valPtr, &badId, sizeof(badId));
+      }
+    }
+
+    NdbTransaction* trans=pNdb->startTransaction();
+    CHECK(trans != 0);
+    
+    const NdbOperation* op= trans->insertTuple(tabRec,
+                                               rowBuf);
+    CHECK(op != 0);
+    
+    CHECK(trans->execute(Commit) == 0);
+    trans->close();
+    
+    /* Now update the PK columns */
+    trans= pNdb->startTransaction();
+    op= trans->updateTuple(tabRec,
+                           rowBuf,
+                           tabRec,
+                           rowBuf);
+    CHECK(op != 0);
+    CHECK(trans->execute(Commit) == 0);
+    CHECK(trans->getNdbError().code == 0);
+    trans->close();
+
+    /* Now update PK with scan takeover op */
+    trans= pNdb->startTransaction();
+
+    NdbScanOperation* scanOp=trans->scanTable(tabRec,
+                                              NdbOperation::LM_Exclusive);
+    CHECK(scanOp != 0);
+    
+    CHECK(trans->execute(NoCommit) == 0);
+    
+    /* Now update PK with lock takeover op */
+    const char* rowPtr;
+    CHECK(scanOp->nextResult(&rowPtr, true, true) == 0);
+    
+    op= scanOp->updateCurrentTuple(trans,
+                                   tabRec,
+                                   rowBuf);
+    CHECK(op != NULL);
+    
+    CHECK(trans->execute(Commit) == 0);
+    
+    trans->close();
+
+    /* Now attempt a bad PK update with a scan takeover op.
+     * This is interesting because the NDB API normally takes
+     * the values of PK columns in an update from the key
+     * row - so it is not possible to pass a 'different'
+     * value (except when collations are used).
+     * A scan takeover update, however, takes the PK values
+     * from the attribute record, so different values can
+     * be supplied.
+     * Here we check that differing values make the
+     * kernel complain.
+     */
+    trans= pNdb->startTransaction();
+
+    scanOp=trans->scanTable(tabRec,
+                            NdbOperation::LM_Exclusive);
+    CHECK(scanOp != 0);
+    
+    CHECK(trans->execute(NoCommit) == 0);
+    
+    /* Fetch a row to take over for the bad PK update */
+    CHECK(scanOp->nextResult(&rowPtr, true, true) == 0);
+    
+    op= scanOp->updateCurrentTuple(trans,
+                                   tabRec,
+                                   badKeyRowBuf);
+    CHECK(op != NULL);
+    
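+    /* The kernel should reject the mismatched PK values with error 897 */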
+    CHECK(trans->execute(Commit) == -1);
+    CHECK(trans->getNdbError().code == 897);
+
+    trans->close();
+
+    /* Now delete the tuple */
+    trans= pNdb->startTransaction();
+    op= trans->deleteTuple(tabRec,
+                           rowBuf,
+                           tabRec);
+    CHECK(op != 0);
+    CHECK(trans->execute(Commit) == 0);
+    
+    trans->close();
+  }
+
+  return NDBT_OK;
+  
+}
+
+static 
+BaseString getKeyVal(int record, bool upper)
+{
+  /* Create VARCHAR format key with upper or
+   * lower case leading char
+   */
+  BaseString keyData;
+  char c= 'a' + (record % ('z' - 'a'));
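+  /* 'z' - 'a' == 25, so the leading char cycles through 'a'..'y' */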
+  
+  keyData.appfmt("%cblahblah%d", c, record);
+  
+  if (upper)
+    keyData.ndb_toupper();
+
+  BaseString varCharKey;
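+  /* Varchar wire format: one length byte followed by the data bytes */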
+  varCharKey.appfmt("%c%s", keyData.length(), keyData.c_str());
+  
+  return varCharKey;
+}
+
+int
+testNdbRecordCICharPKUpdate(NDBT_Context* ctx, NDBT_Step* step)
+{
+  /* Test a change to a Varchar primary key with a case
+   * insensitive collation.
+   */
+  Ndb* pNdb = GETNDB(step);
+  const NdbDictionary::Table* pTab= ctx->getTab();
+  
+  /* Run as a 'T1' testcase - do nothing for other tables */
+  if (strcmp(pTab->getName(), "T1") != 0)
+    return NDBT_OK;
+
+  CHARSET_INFO* charset= NULL;
+  const char* csname="latin1_general_ci";
+  charset= get_charset_by_name(csname, MYF(0));
+  
+  if (charset == NULL)
+  {
+    ndbout << "Couldn't get charset " << csname << endl;
+    return NDBT_FAILED;
+  }
+
+  /* Create table with required schema */
+  NdbDictionary::Table tab;
+  tab.setName("TAB_CICHARPKUPD");
+  
+  NdbDictionary::Column pk;
+  pk.setName("PK");
+  pk.setType(NdbDictionary::Column::Varchar);
+  pk.setLength(20);
+  pk.setNullable(false);
+  pk.setPrimaryKey(true);
+  pk.setCharset(charset);
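+  /* With latin1_general_ci, keys differing only in case normalise to the same value */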
+  tab.addColumn(pk);
+
+  NdbDictionary::Column data;
+  data.setName("DATA");
+  data.setType(NdbDictionary::Column::Unsigned);
+  data.setNullable(false);
+  data.setPrimaryKey(false);
+  tab.addColumn(data);
+
+  pNdb->getDictionary()->dropTable(tab.getName());
+  if(pNdb->getDictionary()->createTable(tab) != 0)
+  {
+    ndbout << "Create table failed with error : "
+           << pNdb->getDictionary()->getNdbError().code
+           << pNdb->getDictionary()->getNdbError().message
+           << endl;
+    return NDBT_FAILED;
+  }
+  
+  ndbout << (NDBT_Table&)tab << endl;
+
+  pTab= pNdb->getDictionary()->getTable(tab.getName());
+  
+  const NdbRecord* tabRec= pTab->getDefaultRecord();
+  const Uint32 rowLen= NDB_MAX_TUPLE_SIZE_IN_WORDS << 2;
+  char ucRowBuf[ rowLen ];
+  char lcRowBuf[ rowLen ];
+  char readBuf[ rowLen ];
+  char* ucPkPtr= NdbDictionary::getValuePtr(tabRec,
+                                            ucRowBuf,
+                                            0);
+  Uint32* ucDataPtr= (Uint32*) NdbDictionary::getValuePtr(tabRec,
+                                                          ucRowBuf,
+                                                          1);
+  char* lcPkPtr= NdbDictionary::getValuePtr(tabRec,
+                                            lcRowBuf,
+                                            0);
+  Uint32* lcDataPtr= (Uint32*) NdbDictionary::getValuePtr(tabRec,
+                                                          lcRowBuf,
+                                                          1);
+
+  char* readPkPtr= NdbDictionary::getValuePtr(tabRec,
+                                              readBuf,
+                                              0);
+  Uint32* readDataPtr= (Uint32*) NdbDictionary::getValuePtr(tabRec,
+                                                            readBuf,
+                                                            1);
+    
+
+  const int numRecords= 100;
+  BaseString upperKey;
+  BaseString lowerKey;
+
+  for (int record=0; record < numRecords; record++)
+  {
+    upperKey.assign(getKeyVal(record, true).c_str());
+    lowerKey.assign(getKeyVal(record, false).c_str());
+    
+    memcpy(ucPkPtr, upperKey.c_str(), upperKey.length());
+    memcpy(lcPkPtr, lowerKey.c_str(), lowerKey.length());
+    memcpy(ucDataPtr, &record, sizeof(record));
+    memcpy(lcDataPtr, &record, sizeof(record));
+
+    /* Insert with upper case */
+    NdbTransaction* trans=pNdb->startTransaction();
+    CHECK(trans != 0);
+    
+    const NdbOperation* op= trans->insertTuple(tabRec,
+                                               ucRowBuf);
+    CHECK(op != 0);
+    
+    int rc= trans->execute(Commit);
+    if (rc != 0)
+      ndbout << "Error " << trans->getNdbError().message << endl;
+    CHECK(rc == 0);
+    trans->close();
+
+    /* Read with upper case */
+    trans=pNdb->startTransaction();
+    CHECK(trans != 0);
+    op= trans->readTuple(tabRec,
+                         ucRowBuf,
+                         tabRec,
+                         readBuf);
+    CHECK(op != 0);
+    CHECK(trans->execute(Commit) == 0);
+    trans->close();
+
+    /* Check key and data read */
+    CHECK(memcmp(ucPkPtr, readPkPtr, ucPkPtr[0] + 1) == 0); /* length byte + all data bytes */
+    CHECK(memcmp(ucDataPtr, readDataPtr, sizeof(int)) == 0);
+    
+    memset(readBuf, 0, NDB_MAX_TUPLE_SIZE_IN_WORDS << 2);
+
+    /* Read with lower case */
+    trans=pNdb->startTransaction();
+    CHECK(trans != 0);
+    op= trans->readTuple(tabRec,
+                         lcRowBuf,
+                         tabRec,
+                         readBuf);
+    CHECK(op != 0);
+    CHECK(trans->execute(Commit) == 0);
+    trans->close();
+
+    /* Check key and data read - the stored key is still upper case */
+    CHECK(memcmp(ucPkPtr, readPkPtr, ucPkPtr[0] + 1) == 0);
+    CHECK(memcmp(ucDataPtr, readDataPtr, sizeof(int)) == 0);
+    
+    memset(readBuf, 0, NDB_MAX_TUPLE_SIZE_IN_WORDS << 2);
+
+    /* Now update just the PK column to lower case */
+    trans= pNdb->startTransaction();
+    unsigned char mask[1];
+    mask[0]= 1;
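+    /* Attribute mask: only bit 0 set, so only column 0 (the PK) is updated */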
+    op= trans->updateTuple(tabRec,
+                           lcRowBuf,
+                           tabRec,
+                           lcRowBuf,
+                           mask);
+    CHECK(op != 0);
+    CHECK(trans->execute(Commit) == 0);
+    CHECK(trans->getNdbError().code == 0);
+    trans->close();
+
+    /* Now check that we can read with the upper case key */
+    memset(readBuf, 0, NDB_MAX_TUPLE_SIZE_IN_WORDS << 2);
+    
+    trans=pNdb->startTransaction();
+    CHECK(trans != 0);
+    op= trans->readTuple(tabRec,
+                         ucRowBuf,
+                         tabRec,
+                         readBuf);
+    CHECK(op != 0);
+    CHECK(trans->execute(Commit) == 0);
+    trans->close();
+
+    /* Check key and data read - the PK is now stored in lower case */
+    CHECK(memcmp(lcPkPtr, readPkPtr, lcPkPtr[0] + 1) == 0);
+    CHECK(memcmp(lcDataPtr, readDataPtr, sizeof(int)) == 0);
+
+    /* Now check that we can read with the lower case key */
+    memset(readBuf, 0, NDB_MAX_TUPLE_SIZE_IN_WORDS << 2);
+    
+    trans=pNdb->startTransaction();
+    CHECK(trans != 0);
+    op= trans->readTuple(tabRec,
+                         lcRowBuf,
+                         tabRec,
+                         readBuf);
+    CHECK(op != 0);
+    CHECK(trans->execute(Commit) == 0);
+    trans->close();
+
+    /* Check key and data read */
+    CHECK(memcmp(lcPkPtr, readPkPtr, lcPkPtr[0] + 1) == 0);
+    CHECK(memcmp(lcDataPtr, readDataPtr, sizeof(int)) == 0);
+
+
+    /* Now delete the tuple */
+    trans= pNdb->startTransaction();
+    op= trans->deleteTuple(tabRec,
+                           ucRowBuf,
+                           tabRec);
+    CHECK(op != 0);
+    CHECK(trans->execute(Commit) == 0);
+
+    trans->close();
+  }
+
+  return NDBT_OK;
+  
+}
+
+
 NDBT_TESTSUITE(testNdbApi);
 TESTCASE("MaxNdb", 
 	 "Create Ndb objects until no more can be created\n"){ 
@@ -2102,6 +2697,18 @@ TESTCASE("SimpleReadAbortOnError",
          "Test behaviour of Simple reads with Abort On Error"){
   INITIALIZER(simpleReadAbortOnError);
 }
+TESTCASE("NdbRecordPKAmbiguity",
+         "Test behaviour of NdbRecord insert with ambig. pk values"){
+  INITIALIZER(testNdbRecordPkAmbiguity);
+}
+TESTCASE("NdbRecordPKUpdate",
+         "Verify that primary key columns can be updated"){
+  INITIALIZER(testNdbRecordPKUpdate);
+}
+TESTCASE("NdbRecordCICharPKUpdate",
+         "Verify that a case-insensitive char pk column can be updated"){
+  INITIALIZER(testNdbRecordCICharPKUpdate);
+}
 NDBT_TESTSUITE_END(testNdbApi);
 
 int main(int argc, const char** argv){

=== modified file 'storage/ndb/test/ndbapi/testTransactions.cpp'
--- a/storage/ndb/test/ndbapi/testTransactions.cpp	2008-11-08 20:45:48 +0000
+++ b/storage/ndb/test/ndbapi/testTransactions.cpp	2009-03-18 06:26:50 +0000
@@ -48,6 +48,17 @@ struct OperationTestCase {
 
 #define X -1
 
+/**
+ * //XX1 - SimpleRead can read either the primary or the backup replica,
+ *         but uses locks.
+ *         This means that the combination of S-READ and ReadEx/ScanEx
+ *         will yield different results depending on which TC node the
+ *         S-READ is started on...
+ *
+ *         NOTE: S-READ vs DML is predictable, as DML locks both replicas
+ *
+ *         Therefore those combinations are removed from the matrix
+ */
 OperationTestCase matrix[] = {
   { "ReadRead",         true, "READ",   1, "READ",      0, 1,   0, 1 },
   { "ReadReadEx",       true, "READ",   1, "READ-EX", 266, X,   0, 1 },
@@ -96,7 +107,7 @@ OperationTestCase matrix[] = {
 
   { "ScanExRead",       true, "SCAN-EX",1, "READ",    266, 1,   0, 1 },
   { "ScanExReadEx",     true, "SCAN-EX",1, "READ-EX", 266, 1,   0, 1 },
-  { "ScanExSimpleRead", true, "SCAN-EX",1, "S-READ",  266, 1,   0, 1 },
+//XX1  { "ScanExSimpleRead", true, "SCAN-EX",1, "S-READ",  266, 1,   0, 1 },
   { "ScanExDirtyRead",  true, "SCAN-EX",1, "D-READ",    0, 1,   0, 1 },
   { "ScanExInsert",     true, "SCAN-EX",1, "INSERT",  266, X,   0, 1 },
   { "ScanExUpdate",     true, "SCAN-EX",1, "UPDATE",  266, 2,   0, 1 },
@@ -128,7 +139,7 @@ OperationTestCase matrix[] = {
 
   { "ReadExRead",       true, "READ-EX",1, "READ",    266, X,   0, 1 },
   { "ReadExReadEx",     true, "READ-EX",1, "READ-EX", 266, X,   0, 1 },
-  { "ReadExSimpleRead", true, "READ-EX",1, "S-READ",  266, X,   0, 1 },
+//XX1  { "ReadExSimpleRead", true, "READ-EX",1, "S-READ",  266, X,   0, 1 },
   { "ReadExDirtyRead",  true, "READ-EX",1, "D-READ",    0, 1,   0, 1 },
   { "ReadExInsert",     true, "READ-EX",1, "INSERT",  266, X,   0, 1 },
   { "ReadExUpdate",     true, "READ-EX",1, "UPDATE",  266, X,   0, 1 },

=== modified file 'storage/ndb/test/ndbapi/test_event.cpp'
--- a/storage/ndb/test/ndbapi/test_event.cpp	2008-11-11 08:56:28 +0000
+++ b/storage/ndb/test/ndbapi/test_event.cpp	2009-03-13 12:44:27 +0000
@@ -23,6 +23,7 @@
 #include <NdbRestarts.hpp>
 #include <signaldata/DumpStateOrd.hpp>
 #include <NdbEnv.h>
+#include <Bitmask.hpp>
 
 #define GETNDB(ps) ((NDBT_NdbApiStep*)ps)->getNdb()
 
@@ -1691,24 +1692,70 @@ static int runMulti_NR(NDBT_Context* ctx
   DBUG_RETURN(NDBT_OK);
 }
 
+typedef Bitmask<(MAX_NDB_NODES + 31) / 32> NdbNodeBitmask;
+
+static
+int 
+restartNodes(NdbNodeBitmask mask)
+{
+  int cnt = 0;
+  int nodes[MAX_NDB_NODES];
+  NdbRestarter res;
+  for (Uint32 i = 0; i<MAX_NDB_NODES; i++)
+  {
+    if (mask.get(i))
+    {
+      nodes[cnt++] = i;
+      res.restartOneDbNode(i,
+                           /** initial */ false,
+                           /** nostart */ true,
+                           /** abort   */ true);
+    }
+  }
+
+  if (res.waitNodesNoStart(nodes, cnt) != 0)
+    return NDBT_FAILED;
+
+  res.startNodes(nodes, cnt);
+
+  return res.waitClusterStarted();
+}
+
 static int restartAllNodes()
 {
   NdbRestarter restarter;
-  int id = 0;
-  do {
-    int nodeId = restarter.getDbNodeId(id++);
-    ndbout << "Restart node " << nodeId << endl; 
-    if(restarter.restartOneDbNode(nodeId, false, false, true) != 0){
-      g_err << "Failed to restartNextDbNode" << endl;
-      break;
-    }    
-    if(restarter.waitClusterStarted(60) != 0){
-      g_err << "Cluster failed to start" << endl;
-      break;
+  NdbNodeBitmask ng;
+  NdbNodeBitmask nodes0;
+  NdbNodeBitmask nodes1;
+
+  /**
+   * Restart all nodes using two batched restarts
+   *   instead of one by one... as that takes too long
+   */
+  for (Uint32 i = 0; i<restarter.getNumDbNodes(); i++)
+  {
+    int nodeId = restarter.getDbNodeId(i);
+    if (ng.get(restarter.getNodeGroup(nodeId)) == false)
+    {
+      nodes0.set(nodeId);
+      ng.set(restarter.getNodeGroup(nodeId));
     }
-    id = id % restarter.getNumDbNodes();
-  } while (id);
-  return id != 0;
+    else
+    {
+      nodes1.set(nodeId);
+    }
+  }
+
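+  /* nodes0 now holds one node from each node group, nodes1 the rest,
+   * so every node group keeps a live node during each batch
+   */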
+  int res;
+  if ((res = restartNodes(nodes0)) != NDBT_OK)
+  {
+    return res;
+  }
+
+  return restartNodes(nodes1);
 }
 
 static int runCreateDropNR(NDBT_Context* ctx, NDBT_Step* step)

=== added file 'storage/ndb/test/run-test/check-tests.sh'
--- a/storage/ndb/test/run-test/check-tests.sh	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/test/run-test/check-tests.sh	2009-03-18 09:51:56 +0000
@@ -0,0 +1,64 @@
+#!/bin/sh
+
+set -e
+
+files="daily-basic-tests.txt daily-devel-tests.txt upgrade-tests.txt"
+
+die(){
+    echo "error at $1 : $2"
+    exit 1
+}
+
+check_state(){
+    if  [ $1 != $2 ]
+    then
+	die $3 $4
+    fi
+}
+
+check_file(){
+    file=$1
+    lineno=0
+    testcase=0
+
+    echo -n "-- checking $file..."
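+    # Wrap each line as ^line$ so the case patterns below can anchor on line start and end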
+    cat $file | awk '{ print "^" $0 "$";}' | while read line
+    do
+	lineno=$(expr $lineno + 1)
+	if [ $(echo $line | grep -c "^^#") -ne 0 ]
+	then
+	    continue
+	fi
+	
+	case "$line" in
+	    ^max-time*)
+		testcase=$(expr $testcase + 1);;
+	    ^cmd*)
+		if [ $(echo $line | wc -w) -ne 2 ]
+		then
+		    die $file $lineno
+		fi
+		testcase=$(expr $testcase + 2);;
+	    ^args*)
+		testcase=$(expr $testcase + 4);;
+	    ^type*)
+		;;
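+	    # A blank line ends a test case: bits 1 (max-time) + 2 (cmd) + 4 (args) must all be set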
+	    ^$) 
+                if [ $testcase -ne 7 ]
+		then
+		    die $file $lineno
+		else
+		    testcase=0
+		    cnt=$(expr $cnt + 1)
+		fi;;
+	    *)
+	        die $file $lineno
+	esac
+    done
+    echo "ok"
+}
+   
+for file in $files
+do
+    check_file $file
+done

=== modified file 'storage/ndb/test/run-test/conf-dl145a.cnf'
--- a/storage/ndb/test/run-test/conf-dl145a.cnf	2007-06-04 08:32:32 +0000
+++ b/storage/ndb/test/run-test/conf-dl145a.cnf	2009-02-17 07:52:13 +0000
@@ -24,3 +24,6 @@ SendBufferMemory = 2M
 NoOfFragmentLogFiles = 4
 FragmentLogFileSize = 64M
 
+SharedGlobalMemory=256M
+InitialLogfileGroup=undo_buffer_size=64M;undofile01.dat:256M;undofile02.dat:128M
+InitialTablespace=datafile01.dat:128M;datafile02.dat:64M

=== modified file 'storage/ndb/test/run-test/conf-ndbmaster.cnf'
--- a/storage/ndb/test/run-test/conf-ndbmaster.cnf	2007-02-13 01:38:54 +0000
+++ b/storage/ndb/test/run-test/conf-ndbmaster.cnf	2009-02-17 07:52:13 +0000
@@ -21,3 +21,7 @@ BackupMemory = 64M
 MaxNoOfConcurrentScans = 100
 MaxNoOfSavedMessages= 1000
 SendBufferMemory = 2M
+
+SharedGlobalMemory=256M
+InitialLogfileGroup=undo_buffer_size=64M;undofile01.dat:256M;undofile02.dat:128M
+InitialTablespace=datafile01.dat:128M;datafile02.dat:64M

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2009-02-04 13:08:05 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2009-03-19 13:28:34 +0000
@@ -121,10 +121,6 @@ args: -n NoCommitSleep T6 D1 D2
 
 max-time: 500
 cmd: testBasic
-args: -n NoCommit626 T6 D1 D2
-
-max-time: 500
-cmd: testBasic
 args: -n NoCommitAndClose T6 D1 D2
 
 max-time: 500
@@ -255,10 +251,30 @@ max-time: 500
 cmd: testBasic
 args: -n Bug20535
 
+#
+# INDEX
+#
+max-time: 1500
+cmd: testIndex
+args: -n CreateAll T1 T13 T14
+
+max-time: 3600
+cmd: testIndex
+args: -n InsertDelete T1 
+
+max-time: 3600
+cmd: testIndex
+args: -n CreateLoadDrop T1 
+
 max-time: 500
 cmd: testIndex
-args: -n Bug25059 -r 3000 T1
+args: -n MixedTransaction T1 
+
+max-time: 2500
+cmd: testIndex
+args: -n BuildDuring T6 
 
+#
 # SCAN TESTS
 #
 max-time: 500
@@ -517,10 +533,6 @@ max-time: 500
 cmd: testNodeRestart
 args: -n Bug15685 T1
 
-max-time: 500
-cmd: testNodeRestart
-args: -n Bug16772 T1
-
 #max-time: 500
 #cmd: testSystemRestart
 #args: -n Bug18385 T1
@@ -581,22 +593,6 @@ max-time: 3000
 cmd: testNodeRestart
 args: -n Bug25984 T1
 
-max-time: 1000
-cmd: testNodeRestart
-args: -n Bug26457 T1
-
-max-time: 1000
-cmd: testNodeRestart
-args: -n Bug26481 T1
-
-max-time: 1000
-cmd: testNodeRestart
-args: -n Bug28023 T6 D2
-
-max-time: 1000
-cmd: testNodeRestart
-args: -n Bug29364 T1
-
 max-time: 300
 cmd: testNodeRestart
 args: -n Bug32160 T1
@@ -635,14 +631,6 @@ max-time: 1500
 cmd: testDict
 args: -n CreateInvalidTables 
 
-max-time: 1500
-cmd: testDict
-args: -n CreateTableWhenDbIsFull T6 
-
-max-time: 1500
-cmd: testDict
-args: -n CreateMaxTables T6 
-
 max-time: 500
 cmd: testDict
 args: -n FragmentTypeSingle T1 
@@ -669,10 +657,6 @@ args: -n Bug21755 T1
 
 max-time: 1500
 cmd: testDict
-args: -l 25 -n DictRestart T1
-
-max-time: 1500
-cmd: testDict
 args: -n TableAddAttrs
 
 max-time: 1500
@@ -702,10 +686,6 @@ args: -n MaxTransactions T1 T6 T13 
 
 max-time: 500
 cmd: testNdbApi
-args: -n MaxOperations T1 T6 T13 
-
-max-time: 500
-cmd: testNdbApi
 args: -n MaxGetValue T1 T6 T13 
 
 max-time: 500
@@ -777,14 +757,26 @@ cmd: testNdbApi
 args: -n SimpleReadAbortOnError T1 T6 T15
 
 max-time: 500
+cmd: testNdbApi
+args: -n NdbRecordPKAmbiguity T1 T6 T15
+
+max-time: 500
+cmd: testNdbApi
+args: -n NdbRecordPKUpdate T1 T6 T15
+
+max-time: 500
+cmd: testNdbApi
+args: -n NdbRecordCICharPKUpdate T1 
+
+max-time: 500
 cmd: testInterpreter
 args: T1 
 
-max-time: 150000
+max-time: 7200
 cmd: testOperations
 args:
 
-max-time: 15000
+max-time: 7200
 cmd: testTransactions
 args:
 
@@ -840,57 +832,62 @@ max-time: 1500
 cmd: testSystemRestart
 args: -n basic T1 
 
-max-time: 1500
+max-time: 5000
 cmd: testSystemRestart
 args: -n SR1 T1 
 
-max-time: 1500
+max-time: 5000
 cmd: testSystemRestart
 args: -n SR1 T6 
 
-max-time: 1500
+max-time: 5000
 cmd: testSystemRestart
 args: -n SR1 D1
 
-max-time: 1500
+max-time: 5000
 cmd: testSystemRestart
 args: -n SR1 D2 
 
-max-time: 1500
+max-time: 5000
 cmd: testSystemRestart
 args: -n SR2 T1 
 
-max-time: 1500
+max-time: 5000
 cmd: testSystemRestart
 args: -n SR2 T6 
 
-max-time: 1500
+max-time: 5000
 cmd: testSystemRestart
 args: -n SR2 D1
 
-max-time: 1500
+max-time: 5000
 cmd: testSystemRestart
 args: -n SR2 D2 
 
-max-time: 1500
+max-time: 5000
 cmd: testSystemRestart
 args: -n SR_UNDO T1 
 
-max-time: 1500
+max-time: 5000
 cmd: testSystemRestart
 args: -n SR_UNDO T6 
 
-max-time: 1000
-cmd: testSRBank
-args: -n SR -l 300 -r 15 T1
+#
+max-time: 5000
+cmd: testSystemRestart
+args: -l 1 -n SR6 T1 
 
-max-time: 1000
-cmd: testSRBank
-args: -n NR -l 300 -r 15 T1
+max-time: 5000
+cmd: testSystemRestart
+args: -l 1 -n SR7 T1 
 
-max-time: 1000
-cmd: testSRBank
-args: -n Mix -l 300 -r 15 T1
+max-time: 5000
+cmd: testSystemRestart
+args: -l 1 -n SR8 T1 
+
+max-time: 5000
+cmd: testSystemRestart
+args: -l 1 -n SR9 T1 
 
 max-time: 300
 cmd: testNodeRestart
@@ -916,10 +913,93 @@ max-time: 1000
 cmd: test_event
 args: -l 10 -n Bug27169 T1
 
+#
+max-time: 600
+cmd: test_event_merge
+args: --no-implicit-nulls --separate-events --blob-version 1
+
+#
+max-time: 600
+cmd: test_event_merge
+args: --no-implicit-nulls --separate-events
+
+#
+max-time: 600
+cmd: test_event_merge
+args: --no-implicit-nulls --no-multiops --blob-version 1
+
+#
+max-time: 600
+cmd: test_event_merge
+args: --no-implicit-nulls --no-multiops
+
+max-time: 600
+cmd: testBasic
+args: -n PkRead T1
+
 max-time: 300
 cmd: testNodeRestart
 args: -n Bug31980 T1
 
+max-time: 2500
+cmd: testNodeRestart
+args: -n CommittedRead T1
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartRandomNode T6 T13 
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n LateCommit T1
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartMasterNodeError T6 T13 
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartAllNodes T6 T13 
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartAllNodesAbort T6 T13 
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartAllNodesError9999 T6 T13 
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartRandomNodeInitial T6 T13 
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n NoLoad T6
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n TwoNodeFailure T6 T13 
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n TwoMasterNodeFailure T6 T13 
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n FiftyPercentFail T6 T13 
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartRandomNodeError T6 T13 
+
+#
+# MGMAPI AND MGSRV
+#
+max-time: 1800
+cmd: testMgm
+args: -n SingleUserMode T1 
+
 # OLD FLEX
 max-time: 500
 cmd: flexBench
@@ -929,6 +1009,10 @@ max-time: 500
 cmd: flexHammer
 args: -r 5 -t 32 
 
+max-time: 2500
+cmd: testNodeRestart
+args: -n NF_Hammer -r 5 T1
+
 max-time: 300
 cmd: DbCreate
 args:
@@ -972,10 +1056,6 @@ args: -n ApiTimeoutBasic T1
 
 max-time: 120
 cmd: testMgm
-args: -n ApiSessionFailure T1
-
-max-time: 120
-cmd: testMgm
 args: -n ApiGetStatusTimeout T1
 
 max-time: 120
@@ -1078,7 +1158,7 @@ max-time: 300
 cmd: test_event
 args: -n Bug31701 T1
 
-max-time: 300
+max-time: 600
 cmd: testSystemRestart
 args: -n Bug22696 T1
 
@@ -1099,10 +1179,6 @@ cmd: testNodeRestart
 args: -n mixedmultiop T1 I2 I3 D2
 
 max-time: 600
-cmd: test_event
-args: -l 1 -n SubscribeNR T1
-
-max-time: 600
 cmd: testNodeRestart
 args: -n Bug34702 T1
 

=== modified file 'storage/ndb/test/run-test/daily-devel-tests.txt'
--- a/storage/ndb/test/run-test/daily-devel-tests.txt	2008-12-03 19:44:54 +0000
+++ b/storage/ndb/test/run-test/daily-devel-tests.txt	2009-03-17 15:42:22 +0000
@@ -1,33 +1,41 @@
 #
-# INDEX
+# BACKUP
 #
+max-time: 1000
+cmd: atrt-testBackup
+args: -n BackupBank T6 
+
+max-time: 500
+cmd: testNdbApi
+args: -n MaxOperations T1 T6 T13 
+
 max-time: 1500
-cmd: testIndex
-args: -n CreateAll T1 T13 T14
+cmd: testDict
+args: -n CreateTableWhenDbIsFull T6 
 
-#-m 7200 1: testIndex -n InsertDeleteGentle T6
-max-time: 3600
-cmd: testIndex
-args: -n InsertDelete T1 
+max-time: 1500
+cmd: testDict
+args: -n CreateMaxTables T6 
 
-#-m 3600 1: testIndex -n CreateLoadDropGentle T6
-max-time: 3600
+max-time: 1500
+cmd: testDict
+args: -l 25 -n DictRestart T1
+
+max-time: 500
 cmd: testIndex
-args: -n CreateLoadDrop T1 
+args: -n Bug25059 -r 3000 T1
 
-#
-# BACKUP
-#
 max-time: 1000
-cmd: atrt-testBackup
-args: -n BackupBank T6 
+cmd: testSRBank
+args: -n SR -l 300 -r 15 T1
 
-#
-# MGMAPI AND MGSRV
-#
-max-time: 1800
-cmd: testMgm
-args: -n SingleUserMode T1 
+max-time: 1000
+cmd: testSRBank
+args: -n NR -l 300 -r 15 T1
+
+max-time: 1000
+cmd: testSRBank
+args: -n Mix -l 300 -r 15 T1
 
 #
 #
@@ -51,10 +59,6 @@ args: -n SR_FULLDB T6 
 #
 max-time: 2500
 cmd: testNodeRestart
-args: -n NoLoad T6
-
-max-time: 2500
-cmd: testNodeRestart
 args: -n MixedPkRead T6 T13 
 
 max-time: 2500
@@ -67,75 +71,27 @@ args: -l 1 -n MixedReadUpdateScan 
 
 max-time: 2500
 cmd: testNodeRestart
-args: -n CommittedRead T1
-
-max-time: 2500
-cmd: testNodeRestart
-args: -n LateCommit T1
-
-max-time: 2500
-cmd: testNodeRestart
 args: -n Terror T6 T13 
 
 max-time: 2500
 cmd: testNodeRestart
 args: -n FullDb T6 T13 
 
-max-time: 2500
-cmd: testNodeRestart
-args: -n RestartRandomNode T6 T13 
-
-max-time: 2500
-cmd: testNodeRestart
-args: -n RestartRandomNodeError T6 T13 
-
-max-time: 2500
-cmd: testNodeRestart
-args: -n RestartRandomNodeInitial T6 T13 
-
 max-time: 3600
 cmd: testNodeRestart
 args: -l 1 -n RestartNFDuringNR T6 T13 
 
-max-time: 2500
-cmd: testNodeRestart
-args: -n RestartMasterNodeError T6 T13 
-
 max-time: 3600
 cmd: testNodeRestart
 args: -n RestartNodeDuringLCP T6 
 
 max-time: 2500
 cmd: testNodeRestart
-args: -n TwoNodeFailure T6 T13 
-
-max-time: 2500
-cmd: testNodeRestart
-args: -n TwoMasterNodeFailure T6 T13 
-
-max-time: 2500
-cmd: testNodeRestart
-args: -n FiftyPercentFail T6 T13 
-
-max-time: 2500
-cmd: testNodeRestart
-args: -n RestartAllNodes T6 T13 
-
-max-time: 2500
-cmd: testNodeRestart
-args: -n RestartAllNodesAbort T6 T13 
-
-max-time: 2500
-cmd: testNodeRestart
-args: -n RestartAllNodesError9999 T6 T13 
-
-max-time: 2500
-cmd: testNodeRestart
 args: -n FiftyPercentStopAndWait T6 T13 
 
-max-time: 2500
+max-time: 500
 cmd: testNodeRestart
-args: -n NF_Hammer -r 5 T1
+args: -n Bug16772 T1
 
 #max-time: 500
 #cmd: testNodeRestart
@@ -156,10 +112,6 @@ args: -n NFNR3 T6 T13 
 
 max-time: 2500
 cmd: testIndex
-args: -n BuildDuring T6 
-
-max-time: 2500
-cmd: testIndex
 args: -l 2 -n SR1 T6 T13 
 
 max-time: 2500
@@ -182,32 +134,11 @@ max-time: 2500
 cmd: testIndex
 args: -l 2 -n SR1_O T6 T13 
 
-max-time: 500
-cmd: testIndex
-args: -n MixedTransaction T1 
-
 max-time: 2500
 cmd: testDict
 args: -n NF1 T1 T6 T13 
 
 #
-max-time: 1500
-cmd: testSystemRestart
-args: -l 1 -n SR6 T1 
-
-max-time: 1500
-cmd: testSystemRestart
-args: -l 1 -n SR7 T1 
-
-max-time: 1500
-cmd: testSystemRestart
-args: -l 1 -n SR8 T1 
-
-max-time: 1500
-cmd: testSystemRestart
-args: -l 1 -n SR9 T1 
-
-#
 max-time: 3600
 cmd: test_event
 args: -n EventOperationApplier -l 2
@@ -227,32 +158,12 @@ max-time: 2500
 cmd: test_event
 args: -n Multi
 
+max-time: 600
+cmd: test_event
+args: -l 1 -n SubscribeNR T1
+
 #
 max-time: 3600
 cmd: test_event
 args: -n CreateDropNR -l 1
 
-#
-max-time: 600
-cmd: test_event_merge
-args: --no-implicit-nulls --separate-events --blob-version 1
-
-#
-max-time: 600
-cmd: test_event_merge
-args: --no-implicit-nulls --separate-events
-
-#
-max-time: 600
-cmd: test_event_merge
-args: --no-implicit-nulls --no-multiops --blob-version 1
-
-#
-max-time: 600
-cmd: test_event_merge
-args: --no-implicit-nulls --no-multiops
-
-max-time: 600
-cmd: testBasic
-args: -n PkRead T1
-

=== modified file 'storage/ndb/test/src/HugoOperations.cpp'
--- a/storage/ndb/test/src/HugoOperations.cpp	2008-11-17 09:26:25 +0000
+++ b/storage/ndb/test/src/HugoOperations.cpp	2009-03-16 12:37:05 +0000
@@ -69,7 +69,8 @@ NdbConnection* HugoOperations::getTransa
 int HugoOperations::pkReadRecord(Ndb* pNdb,
 				 int recordNo,
 				 int numRecords,
-				 NdbOperation::LockMode lm){
+				 NdbOperation::LockMode lm,
+                                 NdbOperation::LockMode *lmused){
   int a;  
   allocRows(numRecords);
   indexScans.clear();
@@ -95,6 +96,8 @@ rand_lock_mode:
     case NdbOperation::LM_Exclusive:
     case NdbOperation::LM_CommittedRead:
     case NdbOperation::LM_SimpleRead:
+      if (lmused)
+        *lmused = lm;
       if(idx && idx->getType() == NdbDictionary::Index::OrderedIndex && 
 	 pIndexScanOp == 0)
       {
@@ -150,7 +153,8 @@ rand_lock_mode:
 int HugoOperations::pkReadRandRecord(Ndb* pNdb,
                                      int records,
                                      int numRecords,
-                                     NdbOperation::LockMode lm){
+                                     NdbOperation::LockMode lm,
+                                     NdbOperation::LockMode *lmused){
   int a;  
   allocRows(numRecords);
   indexScans.clear();
@@ -176,6 +180,8 @@ rand_lock_mode:
     case NdbOperation::LM_Exclusive:
     case NdbOperation::LM_CommittedRead:
     case NdbOperation::LM_SimpleRead:
+      if (lmused)
+        *lmused = lm;
       if(idx && idx->getType() == NdbDictionary::Index::OrderedIndex && 
 	 pIndexScanOp == 0)
       {

=== modified file 'storage/ndb/test/src/HugoTransactions.cpp'
--- a/storage/ndb/test/src/HugoTransactions.cpp	2009-02-04 12:32:27 +0000
+++ b/storage/ndb/test/src/HugoTransactions.cpp	2009-03-16 12:37:05 +0000
@@ -884,9 +884,10 @@ HugoTransactions::pkReadRecords(Ndb* pNd
     if (timer_active)
       NdbTick_getMicroTimer(&timer_start);
 
+    NdbOperation::LockMode lmused;
     if (_rand == 0)
     {
-      if(pkReadRecord(pNdb, r, batch, lm) != NDBT_OK)
+      if(pkReadRecord(pNdb, r, batch, lm, &lmused) != NDBT_OK)
       {
         ERR(pTrans->getNdbError());
         closeTransaction(pNdb);
@@ -895,7 +896,7 @@ HugoTransactions::pkReadRecords(Ndb* pNd
     }
     else
     {
-      if(pkReadRandRecord(pNdb, records, batch, lm) != NDBT_OK)
+      if(pkReadRandRecord(pNdb, records, batch, lm, &lmused) != NDBT_OK)
       {
         ERR(pTrans->getNdbError());
         closeTransaction(pNdb);
@@ -904,6 +905,20 @@ HugoTransactions::pkReadRecords(Ndb* pNd
     }
     
     check = pTrans->execute(Commit, AbortOnError);
+
+    if (check != -1 && lmused == NdbOperation::LM_CommittedRead)
+    {
+      /**
+       * LM_CommittedRead will not abort transaction
+       *   even if doing execute(AbortOnError);
+       *   so also check pTrans->getNdbError() in this case
+       */
+      if (pTrans->getNdbError().status != NdbError::Success)
+      {
+        check = -1;
+      }
+    }      
+
     if( check == -1 ) {
       const NdbError err = pTrans->getNdbError();
       

=== modified file 'storage/ndb/test/src/NdbBackup.cpp'
--- a/storage/ndb/test/src/NdbBackup.cpp	2008-12-11 09:43:11 +0000
+++ b/storage/ndb/test/src/NdbBackup.cpp	2009-02-23 12:37:14 +0000
@@ -150,28 +150,30 @@ NdbBackup::execRestore(bool _restore_dat
   int res = system(tmp.c_str());  
   
   ndbout << "scp res: " << res << endl;
-  
-  tmp.assfmt("%sndb_restore -c \"%s:%d\" -n %d -b %d %s %s .", 
+
+  if (res == 0 && _restore_meta)
+  {
+    /** don't restore DD objects */
+    
+    tmp.assfmt("%sndb_restore -c \"%s:%d\" -n %d -b %d -m -d .", 
 #if 1
-             "",
+               "",
 #else
-             "valgrind --leak-check=yes -v "
+               "valgrind --leak-check=yes -v "
 #endif
-             ndb_mgm_get_connected_host(handle),
-             ndb_mgm_get_connected_port(handle),
-             _node_id, 
-             _backup_id,
-             _restore_data?"-r":"",
-             _restore_meta?"-m":"");
+               ndb_mgm_get_connected_host(handle),
+               ndb_mgm_get_connected_port(handle),
+               _node_id, 
+               _backup_id);
+    
+    ndbout << "buf: "<< tmp.c_str() <<endl;
+    res = system(tmp.c_str());
+  }
   
-  ndbout << "buf: "<< tmp.c_str() <<endl;
-  res = system(tmp.c_str());
-
-  if (res && _restore_meta)
+  if (res == 0 && _restore_data)
   {
-    /** try once wo/ restoring DD objects */
 
-    tmp.assfmt("%sndb_restore -c \"%s:%d\" -n %d -b %d -d %s %s .", 
+    tmp.assfmt("%sndb_restore -c \"%s:%d\" -n %d -b %d -r .", 
 #if 1
                "",
 #else
@@ -180,9 +182,7 @@ NdbBackup::execRestore(bool _restore_dat
                ndb_mgm_get_connected_host(handle),
                ndb_mgm_get_connected_port(handle),
                _node_id, 
-               _backup_id,
-               _restore_data?"-r":"",
-               _restore_meta?"-m":"");
+               _backup_id);
     
     ndbout << "buf: "<< tmp.c_str() <<endl;
     res = system(tmp.c_str());

=== modified file 'storage/ndb/test/src/NdbRestarts.cpp'
--- a/storage/ndb/test/src/NdbRestarts.cpp	2007-10-23 09:38:20 +0000
+++ b/storage/ndb/test/src/NdbRestarts.cpp	2009-03-13 11:09:37 +0000
@@ -431,50 +431,80 @@ int twoNodeFailure(NdbRestarter& _restar
 
   myRandom48Init(NdbTick_CurrentMillisecond());
   int randomId = myRandom48(_restarter.getNumDbNodes());
-  int nodeId = _restarter.getDbNodeId(randomId);  
-  g_info << _restart->m_name << ": node = "<< nodeId << endl;
-
-  CHECK(_restarter.insertErrorInNode(nodeId, 9999) == 0,
-	"Could not restart node "<< nodeId);
-
-    // Create random value, max 10 secs
-  int max = 10;
-  int seconds = (myRandom48(max)) + 1;   
-  g_info << "Waiting for " << seconds << "(" << max 
-	 << ") secs " << endl;
-  NdbSleep_SecSleep(seconds);
+  int n[2];
+  n[0] = _restarter.getDbNodeId(randomId);  
+  n[1] = _restarter.getRandomNodeOtherNodeGroup(n[0], rand());
+  g_info << _restart->m_name << ": node = "<< n[0] << endl;
+
+  int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
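+  /* Set restart-on-error-insert behaviour so the failed nodes end up in
+   * no-start state (they are restarted together via waitNodesNoStart below)
+   */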
+  CHECK(_restarter.dumpStateOneNode(n[0], val2, 2) == 0,
+        "Failed to dump");
+  CHECK(_restarter.dumpStateOneNode(n[1], val2, 2) == 0,
+        "Failed to dump");
+  
+  CHECK(_restarter.insertErrorInNode(n[0], 9999) == 0,
+	"Could not restart node "<< n[0]);
 
-  nodeId = _restarter.getRandomNodeOtherNodeGroup(nodeId, rand());
-  g_info << _restart->m_name << ": node = "<< nodeId << endl;
+    // Create random value, max 3 secs
+  int max = 3000;
+  int ms = (myRandom48(max)) + 1;   
+  g_info << "Waiting for " << ms << "(" << max 
+	 << ") ms " << endl;
+  NdbSleep_MilliSleep(ms);
+
+  g_info << _restart->m_name << ": node = "<< n[1] << endl;
+  CHECK(_restarter.insertErrorInNode(n[1], 9999) == 0,
+	"Could not restart node "<< n[1]);
 
-  CHECK(_restarter.insertErrorInNode(nodeId, 9999) == 0,
-	"Could not restart node "<< nodeId);
+  CHECK(_restarter.waitNodesNoStart(n, 2) == 0,
+        "Failed to wait nostart");
 
+  _restarter.startNodes(n, 2);
+  
   return NDBT_OK;
 }
 
 int twoMasterNodeFailure(NdbRestarter& _restarter, 
 			 const NdbRestarts::NdbRestart* _restart){
 
-  int nodeId = _restarter.getDbNodeId(0);  
-  g_info << _restart->m_name << ": node = "<< nodeId << endl;
-
-  CHECK(_restarter.insertErrorInNode(nodeId, 39999) == 0,
-	"Could not restart node "<< nodeId);
-
-  // Create random value, max 10 secs
-  int max = 10;
-  int seconds = (myRandom48(max)) + 1;   
-  g_info << "Waiting for " << seconds << "(" << max 
-	 << ") secs " << endl;
-  NdbSleep_SecSleep(seconds);
-
-  nodeId = _restarter.getDbNodeId(0);  
-  g_info << _restart->m_name << ": node = "<< nodeId << endl;
-
-  CHECK(_restarter.insertErrorInNode(nodeId, 39999) == 0,
-	"Could not restart node "<< nodeId);
+  int n[2];
+  n[0] = _restarter.getMasterNodeId();  
+  n[1] = n[0];
+  do {
+    n[1] = _restarter.getNextMasterNodeId(n[1]);
+  } while(_restarter.getNodeGroup(n[0]) == _restarter.getNodeGroup(n[1]));
+  
+  g_info << _restart->m_name << ": ";
+  g_info << "node0 = "<< n[0] << "(" << _restarter.getNodeGroup(n[0]) << ") ";
+  g_info << "node1 = "<< n[1] << "(" << _restarter.getNodeGroup(n[1]) << ") ";
+  g_info << endl;
+
+  int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+  CHECK(_restarter.dumpStateOneNode(n[0], val2, 2) == 0,
+        "Failed to dump");
+  CHECK(_restarter.dumpStateOneNode(n[1], val2, 2) == 0,
+        "Failed to dump");
+  
+  CHECK(_restarter.insertErrorInNode(n[0], 9999) == 0,
+	"Could not restart node "<< n[0]);
+  
+  // Create random value, max 3 secs
+  int max = 3000;
+  int ms = (myRandom48(max)) + 1;   
+  g_info << "Waiting for " << ms << "(" << max 
+	 << ") ms " << endl;
+  NdbSleep_MilliSleep(ms);
+  
+  g_info << _restart->m_name << ": node = "<< n[1] << endl;
+  
+  CHECK(_restarter.insertErrorInNode(n[1], 9999) == 0,
+	"Could not restart node "<< n[1]);
+  
+  CHECK(_restarter.waitNodesNoStart(n, 2) == 0,
+        "Failed to wait nostart");
 
+  _restarter.startNodes(n, 2);
+  
   return NDBT_OK;
 }
 

=== modified file 'storage/ndb/test/src/UtilTransactions.cpp'
--- a/storage/ndb/test/src/UtilTransactions.cpp	2008-02-19 15:00:29 +0000
+++ b/storage/ndb/test/src/UtilTransactions.cpp	2009-03-16 15:10:51 +0000
@@ -1099,10 +1099,13 @@ UtilTransactions::verifyOrderedIndex(Ndb
 	    goto error;
 	  if(get_values(iop, indexRow))
 	    goto error;
+          if(equal(pIndex, iop, scanRow))
+            goto error;
 	}
-	
-	if(equal(pIndex, iop, scanRow))
-	  goto error;
+	else
+        {
+          goto error;
+        }
       }     
 
       check = pTrans->execute(NoCommit, AbortOnError);

=== modified file 'storage/ndb/tools/ndb_error_reporter'
--- a/storage/ndb/tools/ndb_error_reporter	2007-06-13 13:34:36 +0000
+++ b/storage/ndb/tools/ndb_error_reporter	2009-02-12 08:29:00 +0000
@@ -75,12 +75,12 @@ my $outfile;
 if($r==0)
 {
     $outfile= "$reportdir.tar.bz2";
-    system "tar c $reportdir|bzip2 > $outfile";
+    system "tar cf - $reportdir|bzip2 > $outfile";
 }
 else
 {
     $outfile= "$reportdir.tar.gz";
-    system "tar c $reportdir|gzip > $outfile";
+    system "tar cf - $reportdir|gzip > $outfile";
 }
 
 system "rm -rf $reportdir";

=== modified file 'storage/ndb/tools/restore/Restore.cpp'
--- a/storage/ndb/tools/restore/Restore.cpp	2008-08-12 18:56:42 +0000
+++ b/storage/ndb/tools/restore/Restore.cpp	2009-03-12 10:45:04 +0000
@@ -882,7 +882,7 @@ BackupFile::setCtlFile(Uint32 nodeId, Ui
   m_expectedFileHeader.FileType = BackupFormat::CTL_FILE;
 
   char name[PATH_MAX]; const Uint32 sz = sizeof(name);
-  BaseString::snprintf(name, sz, "BACKUP-%d.%d.ctl", backupId, nodeId);  
+  BaseString::snprintf(name, sz, "BACKUP-%u.%d.ctl", backupId, nodeId);  
   setName(path, name);
 }
 
@@ -893,7 +893,7 @@ BackupFile::setDataFile(const BackupFile
   m_expectedFileHeader.FileType = BackupFormat::DATA_FILE;
   
   char name[PATH_MAX]; const Uint32 sz = sizeof(name);
-  BaseString::snprintf(name, sz, "BACKUP-%d-%d.%d.Data", 
+  BaseString::snprintf(name, sz, "BACKUP-%u-%d.%d.Data", 
 	   m_expectedFileHeader.BackupId, no, m_nodeId);
   setName(bf.m_path, name);
 }
@@ -905,7 +905,7 @@ BackupFile::setLogFile(const BackupFile 
   m_expectedFileHeader.FileType = BackupFormat::LOG_FILE;
   
   char name[PATH_MAX]; const Uint32 sz = sizeof(name);
-  BaseString::snprintf(name, sz, "BACKUP-%d.%d.log", 
+  BaseString::snprintf(name, sz, "BACKUP-%u.%d.log", 
 	   m_expectedFileHeader.BackupId, m_nodeId);
   setName(bf.m_path, name);
 }

=== modified file 'storage/ndb/tools/restore/restore_main.cpp'
--- a/storage/ndb/tools/restore/restore_main.cpp	2008-08-12 18:56:42 +0000
+++ b/storage/ndb/tools/restore/restore_main.cpp	2009-03-12 10:45:04 +0000
@@ -694,7 +694,7 @@ main(int argc, char** argv)
     exitHandler(NDBT_FAILED);
   }
 
-  g_options.appfmt(" -b %d", ga_backupId);
+  g_options.appfmt(" -b %u", ga_backupId);
   g_options.appfmt(" -n %d", ga_nodeId);
   if (_restore_meta)
     g_options.appfmt(" -m");
