List: Commits
From: jonas oreland
Date: May 7 2011 10:50am
Subject: bzr push into mysql-5.1-telco-7.1 branch (jonas:4181 to 4182)
 4182 jonas oreland	2011-05-07 [merge]
      ndb - merge 70 to 71

    removed:
      mysql-test/suite/ndb/include/add_six_nodes.inc
      mysql-test/suite/ndb/include/add_two_nodes.inc
      mysql-test/suite/ndb/include/reload_ndb_mgmd.inc
      mysql-test/suite/ndb/r/add_node01.result
      mysql-test/suite/ndb/r/add_node02.result
      mysql-test/suite/ndb/r/add_node03.result
      mysql-test/suite/ndb/t/add_node01.test
      mysql-test/suite/ndb/t/add_node02.test
      mysql-test/suite/ndb/t/add_node03.test
    added:
      mysql-test/suite/ndb/data/
    renamed:
      mysql-test/suite/ndb/std_data/ => mysql-test/suite/ndb/backups/
      mysql-test/suite/ndb/std_data/ndb_backup50/ => mysql-test/suite/ndb/backups/50/
      mysql-test/suite/ndb/std_data/ndb_backup51/ => mysql-test/suite/ndb/backups/51/
      mysql-test/suite/ndb/std_data/ndb_backup51_d2_be/ => mysql-test/suite/ndb/backups/51_d2_be/
      mysql-test/suite/ndb/std_data/ndb_backup51_d2_le/ => mysql-test/suite/ndb/backups/51_d2_le/
      mysql-test/suite/ndb/std_data/ndb_backup51_data_be/ => mysql-test/suite/ndb/backups/51_data_be/
      mysql-test/suite/ndb/std_data/ndb_backup51_data_le/ => mysql-test/suite/ndb/backups/51_data_le/
      mysql-test/suite/ndb/std_data/ndb_backup51_dd/ => mysql-test/suite/ndb/backups/51_dd/
      mysql-test/suite/ndb/std_data/ndb_backup51_undolog_be/ => mysql-test/suite/ndb/backups/51_undolog_be/
      mysql-test/suite/ndb/std_data/ndb_backup51_undolog_le/ => mysql-test/suite/ndb/backups/51_undolog_le/
      mysql-test/suite/ndb/std_data/ndb_backup_before_native_default/ => mysql-test/suite/ndb/backups/before_native_default/
      mysql-test/suite/ndb/std_data/ndb_backup_bug54613/ => mysql-test/suite/ndb/backups/bug54613/
      mysql-test/suite/ndb/std_data/ndb_backup_hashmap/ => mysql-test/suite/ndb/backups/hashmap/
      mysql-test/suite/ndb/std_data/ndb_backup_packed/ => mysql-test/suite/ndb/backups/packed/
      mysql-test/suite/ndb/std_data/table_data10000.dat => mysql-test/suite/ndb/data/table_data10000.dat
      mysql-test/suite/ndb/std_data/table_data100000.dat => mysql-test/suite/ndb/data/table_data100000.dat
    modified:
      mysql-test/Makefile.am
      mysql-test/suite/ndb/t/disabled.def
      mysql-test/suite/ndb/t/ndb_addnode.test
      mysql-test/suite/ndb/t/ndb_alter_table_backup.test
      mysql-test/suite/ndb/t/ndb_dd_restore_compat.test
      mysql-test/suite/ndb/t/ndb_native_default_support.test
      mysql-test/suite/ndb/t/ndb_restore_compat_downward.test
      mysql-test/suite/ndb/t/ndb_restore_compat_endianness.test
      mysql-test/suite/ndb/t/ndb_restore_misc.test
      mysql-test/suite/ndb/t/ndb_restore_undolog.test
      mysql-test/suite/ndb_binlog/t/ndb_binlog_restore.test
      storage/ndb/CMakeLists.txt
      storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp
      storage/ndb/src/kernel/blocks/backup/Backup.cpp
      storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
      storage/ndb/src/ndbapi/ndberror.c
      storage/ndb/test/ndbapi/testBasic.cpp
      storage/ndb/test/run-test/daily-basic-tests.txt
 4181 Magnus Blåudd	2011-05-06 [merge]
      Merge 7.0 -> 7.1

    modified:
      mysql-test/include/ndb_backup_id.inc
      mysql-test/lib/My/SysInfo.pm
      mysql-test/mysql-test-run.pl
      mysql-test/suite/ndb/t/ndb_show_tables_result.inc
      storage/ndb/CMakeLists.txt
      storage/ndb/include/CMakeLists.txt
      storage/ndb/include/kernel/signaldata/QueryTree.hpp
      storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilder.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
      storage/ndb/src/ndbapi/Ndbinit.cpp
      storage/ndb/src/ndbapi/ObjectMap.cpp
      storage/ndb/src/ndbapi/ObjectMap.hpp
      storage/ndb/src/ndbapi/ndberror.c
=== modified file 'mysql-test/Makefile.am'
--- a/mysql-test/Makefile.am	2011-04-08 14:08:27 +0000
+++ b/mysql-test/Makefile.am	2011-05-07 10:50:14 +0000
@@ -76,19 +76,19 @@ EXTRA_DIST =	README \
 # List of directories containing test + result files and the
 # related test data files that should be copied
 TEST_DIRS = t r include std_data std_data/parts collections \
-	suite/ndb/std_data/ndb_backup50 \
-	suite/ndb/std_data/ndb_backup51 \
-	suite/ndb/std_data/ndb_backup51_data_be \
-	suite/ndb/std_data/ndb_backup51_data_le \
-	suite/ndb/std_data/ndb_backup51_dd \
-	suite/ndb/std_data/ndb_backup_packed \
-	suite/ndb/std_data/ndb_backup51_d2_be \
-	suite/ndb/std_data/ndb_backup51_d2_le \
-	suite/ndb/std_data/ndb_backup51_undolog_be \
-	suite/ndb/std_data/ndb_backup51_undolog_le \
-	suite/ndb/std_data/ndb_backup_hashmap \
-	suite/ndb/std_data/ndb_backup_before_native_default \
-	suite/ndb/std_data/ndb_backup_bug54613 \
+	suite/ndb/backups/50 \
+	suite/ndb/backups/51 \
+	suite/ndb/backups/51_data_be \
+	suite/ndb/backups/51_data_le \
+	suite/ndb/backups/51_dd \
+	suite/ndb/backups/packed \
+	suite/ndb/backups/51_d2_be \
+	suite/ndb/backups/51_d2_le \
+	suite/ndb/backups/51_undolog_be \
+	suite/ndb/backups/51_undolog_le \
+	suite/ndb/backups/hashmap \
+	suite/ndb/backups/before_native_default \
+	suite/ndb/backups/bug54613 \
 	std_data/funcs_1 \
 	extra/binlog_tests/ extra/rpl_tests \
 	suite/binlog suite/binlog/t suite/binlog/r suite/binlog/std_data \
@@ -106,7 +106,7 @@ TEST_DIRS = t r include std_data std_dat
 	suite/rpl suite/rpl/data suite/rpl/include suite/rpl/r \
 	suite/rpl/t \
 	suite/stress/include suite/stress/t suite/stress/r \
-	suite/ndb suite/ndb/t suite/ndb/r suite/ndb/include suite/ndb/std_data \
+	suite/ndb suite/ndb/t suite/ndb/r suite/ndb/include suite/ndb/data \
 	suite/ndb_big \
 	suite/ndb_binlog suite/ndb_binlog/t suite/ndb_binlog/r \
 	suite/ndb_team suite/ndb_team/t suite/ndb_team/r \

=== renamed directory 'mysql-test/suite/ndb/std_data' => 'mysql-test/suite/ndb/backups'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup50' => 'mysql-test/suite/ndb/backups/50'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51' => 'mysql-test/suite/ndb/backups/51'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_d2_be' => 'mysql-test/suite/ndb/backups/51_d2_be'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_d2_le' => 'mysql-test/suite/ndb/backups/51_d2_le'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_data_be' => 'mysql-test/suite/ndb/backups/51_data_be'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_data_le' => 'mysql-test/suite/ndb/backups/51_data_le'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_dd' => 'mysql-test/suite/ndb/backups/51_dd'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_undolog_be' => 'mysql-test/suite/ndb/backups/51_undolog_be'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_undolog_le' => 'mysql-test/suite/ndb/backups/51_undolog_le'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup_before_native_default' => 'mysql-test/suite/ndb/backups/before_native_default'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup_bug54613' => 'mysql-test/suite/ndb/backups/bug54613'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup_hashmap' => 'mysql-test/suite/ndb/backups/hashmap'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup_packed' => 'mysql-test/suite/ndb/backups/packed'
=== added directory 'mysql-test/suite/ndb/data'
=== renamed file 'mysql-test/suite/ndb/std_data/table_data10000.dat' => 'mysql-test/suite/ndb/data/table_data10000.dat'
=== renamed file 'mysql-test/suite/ndb/std_data/table_data100000.dat' => 'mysql-test/suite/ndb/data/table_data100000.dat'
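
The relocated backup sets are picked up by the tests through a $backup_data_dir variable. A minimal mysqltest sketch of the new pattern, assembled from the ndb_restore hunks further down in this mail ($MYSQL_TEST_DIR, $NDB_RESTORE and $NDB_TOOLS_OUTPUT are the usual mtr variables):

# Directory containing the saved backup files (new layout)
let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
# Restore metadata (-m) from node 1, then table data from node 2
--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/51 >> $NDB_TOOLS_OUTPUT
--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/51 >> $NDB_TOOLS_OUTPUT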
=== removed file 'mysql-test/suite/ndb/include/add_six_nodes.inc'
--- a/mysql-test/suite/ndb/include/add_six_nodes.inc	2009-03-26 08:21:55 +0000
+++ b/mysql-test/suite/ndb/include/add_six_nodes.inc	1970-01-01 00:00:00 +0000
@@ -1,64 +0,0 @@
---perl
-
-my $vardir = $ENV{MYSQLTEST_VARDIR} or die "Need MYSQLTEST_VARDIR";
-my $file ="$vardir/my.cnf";
-my $file_new = "$vardir/my.cnf.new";
-
-open (IN, "$file") || die $!;
-open (OUT, ">$file_new") || die $!;
-
-while ($_ = <IN> ) {
-  if ($_ =~ /ndbd=localhost,localhost/i) 
-  {
-    # Replace text, all instances on a line (/g), case insensitive (/i)
-    $_ =~ s/ndbd=localhost,localhost/ndbd=localhost,localhost,localhost,localhost,localhost,localhost,localhost,localhost/gi;
-  }
-  print OUT "$_";
-  if ($_=~ /cluster_config.ndb_mgmd.1.1/i) 
-  {
-    print OUT "NodeId=3\n";
-  }
-}
-
-close IN;
-close OUT;
-
-open (OUT, ">>$file_new") || die $!;
-print OUT "[cluster_config.ndbd.3.1]\n";
-print OUT "Id=40\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.1\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "\n";
-print OUT "[cluster_config.ndbd.4.1]\n";
-print OUT "Id=41\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.2\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-print OUT "\n";
-print OUT "[cluster_config.ndbd.5.1]\n";
-print OUT "Id=42\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.1\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "\n";
-print OUT "[cluster_config.ndbd.6.1]\n";
-print OUT "Id=43\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.2\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-print OUT "\n";
-print OUT "[cluster_config.ndbd.7.1]\n";
-print OUT "Id=44\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.1\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "\n";
-print OUT "[cluster_config.ndbd.8.1]\n";
-print OUT "Id=45\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.2\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-
-close OUT;
-EOF
\ No newline at end of file

=== removed file 'mysql-test/suite/ndb/include/add_two_nodes.inc'
--- a/mysql-test/suite/ndb/include/add_two_nodes.inc	2009-03-26 08:21:55 +0000
+++ b/mysql-test/suite/ndb/include/add_two_nodes.inc	1970-01-01 00:00:00 +0000
@@ -1,39 +0,0 @@
---perl
-
-my $vardir = $ENV{MYSQLTEST_VARDIR} or die "Need MYSQLTEST_VARDIR";
-my $file ="$vardir/my.cnf";
-my $file_new = "$vardir/my.cnf.new";
-
-open (IN, "$file") || die $!;
-open (OUT, ">$file_new") || die $!;
-
-while ($_ = <IN> ) {
-  if ($_ =~ /ndbd=localhost,localhost/i) 
-  {
-    # Replace text, all instances on a line (/g), case insensitive (/i)
-    $_ =~ s/ndbd=localhost,localhost/ndbd=localhost,localhost,localhost,localhost/gi;
-  }
-  print OUT "$_";
-  if ($_=~ /cluster_config.ndb_mgmd.1.1/i) 
-  {
-    print OUT "NodeId=3\n";
-  }
-}
-
-close IN;
-close OUT;
-
-open (OUT, ">>$file_new") || die $!;
-print OUT "[cluster_config.ndbd.3.1]\n";
-print OUT "Id=40\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.1\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "\n";
-print OUT "[cluster_config.ndbd.4.1]\n";
-print OUT "Id=41\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.2\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-close OUT;
-EOF
\ No newline at end of file

=== removed file 'mysql-test/suite/ndb/include/reload_ndb_mgmd.inc'
--- a/mysql-test/suite/ndb/include/reload_ndb_mgmd.inc	2009-03-26 08:21:55 +0000
+++ b/mysql-test/suite/ndb/include/reload_ndb_mgmd.inc	1970-01-01 00:00:00 +0000
@@ -1,37 +0,0 @@
---perl
-
-use strict;
-use IO::Socket::INET;
-
-use lib "lib/";
-use My::Config;
-
-my $vardir = $ENV{MYSQLTEST_VARDIR} or die "Need MYSQLTEST_VARDIR";
-my $config= My::Config->new("$vardir/my.cnf");
-my $mgmd = $config->group("cluster_config.ndb_mgmd.1.1");
-my $server_port = $mgmd->value("PortNumber");
-#print "server_port: $server_port\n";
-
-my $server = new IO::Socket::INET
-(
- PeerAddr => 'localhost',
- PeerPort => $server_port,
- Proto    => 'tcp'
-);
-
-print $server "reload config\n";
-print $server "mycnf: 1\n";
-print $server "\n";
-
-my $result = "unkown error";
-while(my $line= <$server>){
-  if ($line =~ /result: (.*)/)
-  {
-    $result = $1;
-  }
-  last if ($line eq "\n");
-}
-die "reload failed, result: '$result'"
-    unless $result eq "Ok";
-
-EOF
\ No newline at end of file

=== removed file 'mysql-test/suite/ndb/r/add_node01.result'
--- a/mysql-test/suite/ndb/r/add_node01.result	2009-04-06 07:44:28 +0000
+++ b/mysql-test/suite/ndb/r/add_node01.result	1970-01-01 00:00:00 +0000
@@ -1,238 +0,0 @@
-result_format: 2
-## Make mtr.pl restart all servers after this test
-call mtr.force_restart();
-
-## Show cluster is started with one ndb_mgmd and two ndbd nodes
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	2 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-drop database if exists DB1;
-CREATE LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_1.dat'
-    INITIAL_SIZE 16M
-    UNDO_BUFFER_SIZE 2M
-    ENGINE NDB;
-
-ALTER LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_2.dat'
-    INITIAL_SIZE 12M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_1
-    ADD DATAFILE 'data_1.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_2
-    ADD DATAFILE 'data_2.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-create database DB1;
-use DB1;
-create table old_table1(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table2(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table3(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_1 STORAGE DISK
-engine=ndb;
-create table old_table4(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_2 STORAGE DISK
-engine=ndb;
-
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table1 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table2 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table3 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table4 fields terminated by ' ' lines terminated by '\n';
-
-## Add two nodes to my.cnf
-## Reload ndb_mgmd
-## Restart the "old" ndbd nodes
-## Restart mysqld nodes
-
-
-
-
-
-## Initial start of "new" data nodes
-## Wait for added nodes started
-## Create nodegroup for "new" nodes
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	4 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-id=40	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-id=41	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-use DB1;
-create table new_table1(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table new_table2(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_1 STORAGE DISK
-engine=ndb;
-insert into new_table1(id, data) VALUES(1,'new'), (2,'new'),(3,'new'),(4,'new'),(5,'new'),(6,'new'),(7,'new'),(8,'new'),(9,'new'),(10,'new');
-insert into new_table2(id, data) VALUES(1,'new'), (2,'new'),(3,'new'),(4,'new'),(5,'new'),(6,'new'),(7,'new'),(8,'new'),(9,'new'),(10,'new');
-
-## ndb_mgm dump (in the cluster log) shows old data resides only on "old" nodes and new data resides on all nodes
-alter online table old_table1 reorganize partition;
-alter online table old_table2 reorganize partition;
-alter online table old_table3 reorganize partition;
-alter online table old_table4 reorganize partition;
-
-select LOGFILE_GROUP_NAME,FILE_TYPE,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='undo_1.dat';
-LOGFILE_GROUP_NAME	FILE_TYPE	EXTRA
-lg_1	UNDO LOG	CLUSTER_NODE=1;UNDO_BUFFER_SIZE=2097152
-lg_1	UNDO LOG	CLUSTER_NODE=2;UNDO_BUFFER_SIZE=2097152
-lg_1	UNDO LOG	CLUSTER_NODE=40;UNDO_BUFFER_SIZE=2097152
-lg_1	UNDO LOG	CLUSTER_NODE=41;UNDO_BUFFER_SIZE=2097152
-select LOGFILE_GROUP_NAME,FILE_TYPE,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='undo_2.dat';
-LOGFILE_GROUP_NAME	FILE_TYPE	EXTRA
-lg_1	UNDO LOG	CLUSTER_NODE=1;UNDO_BUFFER_SIZE=2097152
-lg_1	UNDO LOG	CLUSTER_NODE=2;UNDO_BUFFER_SIZE=2097152
-lg_1	UNDO LOG	CLUSTER_NODE=40;UNDO_BUFFER_SIZE=2097152
-lg_1	UNDO LOG	CLUSTER_NODE=41;UNDO_BUFFER_SIZE=2097152
-select LOGFILE_GROUP_NAME,FILE_TYPE,TABLESPACE_NAME,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='data_1.dat';
-LOGFILE_GROUP_NAME	FILE_TYPE	TABLESPACE_NAME	EXTRA
-lg_1	DATAFILE	ts_1	CLUSTER_NODE=1
-lg_1	DATAFILE	ts_1	CLUSTER_NODE=2
-lg_1	DATAFILE	ts_1	CLUSTER_NODE=40
-lg_1	DATAFILE	ts_1	CLUSTER_NODE=41
-select LOGFILE_GROUP_NAME,FILE_TYPE,TABLESPACE_NAME,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='data_2.dat';
-LOGFILE_GROUP_NAME	FILE_TYPE	TABLESPACE_NAME	EXTRA
-lg_1	DATAFILE	ts_2	CLUSTER_NODE=1
-lg_1	DATAFILE	ts_2	CLUSTER_NODE=2
-lg_1	DATAFILE	ts_2	CLUSTER_NODE=40
-lg_1	DATAFILE	ts_2	CLUSTER_NODE=41
-
-## Drop nodegroup with "new" nodes is not allowed with data on those nodes
-## Nodegroup with "new" nodes still exist after dropping it as shown:
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	4 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-id=40	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-id=41	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-show databases;
-Database
-information_schema
-DB1
-mtr
-mysql
-test
-drop table old_table1,old_table2,old_table3,old_table4,new_table1,new_table2;
-drop database DB1;
-show databases;
-Database
-information_schema
-mtr
-mysql
-test
-
-## Drop nodegroup with "new" nodes
-## Nodegroup with "new" nodes still exists after dropping it as shown:
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	4 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-id=40	@127.0.0.1  (mysql ndb, no nodegroup)
-id=41	@127.0.0.1  (mysql ndb, no nodegroup)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-ALTER TABLESPACE ts_1
-    DROP DATAFILE 'data_1.dat'
-    ENGINE NDB;
-
-ALTER TABLESPACE ts_2
-    DROP DATAFILE 'data_2.dat'
-    ENGINE NDB;
-
-drop TABLESPACE ts_1 ENGINE NDB;
-drop TABLESPACE ts_2 ENGINE NDB;
-
-drop LOGFILE GROUP lg_1 ENGINE NDB;

=== removed file 'mysql-test/suite/ndb/r/add_node02.result'
--- a/mysql-test/suite/ndb/r/add_node02.result	2009-04-06 07:44:28 +0000
+++ b/mysql-test/suite/ndb/r/add_node02.result	1970-01-01 00:00:00 +0000
@@ -1,143 +0,0 @@
-result_format: 2
-## Make mtr.pl restart all servers after this test
-call mtr.force_restart();
-
-## Show cluster is started with one ndb_mgmd and two ndbd nodes
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	2 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-drop database if exists DB1;
-CREATE LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_1.dat'
-    INITIAL_SIZE 16M
-    UNDO_BUFFER_SIZE 2M
-    ENGINE NDB;
-
-ALTER LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_2.dat'
-    INITIAL_SIZE 12M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_1
-    ADD DATAFILE 'data_1.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_2
-    ADD DATAFILE 'data_2.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-create database DB1;
-use DB1;
-create table old_table1(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table2(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table3(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_1 STORAGE DISK
-engine=ndb;
-create table old_table4(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_2 STORAGE DISK
-engine=ndb;
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table1 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table2 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table3 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table4 fields terminated by ' ' lines terminated by '\n';
-
-## Add two nodes to my.cnf
-## Reload ndb_mgmd
-## Restart the "old" ndbd nodes
-## Restart mysqld nodes
-
-
-
-
-
-## Initial start of "new" data nodes
-## Wait for added nodes started
-## Create nodegroup for "new" nodes
-## Cluster running after adding two ndbd nodes
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	4 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-id=40	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-id=41	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-######################################################
-######################################################
-CREATE TEMPORARY TABLE test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
-
-LOAD DATA INFILE 'DUMP_FILE' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
-
-DROP TABLE test.backup_info;
-
-use DB1;
-drop table old_table1, old_table2, old_table3, old_table4;
-ALTER TABLESPACE ts_1
-    DROP DATAFILE 'data_1.dat'
-    ENGINE NDB;
-ALTER TABLESPACE ts_2
-    DROP DATAFILE 'data_2.dat'
-    ENGINE NDB;
-drop TABLESPACE ts_1 ENGINE NDB;
-drop TABLESPACE ts_2 ENGINE NDB;
-drop LOGFILE GROUP lg_1 ENGINE NDB;
-
-use DB1;
-drop table old_table1, old_table2, old_table3, old_table4;
-ALTER TABLESPACE ts_1
-    DROP DATAFILE 'data_1.dat'
-    ENGINE NDB;
-ALTER TABLESPACE ts_2
-    DROP DATAFILE 'data_2.dat'
-    ENGINE NDB;
-drop TABLESPACE ts_1 ENGINE NDB;
-drop TABLESPACE ts_2 ENGINE NDB;
-drop LOGFILE GROUP lg_1 ENGINE NDB;
-drop database DB1;

=== removed file 'mysql-test/suite/ndb/r/add_node03.result'
--- a/mysql-test/suite/ndb/r/add_node03.result	2009-04-06 07:44:28 +0000
+++ b/mysql-test/suite/ndb/r/add_node03.result	1970-01-01 00:00:00 +0000
@@ -1,76 +0,0 @@
-result_format: 2
-## Make mtr.pl restart all servers after this test
-call mtr.force_restart();
-
-## Show cluster is started with one ndb_mgmd and two ndbd nodes
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	2 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-## Add six nodes to my.cnf
-## Reload ndb_mgmd
-## Restart the "old" ndbd nodes
-## Restart mysqld nodes
-
-
-
-
-
-## Initial start of "new" data nodes
-## Wait for added nodes started
-## Create nodegroup for "new" nodes
-## Cluster running after adding six ndbd nodes:
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	8 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-id=40	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-id=41	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-id=42	@127.0.0.1  (mysql ndb, Nodegroup: 2)
-id=43	@127.0.0.1  (mysql ndb, Nodegroup: 2)
-id=44	@127.0.0.1  (mysql ndb, Nodegroup: 3)
-id=45	@127.0.0.1  (mysql ndb, Nodegroup: 3)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-

=== removed file 'mysql-test/suite/ndb/t/add_node01.test'
--- a/mysql-test/suite/ndb/t/add_node01.test	2010-03-17 10:50:18 +0000
+++ b/mysql-test/suite/ndb/t/add_node01.test	1970-01-01 00:00:00 +0000
@@ -1,150 +0,0 @@
--- source include/have_ndb.inc
--- source include/not_embedded.inc
---result_format 2
-
-## Make mtr.pl restart all servers after this test
-call mtr.force_restart(); 
-
-## Show cluster is started with one ndb_mgmd and two ndbd nodes
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-
---disable_warnings
-drop database if exists DB1;
---enable_warnings
-
-CREATE LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_1.dat'
-    INITIAL_SIZE 16M
-    UNDO_BUFFER_SIZE 2M
-    ENGINE NDB;
-
-ALTER LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_2.dat'
-    INITIAL_SIZE 12M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_1
-    ADD DATAFILE 'data_1.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_2
-    ADD DATAFILE 'data_2.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-create database DB1;
-use DB1;
-create table old_table1(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table2(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table3(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_1 STORAGE DISK
-engine=ndb;
-create table old_table4(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_2 STORAGE DISK
-engine=ndb;
-
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table1 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table2 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table3 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table4 fields terminated by ' ' lines terminated by '\n';
-
-## Add two nodes to my.cnf
-# Set ndb_mgmd with node id 3, otherwise the configuration will change and the
-# cluster may fail to restart
---source suite/ndb/include/add_two_nodes.inc
-
-## Reload ndb_mgmd
---source suite/ndb/include/reload_ndb_mgmd.inc
---exec $NDB_MGM -e show >> $NDB_TOOLS_OUTPUT
-
-## Restart the "old" ndbd nodes
---exec $NDB_MGM -e "1 restart" >> $NDB_TOOLS_OUTPUT
---exec $NDB_WAITER --nowait-nodes=40,41 >> $NDB_TOOLS_OUTPUT
---exec $NDB_MGM -e "2 restart" >> $NDB_TOOLS_OUTPUT
---exec $NDB_WAITER --nowait-nodes=40,41 >> $NDB_TOOLS_OUTPUT
-
-## Restart mysqld nodes
-let $mysqld_name=mysqld.1.1;
---source include/restart_mysqld.inc
-connect (mysqld_2_1,127.0.0.1,root,,test,$MASTER_MYPORT1,);
-connection mysqld_2_1;
-let $mysqld_name= mysqld.2.1;
---source include/restart_mysqld.inc
-connection default;
-
-## Initial start of "new" data nodes
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=40
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=41
-
-## Wait for added nodes started
---exec $NDB_WAITER --timeout=300 >> $NDB_TOOLS_OUTPUT
-
-## Create nodegroup for "new" nodes
---exec $NDB_MGM -e "create nodegroup 40,41" >> $NDB_TOOLS_OUTPUT
-
-# Cluster running after adding two ndbd nodes
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-
-use DB1;
-create table new_table1(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table new_table2(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_1 STORAGE DISK
-engine=ndb;
-insert into new_table1(id, data) VALUES(1,'new'), (2,'new'),(3,'new'),(4,'new'),(5,'new'),(6,'new'),(7,'new'),(8,'new'),(9,'new'),(10,'new');
-insert into new_table2(id, data) VALUES(1,'new'), (2,'new'),(3,'new'),(4,'new'),(5,'new'),(6,'new'),(7,'new'),(8,'new'),(9,'new'),(10,'new');
-
-## ndb_mgm dump (in the cluster log) shows old data resides only on "old" nodes and new data resides on all nodes
---exec $NDB_MGM -e "all dump 18" >> $NDB_TOOLS_OUTPUT
-
-alter online table old_table1 reorganize partition;
-alter online table old_table2 reorganize partition;
-alter online table old_table3 reorganize partition;
-alter online table old_table4 reorganize partition;
-
-select LOGFILE_GROUP_NAME,FILE_TYPE,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='undo_1.dat';
-select LOGFILE_GROUP_NAME,FILE_TYPE,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='undo_2.dat';
-select LOGFILE_GROUP_NAME,FILE_TYPE,TABLESPACE_NAME,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='data_1.dat';
-select LOGFILE_GROUP_NAME,FILE_TYPE,TABLESPACE_NAME,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='data_2.dat';
-
-## Drop nodegroup with "new" nodes is not allowed with data on those nodes
---error 255
---exec $NDB_MGM -e "drop nodegroup 1" >> $NDB_TOOLS_OUTPUT
-
-## Nodegroup with "new" nodes still exists after dropping it as shown:
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-
-show databases;
-drop table old_table1,old_table2,old_table3,old_table4,new_table1,new_table2;
-drop database DB1;
-show databases;
-
-## Drop nodegroup with "new" nodes
---exec $NDB_MGM -e "drop nodegroup 1" >> $NDB_TOOLS_OUTPUT
-
-## Nodegroup with "new" nodes no longer exists after dropping it as shown:
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-
-# Cleanup
-ALTER TABLESPACE ts_1
-    DROP DATAFILE 'data_1.dat'
-    ENGINE NDB;
-
-ALTER TABLESPACE ts_2
-    DROP DATAFILE 'data_2.dat'
-    ENGINE NDB;
-
-drop TABLESPACE ts_1 ENGINE NDB;
-drop TABLESPACE ts_2 ENGINE NDB;
-
-drop LOGFILE GROUP lg_1 ENGINE NDB;
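
Condensed, the online add-node flow that these removed tests exercised was the following (every command excerpted from add_node01.test above; a sketch only, not a self-contained test):

# Grow my.cnf by two ndbd slots and push the new config to ndb_mgmd
--source suite/ndb/include/add_two_nodes.inc
--source suite/ndb/include/reload_ndb_mgmd.inc
# Rolling restart of the existing data nodes, ignoring the not-yet-started new ones
--exec $NDB_MGM -e "1 restart" >> $NDB_TOOLS_OUTPUT
--exec $NDB_WAITER --nowait-nodes=40,41 >> $NDB_TOOLS_OUTPUT
--exec $NDB_MGM -e "2 restart" >> $NDB_TOOLS_OUTPUT
--exec $NDB_WAITER --nowait-nodes=40,41 >> $NDB_TOOLS_OUTPUT
# Initial start of the new nodes, wait for them, then create their nodegroup
--exec $NDB_NDBD --ndb-nodeid=40
--exec $NDB_NDBD --ndb-nodeid=41
--exec $NDB_WAITER --timeout=300 >> $NDB_TOOLS_OUTPUT
--exec $NDB_MGM -e "create nodegroup 40,41" >> $NDB_TOOLS_OUTPUT
# Rebalance pre-existing tables onto the new nodegroup
alter online table old_table1 reorganize partition;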

=== removed file 'mysql-test/suite/ndb/t/add_node02.test'
--- a/mysql-test/suite/ndb/t/add_node02.test	2010-03-17 10:50:18 +0000
+++ b/mysql-test/suite/ndb/t/add_node02.test	1970-01-01 00:00:00 +0000
@@ -1,124 +0,0 @@
--- source include/have_ndb.inc
--- source include/not_embedded.inc
---result_format 2
-
-## Make mtr.pl restart all servers after this test
-call mtr.force_restart(); 
-
-## Show cluster is started with one ndb_mgmd and two ndbd nodes
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/ 
---exec $NDB_MGM -e show
-
---disable_warnings
-drop database if exists DB1;
---enable_warnings
-
-CREATE LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_1.dat'
-    INITIAL_SIZE 16M
-    UNDO_BUFFER_SIZE 2M
-    ENGINE NDB;
-
-ALTER LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_2.dat'
-    INITIAL_SIZE 12M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_1
-    ADD DATAFILE 'data_1.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_2
-    ADD DATAFILE 'data_2.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-create database DB1;
-use DB1;
-create table old_table1(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table2(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table3(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_1 STORAGE DISK
-engine=ndb;
-create table old_table4(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_2 STORAGE DISK
-engine=ndb;
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table1 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table2 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table3 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table4 fields terminated by ' ' lines terminated by '\n';
-
-## Add two nodes to my.cnf
-# Set ndb_mgmd with node id 3, otherwise the configuration will change and the
-# cluster may fail to restart
---source suite/ndb/include/add_two_nodes.inc
-
-## Reload ndb_mgmd
---source suite/ndb/include/reload_ndb_mgmd.inc
---exec $NDB_MGM -e show >> $NDB_TOOLS_OUTPUT
-
-## Restart the "old" ndbd nodes
---exec $NDB_MGM -e "1 restart" >> $NDB_TOOLS_OUTPUT
---exec $NDB_WAITER --nowait-nodes=40,41 >> $NDB_TOOLS_OUTPUT
---exec $NDB_MGM -e "2 restart" >> $NDB_TOOLS_OUTPUT
---exec $NDB_WAITER --nowait-nodes=40,41 >> $NDB_TOOLS_OUTPUT
-
-## Restart mysqld nodes
-let $mysqld_name=mysqld.1.1;
---source include/restart_mysqld.inc
-connect (mysqld_2_1,127.0.0.1,root,,test,$MASTER_MYPORT1,);
-connection mysqld_2_1;
-let $mysqld_name= mysqld.2.1;
---source include/restart_mysqld.inc
-connection default;
-
-## Initial start of "new" data nodes
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=40 --initial
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=41 --initial
-
-## Wait for added nodes started
---exec $NDB_WAITER --timeout=300 >> $NDB_TOOLS_OUTPUT
-
-## Create nodegroup for "new" nodes
---exec $NDB_MGM -e "create nodegroup 40,41" >> $NDB_TOOLS_OUTPUT
-
-## Cluster running after adding two ndbd nodes
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-
---source include/ndb_backup.inc
-use DB1;
-drop table old_table1, old_table2, old_table3, old_table4;
-ALTER TABLESPACE ts_1
-    DROP DATAFILE 'data_1.dat'
-    ENGINE NDB;
-ALTER TABLESPACE ts_2
-    DROP DATAFILE 'data_2.dat'
-    ENGINE NDB;
-drop TABLESPACE ts_1 ENGINE NDB;
-drop TABLESPACE ts_2 ENGINE NDB;
-drop LOGFILE GROUP lg_1 ENGINE NDB;
-
---exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT
-
-# Cleanup
-use DB1;
-drop table old_table1, old_table2, old_table3, old_table4;
-ALTER TABLESPACE ts_1
-    DROP DATAFILE 'data_1.dat'
-    ENGINE NDB;
-ALTER TABLESPACE ts_2
-    DROP DATAFILE 'data_2.dat'
-    ENGINE NDB;
-drop TABLESPACE ts_1 ENGINE NDB;
-drop TABLESPACE ts_2 ENGINE NDB;
-drop LOGFILE GROUP lg_1 ENGINE NDB;
-drop database DB1;

=== removed file 'mysql-test/suite/ndb/t/add_node03.test'
--- a/mysql-test/suite/ndb/t/add_node03.test	2010-03-17 10:50:18 +0000
+++ b/mysql-test/suite/ndb/t/add_node03.test	1970-01-01 00:00:00 +0000
@@ -1,67 +0,0 @@
--- source include/have_ndb.inc
--- source include/not_embedded.inc
---result_format 2
-
-## Make mtr.pl restart all servers after this test
-call mtr.force_restart(); 
-
-## Show cluster is started with one ndb_mgmd and two ndbd nodes
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-
-## Add six nodes to my.cnf
-# Set ndb_mgmd with node id 3, otherwise the configuration will change and the
-# cluster may fail to restart
---source suite/ndb/include/add_six_nodes.inc
-
-## Reload ndb_mgmd
---source suite/ndb/include/reload_ndb_mgmd.inc
---exec $NDB_MGM -e show >> $NDB_TOOLS_OUTPUT
-
-## Restart the "old" ndbd nodes
---exec $NDB_MGM -e "1 restart" >> $NDB_TOOLS_OUTPUT
---exec $NDB_WAITER --nowait-nodes=40,41,42,43,44,45 >> $NDB_TOOLS_OUTPUT
---exec $NDB_MGM -e "2 restart" >> $NDB_TOOLS_OUTPUT
---exec $NDB_WAITER --nowait-nodes=40,41,42,43,44,45 >> $NDB_TOOLS_OUTPUT
-
-## Restart mysqld nodes
-let $mysqld_name=mysqld.1.1;
---source include/restart_mysqld.inc
-connect (mysqld_2_1,127.0.0.1,root,,test,$MASTER_MYPORT1,);
-connection mysqld_2_1;
-let $mysqld_name= mysqld.2.1;
---source include/restart_mysqld.inc
-connection default;
-
-## Initial start of "new" data nodes
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=40 --initial
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=41 --initial
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=42 --initial
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=43 --initial
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=44 --initial
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=45 --initial
-
-## Wait for added nodes started
---exec $NDB_WAITER --timeout=300 >> $NDB_TOOLS_OUTPUT
-
-## Create nodegroup for "new" nodes
---exec $NDB_MGM -e "create nodegroup 40,41" >> $NDB_TOOLS_OUTPUT
---exec $NDB_MGM -e "create nodegroup 42,43" >> $NDB_TOOLS_OUTPUT
---exec $NDB_MGM -e "create nodegroup 44,45" >> $NDB_TOOLS_OUTPUT
-
-## Cluster running after adding six ndbd nodes:
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-

=== modified file 'mysql-test/suite/ndb/t/disabled.def'
--- a/mysql-test/suite/ndb/t/disabled.def	2010-04-28 10:33:09 +0000
+++ b/mysql-test/suite/ndb/t/disabled.def	2011-05-06 13:40:42 +0000
@@ -16,7 +16,3 @@ ndb_partition_error2 : Bug#40989 ndb_par
 ndb_cache_trans           : Bug#42197 Query cache and autocommit
 ndb_disconnect_ddl        : Bug#31853 flaky testcase...
 
-# the testcases below have detected bugs that are still open
-add_node01    : disabled waiting for safe_process compatible spawn
-add_node02    : disabled waiting for safe_process compatible spawn
-add_node03    : disabled waiting for safe_process compatible spawn

=== modified file 'mysql-test/suite/ndb/t/ndb_addnode.test'
--- a/mysql-test/suite/ndb/t/ndb_addnode.test	2010-01-27 10:08:37 +0000
+++ b/mysql-test/suite/ndb/t/ndb_addnode.test	2011-05-06 13:48:00 +0000
@@ -20,8 +20,8 @@ create table t1(id int NOT NULL PRIMARY 
 create table t2(id int NOT NULL PRIMARY KEY, data char(8))
 TABLESPACE ts_1 STORAGE DISK engine=ndb;
 
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table t1 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table t2 fields terminated by ' ' lines terminated by '\n';
+load data local infile 'suite/ndb/data/table_data10000.dat' into table t1 fields terminated by ' ' lines terminated by '\n';
+load data local infile 'suite/ndb/data/table_data10000.dat' into table t2 fields terminated by ' ' lines terminated by '\n';
 
 ## Create nodegroup for "new" nodes
 --exec $NDB_MGM -e "create nodegroup 3,4"

=== modified file 'mysql-test/suite/ndb/t/ndb_alter_table_backup.test'
--- a/mysql-test/suite/ndb/t/ndb_alter_table_backup.test	2010-10-25 09:15:03 +0000
+++ b/mysql-test/suite/ndb/t/ndb_alter_table_backup.test	2011-05-06 14:11:46 +0000
@@ -7,7 +7,7 @@
 --source include/have_ndb.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 ##############################
 # mix endian restore section #
@@ -22,8 +22,8 @@ DROP TABLE IF EXISTS t1;
 --echo *********************************
 --echo * restore tables w/ new column from little endian
 --echo *********************************
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup51_d2_le >> $NDB_TOOLS_OUTPUT 2>&1
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup51_d2_le >> $NDB_TOOLS_OUTPUT 2>&1
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/51_d2_le >> $NDB_TOOLS_OUTPUT 2>&1
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/51_d2_le >> $NDB_TOOLS_OUTPUT 2>&1
 SHOW TABLES;
 SHOW CREATE TABLE t1;
 SELECT * FROM t1 WHERE a = 1 or a = 10 or a = 20 or a = 30 ORDER BY a;
@@ -35,8 +35,8 @@ DROP TABLE t1;
 --echo *********************************
 --echo * restore tables w/ new column from big endian
 --echo *********************************
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup51_d2_be >> $NDB_TOOLS_OUTPUT 2>&1
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup51_d2_be >> $NDB_TOOLS_OUTPUT 2>&1
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/51_d2_be >> $NDB_TOOLS_OUTPUT 2>&1
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/51_d2_be >> $NDB_TOOLS_OUTPUT 2>&1
 SHOW TABLES;
 SHOW CREATE TABLE t1;
 SELECT * FROM t1 WHERE a = 1 or a = 10 or a = 20 or a = 30 ORDER BY a;

=== modified file 'mysql-test/suite/ndb/t/ndb_dd_restore_compat.test'
--- a/mysql-test/suite/ndb/t/ndb_dd_restore_compat.test	2010-10-25 09:15:03 +0000
+++ b/mysql-test/suite/ndb/t/ndb_dd_restore_compat.test	2011-05-06 14:11:46 +0000
@@ -1,10 +1,10 @@
 -- source include/have_ndb.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -p 1 -m -r $backup_data_dir/ndb_backup51_dd >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -e -b 1 -n 2 -p 1 -r $backup_data_dir/ndb_backup51_dd >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -p 1 -m -r $backup_data_dir/51_dd >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -e -b 1 -n 2 -p 1 -r $backup_data_dir/51_dd >> $NDB_TOOLS_OUTPUT
 
 # (privileges differ on embedded and server so replace)
 --replace_column 18 #

=== modified file 'mysql-test/suite/ndb/t/ndb_native_default_support.test'
--- a/mysql-test/suite/ndb/t/ndb_native_default_support.test	2010-11-10 13:39:11 +0000
+++ b/mysql-test/suite/ndb/t/ndb_native_default_support.test	2011-05-06 14:11:46 +0000
@@ -8,7 +8,7 @@
 -- source include/ndb_default_cluster.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 --disable_warnings
 DROP TABLE IF EXISTS t1,bit1;
@@ -189,8 +189,8 @@ DROP DATABASE mysqltest;
 --echo * Restore the backup from 6.3 or 6.4, which don't support native default value
 --echo ******************************************************************************
 
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup_before_native_default >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup_before_native_default >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/before_native_default >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/before_native_default >> $NDB_TOOLS_OUTPUT
 
 ####
 # Bug# 53539 Ndb : MySQLD default values in frm embedded in backup not endian-converted
@@ -572,8 +572,8 @@ SHOW CREATE TABLE t1;
 --let ndb_desc_opts= -d test t1
 --source suite/ndb/include/ndb_desc_print.inc
 
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -r --promote-attribute --exclude-missing-columns $backup_data_dir/ndb_backup_before_native_default >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r --promote-attribute --exclude-missing-columns $backup_data_dir/ndb_backup_before_native_default >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -r --promote-attribute --exclude-missing-columns $backup_data_dir/before_native_default >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r --promote-attribute --exclude-missing-columns $backup_data_dir/before_native_default >> $NDB_TOOLS_OUTPUT
 
 SELECT i, j, f, d, d2, ch, HEX(b), HEX(vb), HEX(blob1), text1, timestamp_c, newOne, newTwo from t1 order by i;
 

=== modified file 'mysql-test/suite/ndb/t/ndb_restore_compat_downward.test'
--- a/mysql-test/suite/ndb/t/ndb_restore_compat_downward.test	2011-02-22 03:29:24 +0000
+++ b/mysql-test/suite/ndb/t/ndb_restore_compat_downward.test	2011-05-06 14:11:46 +0000
@@ -6,7 +6,7 @@
 -- source include/have_case_sensitive_file_system.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 # This test currently requires case sensitive file system as the tables
 # are originally stored with uppercase
@@ -19,8 +19,8 @@ let $backup_data_dir=$MYSQL_TEST_DIR/sui
 DROP DATABASE IF EXISTS BANK;
 --enable_warnings
 CREATE DATABASE BANK default charset=latin1 default collate=latin1_bin;
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -p 1 -m -r $backup_data_dir/ndb_backup51 >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -e -b 1 -n 2 -p 1 -r $backup_data_dir/ndb_backup51 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -p 1 -m -r $backup_data_dir/51 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -e -b 1 -n 2 -p 1 -r $backup_data_dir/51 >> $NDB_TOOLS_OUTPUT
 USE BANK;
 --sorted_result
 SHOW TABLES;
@@ -56,8 +56,8 @@ TRUNCATE ACCOUNT_TYPE;
 --exec $NDB_DESC --no-defaults -d BANK ACCOUNT_TYPE | grep ForceVarPart
 
 # Restore data
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -p 1 -r $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -e -b 1 -n 2 -p 1 -r $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -p 1 -r $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -e -b 1 -n 2 -p 1 -r $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
 
 # Check data
 SELECT * FROM GL            ORDER BY TIME,ACCOUNT_TYPE;
@@ -65,8 +65,8 @@ SELECT * FROM ACCOUNT       ORDER BY ACC
 SELECT COUNT(*) FROM TRANSACTION;
 SELECT * FROM SYSTEM_VALUES ORDER BY SYSTEM_VALUES_ID;
 SELECT * FROM mysql.ndb_apply_status WHERE server_id=0;
---exec $NDB_RESTORE --no-defaults -b 2 -n 1 -m -p 1 -s -r $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 2 -n 2 -p 1 -s -r $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 2 -n 1 -m -p 1 -s -r $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 2 -n 2 -p 1 -s -r $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
 SELECT * FROM DESCRIPTION ORDER BY USERNAME;
 --exec $NDB_DESC --no-defaults -d BANK DESCRIPTION | grep SHORT_VAR
 --exec $NDB_DESC --no-defaults -d BANK DESCRIPTION | grep MEDIUM_VAR
@@ -78,8 +78,8 @@ DROP TABLE TRANSACTION;
 DROP TABLE SYSTEM_VALUES;
 DROP TABLE ACCOUNT_TYPE;
 
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -m $backup_data_dir/ndb_backup_packed >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -p 1 -r $backup_data_dir/ndb_backup_packed >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -m $backup_data_dir/packed >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -p 1 -r $backup_data_dir/packed >> $NDB_TOOLS_OUTPUT
 
 SELECT * FROM GL            ORDER BY TIME,ACCOUNT_TYPE;
 SELECT * FROM ACCOUNT       ORDER BY ACCOUNT_ID;
@@ -96,7 +96,7 @@ drop table t1;
 # bug#54613
 
 --error 1
---exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 2 -m --core=0 --include-databases=ham --skip-unknown-objects $backup_data_dir/ndb_backup_bug54613 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 2 -m --core=0 --include-databases=ham --skip-unknown-objects $backup_data_dir/bug54613 >> $NDB_TOOLS_OUTPUT
 
 --error 0
---exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 2 -m --core=0 --include-databases=ham --skip-unknown-objects --skip-broken-objects $backup_data_dir/ndb_backup_bug54613 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 2 -m --core=0 --include-databases=ham --skip-unknown-objects --skip-broken-objects $backup_data_dir/bug54613 >> $NDB_TOOLS_OUTPUT

=== modified file 'mysql-test/suite/ndb/t/ndb_restore_compat_endianness.test'
--- a/mysql-test/suite/ndb/t/ndb_restore_compat_endianness.test	2011-02-22 03:29:24 +0000
+++ b/mysql-test/suite/ndb/t/ndb_restore_compat_endianness.test	2011-05-06 14:11:46 +0000
@@ -6,7 +6,7 @@
 -- source include/ndb_default_cluster.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 #
 # Bug #27543 restore of backup from different endian does not work for blob column
@@ -151,8 +151,8 @@ let $backup_data_dir=$MYSQL_TEST_DIR/sui
 USE test;
 DROP TABLE IF EXISTS t_num,t_datetime,t_string_1,t_string_2,t_gis;
 --enable_warnings
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup51_data_le >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup51_data_le >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/51_data_le >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/51_data_le >> $NDB_TOOLS_OUTPUT
 --sorted_result
 SHOW TABLES;
 SHOW CREATE TABLE t_num;
@@ -173,8 +173,8 @@ SELECT AsText(t_geometrycollection), AsT
 #
 
 DROP TABLE t_num,t_datetime,t_string_1,t_string_2,t_gis;
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup51_data_be >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup51_data_be >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/51_data_be >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/51_data_be >> $NDB_TOOLS_OUTPUT
 --sorted_result
 SHOW TABLES;
 SHOW CREATE TABLE t_num;

=== modified file 'mysql-test/suite/ndb/t/ndb_restore_misc.test'
--- a/mysql-test/suite/ndb/t/ndb_restore_misc.test	2011-04-11 13:36:12 +0000
+++ b/mysql-test/suite/ndb/t/ndb_restore_misc.test	2011-05-06 14:11:46 +0000
@@ -6,7 +6,7 @@
 -- source include/ndb_default_cluster.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 #
 # Bug #27775 - mediumint auto inc not restored correctly
@@ -484,9 +484,9 @@ source include/ndb_backup_id.inc;
 #
 
 # ensure correct restore of epoch numbers in old versions
---exec $NDB_RESTORE --no-defaults --core=0 -e -b 1 -n 1 $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults --core=0 -e -b 1 -n 1 $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
 select epoch from mysql.ndb_apply_status where server_id=0;
---exec $NDB_RESTORE --no-defaults --core=0 -e -b 1 -n 1 $backup_data_dir/ndb_backup51 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults --core=0 -e -b 1 -n 1 $backup_data_dir/51 >> $NDB_TOOLS_OUTPUT
 select epoch from mysql.ndb_apply_status where server_id=0;
 # ensure correct restore of epoch numbers in current version
 # number should be "big"
@@ -497,12 +497,12 @@ select epoch > (1 << 32) from mysql.ndb_
 #
 # Bug#40428 core dumped when restore backup log file(redo log)
 #
---exec $NDB_RESTORE --print --print_meta -b 1 -n 1 $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --print --print_meta -b 1 -n 1 $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
 
 #
 # Bug #33040 ndb_restore crashes with --print_log
 #
---exec $NDB_RESTORE --print_log -b 1 -n 1 $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --print_log -b 1 -n 1 $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
 
 #
 # Bug#48005 ndb backup / restore does not restore the auto_increment
@@ -554,7 +554,7 @@ drop table ndb_show_tables_results;
 #
 # Bug#51432
 #
---exec $NDB_RESTORE --no-defaults --core=0 -e -b 1 -n 2 -m $backup_data_dir/ndb_backup_hashmap >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults --core=0 -e -b 1 -n 2 -m $backup_data_dir/hashmap >> $NDB_TOOLS_OUTPUT
 
 #
 # Bug#56285

=== modified file 'mysql-test/suite/ndb/t/ndb_restore_undolog.test'
--- a/mysql-test/suite/ndb/t/ndb_restore_undolog.test	2010-10-25 09:15:03 +0000
+++ b/mysql-test/suite/ndb/t/ndb_restore_undolog.test	2011-05-06 14:11:46 +0000
@@ -2,7 +2,7 @@
 -- source include/ndb_default_cluster.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 #
 # The table structure and data list below
@@ -440,8 +440,8 @@ let $backup_data_dir=$MYSQL_TEST_DIR/sui
 USE test;
 DROP TABLE IF EXISTS t_num,t_datetime,t_string_1,t_string_2,t_gis,t_string_3,t_string_4,t_string_5;
 --enable_warnings
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup51_undolog_le >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup51_undolog_le >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/51_undolog_le >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/51_undolog_le >> $NDB_TOOLS_OUTPUT
 --sorted_result
 SHOW TABLES;
 SHOW CREATE TABLE t_num;
@@ -480,8 +480,8 @@ ENGINE =NDB;
 #
 
 USE test;
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup51_undolog_be >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup51_undolog_be >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/51_undolog_be >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/51_undolog_be >> $NDB_TOOLS_OUTPUT
 --sorted_result
 SHOW TABLES;
 SHOW CREATE TABLE t_num;

=== modified file 'mysql-test/suite/ndb_binlog/t/ndb_binlog_restore.test'
--- a/mysql-test/suite/ndb_binlog/t/ndb_binlog_restore.test	2010-10-25 09:15:03 +0000
+++ b/mysql-test/suite/ndb_binlog/t/ndb_binlog_restore.test	2011-05-06 14:11:46 +0000
@@ -2,7 +2,7 @@
 -- source include/have_binlog_format_mixed_or_row.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 --disable_warnings
 drop table if exists t1;
@@ -123,8 +123,8 @@ CREATE TABLE ACCOUNT_TYPE ( ACCOUNT_TYPE
 --echo #
 --echo # reset, restore and  binlog should _not_ happen
 reset master;
---exec $NDB_RESTORE --no-defaults --no-binlog -b 1 -n 1 -p 1 -r $backup_data_dir/ndb_backup51 >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults --no-binlog -b 1 -n 2 -p 1 -r $backup_data_dir/ndb_backup51 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults --no-binlog -b 1 -n 1 -p 1 -r $backup_data_dir/51 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults --no-binlog -b 1 -n 2 -p 1 -r $backup_data_dir/51 >> $NDB_TOOLS_OUTPUT
 
 select count(*) from TRANSACTION;
 --source include/show_binlog_events2.inc

=== modified file 'storage/ndb/CMakeLists.txt'
--- a/storage/ndb/CMakeLists.txt	2011-05-06 12:19:04 +0000
+++ b/storage/ndb/CMakeLists.txt	2011-05-07 10:50:14 +0000
@@ -168,14 +168,14 @@ IF(EXISTS ${CMAKE_SOURCE_DIR}/storage/my
 ELSE()
   # New plugin support, cross-platform
   
-  # NDB is MANDATORY plugin in MySQL Cluster i.e it's always built
-  SET(is_mandatory_plugin "")
-  IF(MYSQL_CLUSTER_VERSION)
-    SET(is_mandatory_plugin "MANDATORY")
-  ENDIF()
+  # NDB is a DEFAULT plugin in MySQL Cluster, i.e. it is built by default
+  SET(is_default_plugin "")
+  IF(MYSQL_CLUSTER_VERSION)
+    SET(is_default_plugin "DEFAULT")
+  ENDIF()
 
   MYSQL_ADD_PLUGIN(ndbcluster ${NDBCLUSTER_SOURCES} STORAGE_ENGINE
-    ${is_mandatory_plugin} STATIC_ONLY RECOMPILE_FOR_EMBEDDED
+    ${is_default_plugin} STATIC_ONLY RECOMPILE_FOR_EMBEDDED
     LINK_LIBRARIES ndbclient)
 
   IF (NOT MCP_BUG58158)

=== modified file 'storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp'
--- a/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp	2011-05-07 06:17:02 +0000
@@ -170,6 +170,7 @@ public:
     DumpBackup = 13000,
     DumpBackupSetCompressed = 13001,
     DumpBackupSetCompressedLCP = 13002,
+    BackupErrorInsert = 13003,
 
     DumpDbinfo = 14000,
     DbinfoListTables = 14001,

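The new BackupErrorInsert dump code is driven from test programs via
NdbRestarter. A minimal sketch (an illustration, not part of this changeset;
the include paths follow the NDBT test convention and are an assumption) of
sending the two-argument form, mirroring runInit899() in the testBasic.cpp
hunk further down:

  #include <NdbRestarter.hpp>
  #include <signaldata/DumpStateOrd.hpp>

  // Ship { dump-code, error-no, extra } to one data node; the words arrive
  // as theData[0..2] in Backup::execDUMP_STATE_ORD() (next hunk below).
  int setBackupErrorInsert(NdbRestarter& restarter, int nodeId,
                           int errorNo, int extra)
  {
    int args[3] = { DumpStateOrd::BackupErrorInsert, errorNo, extra };
    return restarter.dumpStateOneNode(nodeId, args, 3);
  }

With errorNo 10042 and extra set to a table id, checkScan() in the Backup.cpp
hunk below delays each SCAN_NEXTREQ for that table by 10 ms, keeping the
backup/LCP scan open long enough to exercise the new lcp-keep-list code.
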
=== modified file 'storage/ndb/src/kernel/blocks/backup/Backup.cpp'
--- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp	2011-02-24 09:46:11 +0000
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp	2011-05-07 06:17:02 +0000
@@ -662,6 +662,16 @@ Backup::execDUMP_STATE_ORD(Signal* signa
     c_defaults.m_compressed_lcp= signal->theData[1];
     infoEvent("Compressed LCP: %d", c_defaults.m_compressed_lcp);
   }
+
+  if (signal->theData[0] == DumpStateOrd::BackupErrorInsert)
+  {
+    if (signal->getLength() == 1)
+      ndbout_c("BACKUP: setting error %u", signal->theData[1]);
+    else
+      ndbout_c("BACKUP: setting error %u, %u",
+               signal->theData[1], signal->theData[2]);
+    SET_ERROR_INSERT_VALUE2(signal->theData[1], signal->theData[2]);
+  }
 }
 
 void Backup::execDBINFO_SCANREQ(Signal *signal)
@@ -4578,6 +4588,13 @@ Backup::checkScan(Signal* signal, Backup
       sendSignal(ptr.p->masterRef, GSN_ABORT_BACKUP_ORD, signal, 
 		 AbortBackupOrd::SignalLength, JBB);
     }
+#ifdef ERROR_INSERT
+    else if (ERROR_INSERTED(10042) && filePtr.p->tableId == c_error_insert_extra)
+    {
+      sendSignalWithDelay(lqhRef, GSN_SCAN_NEXTREQ, signal,
+			  10, ScanFragNextReq::SignalLength);
+    }
+#endif
     else
     {
       sendSignal(lqhRef, GSN_SCAN_NEXTREQ, signal, 

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-05-07 06:17:02 +0000
@@ -723,9 +723,10 @@ struct Fragrecord {
   Uint32 m_free_page_id_list;
   DynArr256::Head m_page_map;
   DLFifoList<Page>::Head thFreeFirst;   // pages with at least 1 free record
-  
+
   Uint32 m_lcp_scan_op;
-  Uint32 m_lcp_keep_list;
+  Local_key m_lcp_keep_list_head;
+  Local_key m_lcp_keep_list_tail;
 
   enum FragState
   { FS_FREE
@@ -1439,9 +1440,8 @@ typedef Ptr<HostBuffer> HostBufferPtr;
     STATIC_CONST( MM_SHRINK   = 0x00200000 ); // Has MM part shrunk
     STATIC_CONST( MM_GROWN    = 0x00400000 ); // Has MM part grown
     STATIC_CONST( FREED       = 0x00800000 ); // Is freed
+    STATIC_CONST( FREE        = 0x00800000 ); // alias
     STATIC_CONST( LCP_SKIP    = 0x01000000 ); // Should not be returned in LCP
-    STATIC_CONST( LCP_KEEP    = 0x02000000 ); // Should be returned in LCP
-    STATIC_CONST( FREE        = 0x02800000 ); // Is free
     STATIC_CONST( VAR_PART    = 0x04000000 ); // Is there a varpart
     STATIC_CONST( REORG_MOVE  = 0x08000000 );
 
@@ -3215,6 +3215,8 @@ private:
   Uint32* get_default_ptr(const Tablerec*, Uint32&);
   Uint32 get_len(Ptr<Page>* pagePtr, Var_part_ref ref);
 
+  STATIC_CONST( COPY_TUPLE_HEADER32 = 4 );
+
   Tuple_header* alloc_copy_tuple(const Tablerec* tabPtrP, Local_key* ptr){
     Uint32 * dst = c_undo_buffer.alloc_copy_tuple(ptr,
                                                   tabPtrP->total_rec_size);
@@ -3224,7 +3226,7 @@ private:
     bzero(dst, tabPtrP->total_rec_size);
 #endif
     Uint32 count = tabPtrP->m_no_of_attributes;
-    ChangeMask * mask = (ChangeMask*)(dst);
+    ChangeMask * mask = (ChangeMask*)(dst + COPY_TUPLE_HEADER32);
     mask->m_cols = count;
     return (Tuple_header*)(mask->end_of_mask(count));
   }
@@ -3234,11 +3236,12 @@ private:
   }
 
   Tuple_header * get_copy_tuple(Uint32 * rawptr) {
-    return (Tuple_header*)(get_change_mask_ptr(rawptr)->end_of_mask());
+    return (Tuple_header*)
+      (get_change_mask_ptr(rawptr)->end_of_mask());
   }
 
   ChangeMask * get_change_mask_ptr(Uint32 * rawptr) {
-    return (ChangeMask*)(rawptr);
+    return (ChangeMask*)(rawptr + COPY_TUPLE_HEADER32);
   }
 
   Tuple_header* get_copy_tuple(const Local_key* ptr){
@@ -3250,7 +3253,7 @@ private:
     Uint32 * tmp = raw - (1 + ((tabP->m_no_of_attributes + 31) >> 5));
     ChangeMask* mask = (ChangeMask*)tmp;
     assert(mask->end_of_mask() == raw);
-    assert(get_copy_tuple(tmp) == copytuple);
+    assert(get_copy_tuple(tmp - COPY_TUPLE_HEADER32) == copytuple);
     return mask;
   }
 
@@ -3382,10 +3385,10 @@ private:
                          Page_cache_client::Request,
                          OperationrecPtr);
   int retrieve_log_page(Signal*, FragrecordPtr, OperationrecPtr);
-  
-  void dealloc_tuple(Signal* signal, Uint32, Page*, Tuple_header*, 
-		     Operationrec*, Fragrecord*, Tablerec*);
-  
+
+  void dealloc_tuple(Signal* signal, Uint32, Page*, Tuple_header*,
+		     KeyReqStruct*, Operationrec*, Fragrecord*, Tablerec*);
+
   int handle_size_change_after_update(KeyReqStruct* req_struct,
 				      Tuple_header* org,
 				      Operationrec*,
@@ -3411,7 +3414,31 @@ private:
   void check_page_map(Fragrecord*);
   bool find_page_id_in_list(Fragrecord*, Uint32 pid);
 #endif
-  void handle_lcp_keep(Signal*, Fragrecord*, ScanOp*, Uint32 rowid);
+  void handle_lcp_keep(Signal*, Fragrecord*, ScanOp*);
+  void handle_lcp_keep_commit(const Local_key*,
+                              KeyReqStruct *,
+                              Operationrec*, Fragrecord*, Tablerec*);
+
+  void setup_lcp_read_copy_tuple( KeyReqStruct *,
+                                  Operationrec*,
+                                  Fragrecord*,
+                                  Tablerec*);
+
+  bool isCopyTuple(Uint32 pageid, Uint32 pageidx) const {
+    return (pageidx & (Uint16(1) << 15)) != 0;
+  }
+
+  void setCopyTuple(Uint32& pageid, Uint16& pageidx) const {
+    assert(!isCopyTuple(pageid, pageidx));
+    pageidx |= (Uint16(1) << 15);
+    assert(isCopyTuple(pageid, pageidx));
+  }
+
+  void clearCopyTuple(Uint32& pageid, Uint16& pageidx) const {
+    assert(isCopyTuple(pageid, pageidx));
+    pageidx &= ~(Uint16(1) << 15);
+    assert(!isCopyTuple(pageid, pageidx));
+  }
 };
 
 #if 0

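The new copy-tuple helpers above rely on page indexes fitting in 15 bits, so
bit 15 of the 16-bit page index can tag a local key as a reference into the
copy-tuple (undo buffer) area rather than a normal page row. A standalone
sketch of the same scheme (plain C++ stand-ins, an illustration only; the
real helpers also carry the unused pageid argument):

  #include <cassert>
  #include <cstdint>

  const uint16_t kCopyTupleBit = uint16_t(1) << 15;  // top bit of page_idx

  bool isCopyTuple(uint16_t pageidx)
  { return (pageidx & kCopyTupleBit) != 0; }

  uint16_t setCopyTuple(uint16_t pageidx)        // tag a copy-tuple reference
  { assert(!isCopyTuple(pageidx)); return uint16_t(pageidx | kCopyTupleBit); }

  uint16_t clearCopyTuple(uint16_t pageidx)      // recover the real index
  { assert(isCopyTuple(pageidx)); return uint16_t(pageidx & ~kCopyTupleBit); }

  int main()
  {
    uint16_t idx = 42;
    uint16_t tagged = setCopyTuple(idx);
    assert(isCopyTuple(tagged) && clearCopyTuple(tagged) == idx);
    return 0;
  }
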
=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp	2011-05-07 06:17:02 +0000
@@ -51,15 +51,8 @@ void Dbtup::execTUP_DEALLOCREQ(Signal* s
     PagePtr pagePtr;
     Tuple_header* ptr= (Tuple_header*)get_ptr(&pagePtr, &tmp, regTabPtr.p);
 
-    ndbassert(ptr->m_header_bits & Tuple_header::FREE);
+    ndbassert(ptr->m_header_bits & Tuple_header::FREED);
 
-    if (ptr->m_header_bits & Tuple_header::LCP_KEEP)
-    {
-      ndbassert(! (ptr->m_header_bits & Tuple_header::FREED));
-      ptr->m_header_bits |= Tuple_header::FREED;
-      return;
-    }
-    
     if (regTabPtr.p->m_attributes[MM].m_no_of_varsize +
         regTabPtr.p->m_attributes[MM].m_no_of_dynamic)
     {
@@ -157,12 +150,12 @@ Dbtup::dealloc_tuple(Signal* signal,
 		     Uint32 gci,
 		     Page* page,
 		     Tuple_header* ptr, 
+                     KeyReqStruct * req_struct,
 		     Operationrec* regOperPtr, 
 		     Fragrecord* regFragPtr, 
 		     Tablerec* regTabPtr)
 {
   Uint32 lcpScan_ptr_i= regFragPtr->m_lcp_scan_op;
-  Uint32 lcp_keep_list = regFragPtr->m_lcp_keep_list;
 
   Uint32 bits = ptr->m_header_bits;
   Uint32 extra_bits = Tuple_header::FREED;
@@ -189,9 +182,15 @@ Dbtup::dealloc_tuple(Signal* signal,
     if (!is_rowid_lcp_scanned(rowid, *scanOp.p))
     {
       jam();
-      extra_bits = Tuple_header::LCP_KEEP; // Note REMOVE FREE
-      ptr->m_operation_ptr_i = lcp_keep_list;
-      regFragPtr->m_lcp_keep_list = rowid.ref();
+
+      /**
+       * We're committing a delete on a row that should
+       *   be part of the LCP. Copy the original row into a
+       *   copy-tuple and add that copy-tuple to the lcp-keep-list.
+       */
+      handle_lcp_keep_commit(&rowid,
+                             req_struct, regOperPtr, regFragPtr, regTabPtr);
     }
   }
   
@@ -204,6 +203,69 @@ Dbtup::dealloc_tuple(Signal* signal,
   }
 }
 
+void
+Dbtup::handle_lcp_keep_commit(const Local_key* rowid,
+                              KeyReqStruct * req_struct,
+                              Operationrec * opPtrP,
+                              Fragrecord * regFragPtr,
+                              Tablerec * regTabPtr)
+{
+  bool disk = false;
+  Uint32 sizes[4];
+  Uint32 * copytuple = get_copy_tuple_raw(&opPtrP->m_copy_tuple_location);
+  Tuple_header * dst = get_copy_tuple(copytuple);
+  Tuple_header * org = req_struct->m_tuple_ptr;
+  if (regTabPtr->need_expand(disk))
+  {
+    setup_fixed_part(req_struct, opPtrP, regTabPtr);
+    req_struct->m_tuple_ptr = dst;
+    expand_tuple(req_struct, sizes, org, regTabPtr, disk);
+    shrink_tuple(req_struct, sizes+2, regTabPtr, disk);
+  }
+  else
+  {
+    memcpy(dst, org, 4*regTabPtr->m_offsets[MM].m_fix_header_size);
+  }
+  dst->m_header_bits |= Tuple_header::COPY_TUPLE;
+
+  /**
+   * Store the original row-id in copytuple[0,1].
+   * Store the next-ptr in copytuple[2,3] (initialized to NIL).
+   */
+  assert(sizeof(Local_key) == 8);
+  memcpy(copytuple+0, rowid, sizeof(Local_key));
+
+  Local_key nil;
+  nil.setNull();
+  memcpy(copytuple+2, &nil, sizeof(nil));
+
+  /**
+   * Link it to list
+   */
+  if (regFragPtr->m_lcp_keep_list_tail.isNull())
+  {
+    jam();
+    regFragPtr->m_lcp_keep_list_head = opPtrP->m_copy_tuple_location;
+  }
+  else
+  {
+    jam();
+    Uint32 * tail = get_copy_tuple_raw(&regFragPtr->m_lcp_keep_list_tail);
+    Local_key nextptr;
+    memcpy(&nextptr, tail+2, sizeof(Local_key));
+    ndbassert(nextptr.isNull());
+    nextptr = opPtrP->m_copy_tuple_location;
+    memcpy(tail+2, &nextptr, sizeof(Local_key));
+  }
+  regFragPtr->m_lcp_keep_list_tail = opPtrP->m_copy_tuple_location;
+
+  /**
+   * And finally clear m_copy_tuple_location so that it won't be freed
+   */
+  opPtrP->m_copy_tuple_location.setNull();
+}
+
 #if 0
 static void dump_buf_hex(unsigned char *p, Uint32 bytes)
 {
@@ -786,7 +848,7 @@ skip_disk:
 	ndbassert(tuple_ptr->m_header_bits & Tuple_header::DISK_PART);
       }
       dealloc_tuple(signal, gci_hi, page.p, tuple_ptr,
-		    regOperPtr.p, regFragPtr.p, regTabPtr.p); 
+		    &req_struct, regOperPtr.p, regFragPtr.p, regTabPtr.p);
     }
   } 
 

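The hunk above replaces the old header-bit scheme with an intrusive list
threaded through the copy-tuples themselves: words 0..1 of each raw
copy-tuple hold the original rowid, words 2..3 the next-pointer, and the
fragment record keeps head and tail keys. A toy model of the append and
pop-front shapes (simplified stand-ins, an assumption; not the NDB types):

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  struct LocalKey {                       // stand-in for Local_key (8 bytes)
    uint32_t page_no = 0xFFFFFFFF;
    uint32_t page_idx = 0xFFFFFFFF;
    bool isNull() const { return page_no == 0xFFFFFFFF; }
    void setNull() { *this = LocalKey(); }
  };

  // Tiny arena standing in for the undo buffer: page_no indexes a slot.
  static uint32_t arena[4][8];
  static uint32_t* getCopyTupleRaw(const LocalKey* k)
  { return arena[k->page_no]; }

  struct Frag { LocalKey head, tail; };   // m_lcp_keep_list_{head,tail}

  // Append shape of handle_lcp_keep_commit() above
  void appendKeep(Frag& frag, const LocalKey& loc, const LocalKey& rowid)
  {
    uint32_t* copytuple = getCopyTupleRaw(&loc);
    memcpy(copytuple + 0, &rowid, sizeof(LocalKey)); // words 0..1: rowid
    LocalKey nil;
    memcpy(copytuple + 2, &nil, sizeof(nil));        // words 2..3: next=NIL
    if (frag.tail.isNull())
      frag.head = loc;                               // list was empty
    else                                             // link behind old tail
      memcpy(getCopyTupleRaw(&frag.tail) + 2, &loc, sizeof(LocalKey));
    frag.tail = loc;
  }

  // Pop-front shape of handle_lcp_keep() (DbtupScan.cpp hunk below)
  LocalKey popKeep(Frag& frag)
  {
    assert(!frag.head.isNull());
    LocalKey tmp = frag.head;
    memcpy(&frag.head, getCopyTupleRaw(&tmp) + 2, sizeof(LocalKey));
    if (frag.head.isNull())
      frag.tail.setNull();                           // popped the last element
    return tmp;
  }

  int main()
  {
    Frag frag;
    LocalKey a; a.page_no = 0; a.page_idx = 0;
    LocalKey b; b.page_no = 1; b.page_idx = 0;
    LocalKey row; row.page_no = 2; row.page_idx = 3;
    appendKeep(frag, a, row);
    appendKeep(frag, b, row);
    assert(popKeep(frag).page_no == 0);
    assert(popKeep(frag).page_no == 1);
    assert(frag.head.isNull() && frag.tail.isNull());
    return 0;
  }

Clearing m_copy_tuple_location at the end of handle_lcp_keep_commit() is what
transfers ownership: commit no longer frees the copy-tuple, the LCP scan does
(free_copy_tuple() in the DbtupScan.cpp hunk below).
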
=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2011-05-07 06:17:02 +0000
@@ -635,6 +635,17 @@ void Dbtup::execTUPKEYREQ(Signal* signal
      goto do_insert;
    }
 
+   if (unlikely(isCopyTuple(pageid, pageidx)))
+   {
+     /**
+      * Only LCP reads a copy-tuple "directly"
+      */
+     ndbassert(Roptype == ZREAD);
+     ndbassert(disk_page == RNIL);
+     setup_lcp_read_copy_tuple(&req_struct, regOperPtr, regFragPtr, regTabPtr);
+     goto do_read;
+   }
+
    /**
     * Get pointer to tuple
     */
@@ -652,6 +663,7 @@ void Dbtup::execTUPKEYREQ(Signal* signal
      if (setup_read(&req_struct, regOperPtr, regFragPtr, regTabPtr, 
 		    disk_page != RNIL))
      {
+   do_read:
        if(handleReadReq(signal, regOperPtr, regTabPtr, &req_struct) != -1) 
        {
 	 req_struct.log_size= 0;
@@ -847,6 +859,44 @@ Dbtup::setup_fixed_part(KeyReqStruct* re
   req_struct->attr_descr= tab_descr; 
 }
 
+void
+Dbtup::setup_lcp_read_copy_tuple(KeyReqStruct* req_struct,
+                                 Operationrec* regOperPtr,
+                                 Fragrecord* regFragPtr,
+                                 Tablerec* regTabPtr)
+{
+  Local_key tmp;
+  tmp.m_page_no = req_struct->frag_page_id;
+  tmp.m_page_idx = regOperPtr->m_tuple_location.m_page_idx;
+  clearCopyTuple(tmp.m_page_no, tmp.m_page_idx);
+
+  Uint32 * copytuple = get_copy_tuple_raw(&tmp);
+  Local_key rowid;
+  memcpy(&rowid, copytuple+0, sizeof(Local_key));
+
+  req_struct->frag_page_id = rowid.m_page_no;
+  regOperPtr->m_tuple_location.m_page_idx = rowid.m_page_idx;
+
+  Tuple_header * th = get_copy_tuple(copytuple);
+  req_struct->m_page_ptr.setNull();
+  req_struct->m_tuple_ptr = (Tuple_header*)th;
+  th->m_operation_ptr_i = RNIL;
+  ndbassert((th->m_header_bits & Tuple_header::COPY_TUPLE) != 0);
+
+  Uint32 num_attr= regTabPtr->m_no_of_attributes;
+  Uint32 descr_start= regTabPtr->tabDescriptor;
+  TableDescriptor *tab_descr= &tableDescriptor[descr_start];
+  ndbrequire(descr_start + (num_attr << ZAD_LOG_SIZE) <= cnoOfTabDescrRec);
+  req_struct->attr_descr= tab_descr;
+
+  bool disk = false;
+  if (regTabPtr->need_expand(disk))
+  {
+    jam();
+    prepare_read(req_struct, regTabPtr, disk);
+  }
+}
+
  /* ---------------------------------------------------------------- */
  /* ------------------------ CONFIRM REQUEST ----------------------- */
  /* ---------------------------------------------------------------- */
@@ -1904,6 +1954,13 @@ int Dbtup::handleDeleteReq(Signal* signa
                            KeyReqStruct *req_struct,
 			   bool disk)
 {
+  Tuple_header* dst = alloc_copy_tuple(regTabPtr,
+                                       &regOperPtr->m_copy_tuple_location);
+  if (dst == 0) {
+    terrorCode = ZMEM_NOMEM_ERROR;
+    goto error;
+  }
+
   // delete must set but not increment tupVersion
   if (!regOperPtr->is_first_operation())
   {
@@ -1911,24 +1968,25 @@ int Dbtup::handleDeleteReq(Signal* signa
     regOperPtr->tupVersion= prevOp->tupVersion;
     // make copy since previous op is committed before this one
     const Tuple_header* org = get_copy_tuple(&prevOp->m_copy_tuple_location);
-    Tuple_header* dst = alloc_copy_tuple(regTabPtr,
-                                         &regOperPtr->m_copy_tuple_location);
-    if (dst == 0) {
-      terrorCode = ZMEM_NOMEM_ERROR;
-      goto error;
-    }
-    Uint32 len = regTabPtr->total_rec_size - 
-      Uint32(((Uint32*)dst) - 
+    Uint32 len = regTabPtr->total_rec_size -
+      Uint32(((Uint32*)dst) -
              get_copy_tuple_raw(&regOperPtr->m_copy_tuple_location));
     memcpy(dst, org, 4 * len);
     req_struct->m_tuple_ptr = dst;
-    set_change_mask_info(regTabPtr, get_change_mask_ptr(regTabPtr, dst));
   }
-  else 
+  else
   {
     regOperPtr->tupVersion= req_struct->m_tuple_ptr->get_tuple_version();
+    if (regTabPtr->m_no_of_disk_attributes)
+    {
+      dst->m_header_bits = req_struct->m_tuple_ptr->m_header_bits;
+      memcpy(dst->get_disk_ref_ptr(regTabPtr),
+	     req_struct->m_tuple_ptr->get_disk_ref_ptr(regTabPtr),
+             sizeof(Local_key));
+    }
   }
   req_struct->changeMask.set();
+  set_change_mask_info(regTabPtr, get_change_mask_ptr(regTabPtr, dst));
 
   if(disk && regOperPtr->m_undo_buffer_space == 0)
   {

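On the read side, execTUPKEYREQ() spots the tagged page index and
setup_lcp_read_copy_tuple() untags it, then swaps the original rowid back in
from the copy-tuple header so the LCP records the row under its real rowid.
The same step in terms of the stand-ins from the model above (sketch only):

  // Reuses LocalKey/getCopyTupleRaw from the previous sketch.
  LocalKey resolveLcpRead(LocalKey tagged)
  {
    tagged.page_idx &= ~(uint32_t(1) << 15);           // clearCopyTuple()
    const uint32_t* copytuple = getCopyTupleRaw(&tagged);
    LocalKey rowid;
    memcpy(&rowid, copytuple + 0, sizeof(LocalKey));   // header words 0..1
    return rowid;
  }
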
=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp	2011-05-07 06:17:02 +0000
@@ -670,7 +670,6 @@ void Dbtup::initializeDefaultValuesFrag(
   DefaultValuesFragment.p->fragStatus = Fragrecord::FS_ONLINE;
   DefaultValuesFragment.p->m_undo_complete= false;
   DefaultValuesFragment.p->m_lcp_scan_op = RNIL;
-  DefaultValuesFragment.p->m_lcp_keep_list = RNIL;
   DefaultValuesFragment.p->noOfPages = 0;
   DefaultValuesFragment.p->noOfVarPages = 0;
   DefaultValuesFragment.p->m_max_page_no = 0;

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp	2011-04-18 15:36:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp	2011-05-07 06:17:02 +0000
@@ -703,7 +703,8 @@ void Dbtup::execTUPFRAGREQ(Signal* signa
   regFragPtr.p->m_tablespace_id= tablespace_id;
   regFragPtr.p->m_undo_complete= false;
   regFragPtr.p->m_lcp_scan_op = RNIL;
-  regFragPtr.p->m_lcp_keep_list = RNIL;
+  regFragPtr.p->m_lcp_keep_list_head.setNull();
+  regFragPtr.p->m_lcp_keep_list_tail.setNull();
   regFragPtr.p->noOfPages = 0;
   regFragPtr.p->noOfVarPages = 0;
   regFragPtr.p->m_max_page_no = 0;
@@ -1573,6 +1574,8 @@ Dbtup::computeTableMetaData(Tablerec *re
   /* Room for changemask */
   total_rec_size += 1 + ((regTabPtr->m_no_of_attributes + 31) >> 5);
 
+  total_rec_size += COPY_TUPLE_HEADER32;
+
   regTabPtr->total_rec_size= total_rec_size;
 
   setUpQueryRoutines(regTabPtr);

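With COPY_TUPLE_HEADER32 (= 4 words, see the Dbtup.hpp hunk above) now
reserved in front of the change mask, every copy tuple has room for the rowid
and next-pointer that handle_lcp_keep_commit() stores there. A back-of-envelope
sketch of the per-record overhead this hunk adds (an assumption: only the
parts visible above are counted; the real total_rec_size also includes the
row parts):

  #include <cstdio>

  unsigned copyTupleOverheadWords(unsigned noOfAttributes)
  {
    const unsigned COPY_TUPLE_HEADER32 = 4;          // rowid + next-ptr header
    unsigned maskWords = 1 + ((noOfAttributes + 31) >> 5);  // changemask
    return COPY_TUPLE_HEADER32 + maskWords;
  }

  int main()
  {
    printf("%u\n", copyTupleOverheadWords(10));      // 4 + 2 = 6 words
    return 0;
  }
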
=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp	2011-04-19 09:01:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp	2011-05-07 06:17:02 +0000
@@ -287,9 +287,8 @@ Dbtup::execACC_CHECK_SCAN(Signal* signal
   }
 
   const bool lcp = (scan.m_bits & ScanOp::SCAN_LCP);
-  Uint32 lcp_list = fragPtr.p->m_lcp_keep_list;
 
-  if (lcp && lcp_list != RNIL)
+  if (lcp && ! fragPtr.p->m_lcp_keep_list_head.isNull())
   {
     jam();
     /**
@@ -297,7 +296,7 @@ Dbtup::execACC_CHECK_SCAN(Signal* signal
      *   So that scan state is not altered
      *   if lcp_keep rows are found in ScanOp::First
      */
-    handle_lcp_keep(signal, fragPtr.p, scanPtr.p, lcp_list);
+    handle_lcp_keep(signal, fragPtr.p, scanPtr.p);
     return;
   }
 
@@ -692,19 +691,18 @@ Dbtup::scanNext(Signal* signal, ScanOpPt
  
   const bool mm = (bits & ScanOp::SCAN_DD);
   const bool lcp = (bits & ScanOp::SCAN_LCP);
-  
-  Uint32 lcp_list = fragPtr.p->m_lcp_keep_list;
+
   const Uint32 size = ((bits & ScanOp::SCAN_VS) == 0) ?
     table.m_offsets[mm].m_fix_header_size : 1;
   const Uint32 first = ((bits & ScanOp::SCAN_VS) == 0) ? 0 : 1;
 
-  if (lcp && lcp_list != RNIL)
+  if (lcp && ! fragPtr.p->m_lcp_keep_list_head.isNull())
   {
     jam();
     /**
     * Handle lcp keep list here too, due to scanCont
      */
-    handle_lcp_keep(signal, fragPtr.p, scanPtr.p, lcp_list);
+    handle_lcp_keep(signal, fragPtr.p, scanPtr.p);
     return false;
   }
 
@@ -1130,57 +1128,40 @@ Dbtup::scanNext(Signal* signal, ScanOpPt
 void
 Dbtup::handle_lcp_keep(Signal* signal,
                        Fragrecord* fragPtrP,
-                       ScanOp* scanPtrP,
-                       Uint32 lcp_list)
+                       ScanOp* scanPtrP)
 {
   TablerecPtr tablePtr;
   tablePtr.i = scanPtrP->m_tableId;
   ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
 
-  Local_key tmp;
-  tmp.assref(lcp_list);
-  tmp.m_page_no = getRealpid(fragPtrP, tmp.m_page_no);
-  
-  Ptr<Page> pagePtr;
-  c_page_pool.getPtr(pagePtr, tmp.m_page_no);
-  Tuple_header* ptr = (Tuple_header*)
-    ((Fix_page*)pagePtr.p)->get_ptr(tmp.m_page_idx, 0);
-  Uint32 headerbits = ptr->m_header_bits;
-  ndbrequire(headerbits & Tuple_header::LCP_KEEP);
-  
-  Uint32 next = ptr->m_operation_ptr_i;
-  ptr->m_operation_ptr_i = RNIL;
-  ptr->m_header_bits = headerbits & ~(Uint32)Tuple_header::FREE;
-  
-  if (tablePtr.p->m_bits & Tablerec::TR_Checksum) {
+  ndbassert(!fragPtrP->m_lcp_keep_list_head.isNull());
+  Local_key tmp = fragPtrP->m_lcp_keep_list_head;
+  Uint32 * copytuple = get_copy_tuple_raw(&tmp);
+  memcpy(&fragPtrP->m_lcp_keep_list_head,
+         copytuple+2,
+         sizeof(Local_key));
+
+  if (fragPtrP->m_lcp_keep_list_head.isNull())
+  {
     jam();
-    setChecksum(ptr, tablePtr.p);
+    ndbassert(tmp.m_page_no == fragPtrP->m_lcp_keep_list_tail.m_page_no);
+    ndbassert(tmp.m_page_idx == fragPtrP->m_lcp_keep_list_tail.m_page_idx);
+    fragPtrP->m_lcp_keep_list_tail.setNull();
   }
 
+  Local_key save = tmp;
+  setCopyTuple(tmp.m_page_no, tmp.m_page_idx);
   NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
   conf->scanPtr = scanPtrP->m_userPtr;
   conf->accOperationPtr = (Uint32)-1;
   conf->fragId = fragPtrP->fragmentId;
-  conf->localKey[0] = Local_key::ref2page_id(lcp_list);
-  conf->localKey[1] = Local_key::ref2page_idx(lcp_list);
+  conf->localKey[0] = tmp.m_page_no;
+  conf->localKey[1] = tmp.m_page_idx;
   conf->gci = 0;
   Uint32 blockNo = refToMain(scanPtrP->m_userRef);
   EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 6);
-  
-  fragPtrP->m_lcp_keep_list = next;
-  ptr->m_header_bits |= Tuple_header::FREED; // RESTORE free flag
-  if (headerbits & Tuple_header::FREED)
-  {
-    if (tablePtr.p->m_attributes[MM].m_no_of_varsize +
-        tablePtr.p->m_attributes[MM].m_no_of_dynamic)
-    {
-      jam();
-      free_var_rec(fragPtrP, tablePtr.p, &tmp, pagePtr);
-    } else {
-      jam();
-      free_fix_rec(fragPtrP, tablePtr.p, &tmp, (Fix_page*)pagePtr.p);
-    }
-  }
+
+  c_undo_buffer.free_copy_tuple(&save);
 }
 
 void
@@ -1320,4 +1301,7 @@ Dbtup::execLCP_FRAG_ORD(Signal* signal)
   new (scanPtr.p) ScanOp;
   scanPtr.p->m_fragPtrI = fragPtr.i;
   scanPtr.p->m_state = ScanOp::First;
+
+  ndbassert(frag.m_lcp_keep_list_head.isNull());
+  ndbassert(frag.m_lcp_keep_list_tail.isNull());
 }

=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2011-05-04 11:45:33 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2011-05-07 06:17:02 +0000
@@ -187,7 +187,7 @@ ErrorBundle ErrorCodes[] = {
   { 805,  DMEC, TR, "Out of attrinfo records in tuple manager" },
   { 830,  DMEC, TR, "Out of add fragment operation records" },
   { 873,  DMEC, TR, "Out of attrinfo records for scan in tuple manager" },
-  { 899,  DMEC, TR, "Rowid already allocated" },
+  { 899,  DMEC, IE, "Internal error: rowid already allocated" },
   { 1217, DMEC, TR, "Out of operation records in local data manager (increase MaxNoOfLocalOperations)" },
   { 1218, DMEC, TR, "Send Buffers overloaded in NDB kernel" },
   { 1220, DMEC, TR, "REDO log files overloaded (increase FragmentLogFileSize)" },

=== modified file 'storage/ndb/test/ndbapi/testBasic.cpp'
--- a/storage/ndb/test/ndbapi/testBasic.cpp	2011-04-07 07:22:49 +0000
+++ b/storage/ndb/test/ndbapi/testBasic.cpp	2011-05-07 06:17:02 +0000
@@ -2267,6 +2267,154 @@ runBug59496_case2(NDBT_Context* ctx, NDB
   return NDBT_OK;
 }
 
+#define CHK_RET_FAILED(x) if (!(x)) { ndbout_c("Failed on line: %u", __LINE__); return NDBT_FAILED; }
+
+int
+runTest899(NDBT_Context* ctx, NDBT_Step* step)
+{
+  Ndb* pNdb = GETNDB(step);
+  const NdbDictionary::Table* pTab = ctx->getTab();
+
+  const int rows = ctx->getNumRecords();
+  const int loops = ctx->getNumLoops();
+  const int batch = ctx->getProperty("Batch", Uint32(50));
+  const int until_stopped = ctx->getProperty("UntilStopped");
+
+  const NdbRecord * pRowRecord = pTab->getDefaultRecord();
+  CHK_RET_FAILED(pRowRecord != 0);
+
+  const Uint32 len = NdbDictionary::getRecordRowLength(pRowRecord);
+  Uint8 * pRow = new Uint8[len];
+
+  int count_ok = 0;
+  int count_failed = 0;
+  int count_899 = 0;
+  for (int i = 0; i < loops || (until_stopped && !ctx->isTestStopped()); i++)
+  {
+    ndbout_c("loop: %d",i);
+    int result = 0;
+    for (int rowNo = 0; rowNo < rows;)
+    {
+      NdbTransaction* pTrans = pNdb->startTransaction();
+      CHK_RET_FAILED(pTrans != 0);
+
+      for (int b = 0; rowNo < rows && b < batch; rowNo++, b++)
+      {
+        bzero(pRow, len);
+
+        HugoCalculator calc(* pTab);
+
+        NdbOperation::OperationOptions opts;
+        bzero(&opts, sizeof(opts));
+
+        const NdbOperation* pOp = 0;
+        switch(i % 2){
+        case 0:
+          calc.setValues(pRow, pRowRecord, rowNo, rand());
+          pOp = pTrans->writeTuple(pRowRecord, (char*)pRow,
+                                   pRowRecord, (char*)pRow,
+                                   0,
+                                   &opts,
+                                   sizeof(opts));
+          result = pTrans->execute(NoCommit);
+          break;
+        case 1:
+          calc.setValues(pRow, pRowRecord, rowNo, rand());
+          pOp = pTrans->deleteTuple(pRowRecord, (char*)pRow,
+                                    pRowRecord, (char*)pRow,
+                                    0,
+                                    &opts,
+                                    sizeof(opts));
+          result = pTrans->execute(NoCommit, AO_IgnoreError);
+          break;
+        }
+
+        CHK_RET_FAILED(pOp != 0);
+
+        if (result != 0)
+        {
+          goto found_error;
+        }
+      }
+      result = pTrans->execute(Commit);
+
+      if (result != 0)
+      {
+    found_error:
+        count_failed++;
+        NdbError err = pTrans->getNdbError();
+        if (! (err.status == NdbError::TemporaryError ||
+               err.classification == NdbError::NoDataFound ||
+               err.classification == NdbError::ConstraintViolation))
+        {
+          ndbout << err << endl;
+        }
+        CHK_RET_FAILED(err.status == NdbError::TemporaryError ||
+                       err.classification == NdbError::NoDataFound ||
+                       err.classification == NdbError::ConstraintViolation);
+        if (err.code == 899)
+        {
+          count_899++;
+          ndbout << err << endl;
+        }
+      }
+      else
+      {
+        count_ok++;
+      }
+      pTrans->close();
+    }
+  }
+
+  ndbout_c("count_ok: %d count_failed: %d (899: %d)",
+           count_ok, count_failed, count_899);
+  delete [] pRow;
+
+  return count_899 == 0 ? NDBT_OK : NDBT_FAILED;
+}
+
+int
+runInit899(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NdbRestarter restarter;
+  int val = DumpStateOrd::DihMinTimeBetweenLCP;
+  restarter.dumpStateAllNodes(&val, 1);
+
+  Ndb* pNdb = GETNDB(step);
+  const NdbDictionary::Table* pTab = ctx->getTab();
+  const NdbDictionary::Table * pTab2 = pNdb->getDictionary()->
+    getTable(pTab->getName());
+
+  int tableId = pTab2->getObjectId();
+  int val2[] = { DumpStateOrd::BackupErrorInsert, 10042, tableId };
+
+  for (int i = 0; i < restarter.getNumDbNodes(); i++)
+  {
+    if (i & 1)
+    {
+      int nodeId = restarter.getDbNodeId(i);
+      ndbout_c("Setting slow LCP of table %d on node %d",
+               tableId, nodeId);
+      restarter.dumpStateOneNode(nodeId, val2, 3);
+    }
+  }
+
+  return NDBT_OK;
+}
+
+int
+runEnd899(NDBT_Context* ctx, NDBT_Step* step)
+{
+  // reset LCP speed
+  NdbRestarter restarter;
+  int val[] = { DumpStateOrd::DihMinTimeBetweenLCP, 0 };
+  restarter.dumpStateAllNodes(val, 2);
+
+  restarter.insertErrorInAllNodes(0);
+  return NDBT_OK;
+}
+
+
 NDBT_TESTSUITE(testBasic);
 TESTCASE("PkInsert", 
 	 "Verify that we can insert and delete from this table using PK"
@@ -2618,6 +2766,13 @@ TESTCASE("Bug59496_case2", "")
   STEP(runBug59496_case2);
   STEPS(runBug59496_scan, 10);
 }
+TESTCASE("899", "")
+{
+  INITIALIZER(runLoadTable);
+  INITIALIZER(runInit899);
+  STEP(runTest899);
+  FINALIZER(runEnd899);
+}
 NDBT_TESTSUITE_END(testBasic);
 
 #if 0

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2011-05-03 06:21:59 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2011-05-07 06:17:02 +0000
@@ -32,6 +32,10 @@ max-time: 900
 cmd: testIndex
 args: -n NF_Mixed T1 T6 T13
 
+max-time: 900
+cmd: testBasic
+args: -r 5000 -n 899 T15 D1 D2
+
 max-time: 600
 cmd: atrt-testBackup
 args: -n NFMaster T1

No bundle (reason: useless for push emails).