List: Commits — « Previous Message | Next Message »
From: Ole John Aske    Date: May 11 2011 1:38pm
Subject:bzr push into mysql-5.1-telco-7.0-spj-scan-vs-scan branch
(ole.john.aske:3492 to 3493)
View as plain text  
 3493 Ole John Aske	2011-05-11 [merge]
      Merge telco-7.0 -> SPJ

    removed:
      mysql-test/suite/ndb/include/add_six_nodes.inc
      mysql-test/suite/ndb/include/add_two_nodes.inc
      mysql-test/suite/ndb/include/reload_ndb_mgmd.inc
      mysql-test/suite/ndb/r/add_node01.result
      mysql-test/suite/ndb/r/add_node02.result
      mysql-test/suite/ndb/r/add_node03.result
      mysql-test/suite/ndb/t/add_node01.test
      mysql-test/suite/ndb/t/add_node02.test
      mysql-test/suite/ndb/t/add_node03.test
    added:
      mysql-test/suite/ndb/data/
    renamed:
      mysql-test/suite/ndb/std_data/ => mysql-test/suite/ndb/backups/
      mysql-test/suite/ndb/std_data/ndb_backup50/ => mysql-test/suite/ndb/backups/50/
      mysql-test/suite/ndb/std_data/ndb_backup51/ => mysql-test/suite/ndb/backups/51/
      mysql-test/suite/ndb/std_data/ndb_backup51_d2_be/ => mysql-test/suite/ndb/backups/51_d2_be/
      mysql-test/suite/ndb/std_data/ndb_backup51_d2_le/ => mysql-test/suite/ndb/backups/51_d2_le/
      mysql-test/suite/ndb/std_data/ndb_backup51_data_be/ => mysql-test/suite/ndb/backups/51_data_be/
      mysql-test/suite/ndb/std_data/ndb_backup51_data_le/ => mysql-test/suite/ndb/backups/51_data_le/
      mysql-test/suite/ndb/std_data/ndb_backup51_dd/ => mysql-test/suite/ndb/backups/51_dd/
      mysql-test/suite/ndb/std_data/ndb_backup51_undolog_be/ => mysql-test/suite/ndb/backups/51_undolog_be/
      mysql-test/suite/ndb/std_data/ndb_backup51_undolog_le/ => mysql-test/suite/ndb/backups/51_undolog_le/
      mysql-test/suite/ndb/std_data/ndb_backup_before_native_default/ => mysql-test/suite/ndb/backups/before_native_default/
      mysql-test/suite/ndb/std_data/ndb_backup_bug54613/ => mysql-test/suite/ndb/backups/bug54613/
      mysql-test/suite/ndb/std_data/ndb_backup_hashmap/ => mysql-test/suite/ndb/backups/hashmap/
      mysql-test/suite/ndb/std_data/ndb_backup_packed/ => mysql-test/suite/ndb/backups/packed/
      mysql-test/suite/ndb/std_data/table_data10000.dat => mysql-test/suite/ndb/data/table_data10000.dat
      mysql-test/suite/ndb/std_data/table_data100000.dat => mysql-test/suite/ndb/data/table_data100000.dat
    modified:
      mysql-test/Makefile.am
      mysql-test/lib/My/SysInfo.pm
      mysql-test/mysql-test-run.pl
      mysql-test/suite/ndb/t/disabled.def
      mysql-test/suite/ndb/t/ndb_addnode.test
      mysql-test/suite/ndb/t/ndb_alter_table_backup.test
      mysql-test/suite/ndb/t/ndb_dd_restore_compat.test
      mysql-test/suite/ndb/t/ndb_native_default_support.test
      mysql-test/suite/ndb/t/ndb_restore_compat_downward.test
      mysql-test/suite/ndb/t/ndb_restore_compat_endianness.test
      mysql-test/suite/ndb/t/ndb_restore_misc.test
      mysql-test/suite/ndb/t/ndb_restore_undolog.test
      mysql-test/suite/ndb_binlog/t/ndb_binlog_restore.test
      storage/ndb/CMakeLists.txt
      storage/ndb/include/CMakeLists.txt
      storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp
      storage/ndb/include/ndbapi/NdbReceiver.hpp
      storage/ndb/src/common/portlib/CMakeLists.txt
      storage/ndb/src/common/portlib/Makefile.am
      storage/ndb/src/common/portlib/NdbTCP.cpp
      storage/ndb/src/kernel/blocks/backup/Backup.cpp
      storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
      storage/ndb/src/ndbapi/NdbOperationExec.cpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
      storage/ndb/src/ndbapi/NdbReceiver.cpp
      storage/ndb/src/ndbapi/NdbScanOperation.cpp
      storage/ndb/src/ndbapi/ndberror.c
      storage/ndb/test/ndbapi/testBasic.cpp
      storage/ndb/test/run-test/daily-basic-tests.txt
 3492 Ole John Aske	2011-05-05 [merge]
      merge telco-7.0 -> SPJ

    modified:
      storage/ndb/src/ndbapi/NdbQueryBuilder.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
=== modified file 'mysql-test/Makefile.am'
--- a/mysql-test/Makefile.am	2011-04-08 13:59:44 +0000
+++ b/mysql-test/Makefile.am	2011-05-06 18:01:43 +0000
@@ -75,19 +75,19 @@ EXTRA_DIST =	README \
 # List of directories containing test + result files and the
 # related test data files that should be copied
 TEST_DIRS = t r include std_data std_data/parts collections \
-	suite/ndb/std_data/ndb_backup50 \
-	suite/ndb/std_data/ndb_backup51 \
-	suite/ndb/std_data/ndb_backup51_data_be \
-	suite/ndb/std_data/ndb_backup51_data_le \
-	suite/ndb/std_data/ndb_backup51_dd \
-	suite/ndb/std_data/ndb_backup_packed \
-	suite/ndb/std_data/ndb_backup51_d2_be \
-	suite/ndb/std_data/ndb_backup51_d2_le \
-	suite/ndb/std_data/ndb_backup51_undolog_be \
-	suite/ndb/std_data/ndb_backup51_undolog_le \
-	suite/ndb/std_data/ndb_backup_hashmap \
-	suite/ndb/std_data/ndb_backup_before_native_default \
-	suite/ndb/std_data/ndb_backup_bug54613 \
+	suite/ndb/backups/50 \
+	suite/ndb/backups/51 \
+	suite/ndb/backups/51_data_be \
+	suite/ndb/backups/51_data_le \
+	suite/ndb/backups/51_dd \
+	suite/ndb/backups/packed \
+	suite/ndb/backups/51_d2_be \
+	suite/ndb/backups/51_d2_le \
+	suite/ndb/backups/51_undolog_be \
+	suite/ndb/backups/51_undolog_le \
+	suite/ndb/backups/hashmap \
+	suite/ndb/backups/before_native_default \
+	suite/ndb/backups/bug54613 \
 	std_data/funcs_1 \
 	extra/binlog_tests/ extra/rpl_tests \
 	suite/binlog suite/binlog/t suite/binlog/r suite/binlog/std_data \
@@ -105,7 +105,7 @@ TEST_DIRS = t r include std_data std_dat
 	suite/rpl suite/rpl/data suite/rpl/include suite/rpl/r \
 	suite/rpl/t \
 	suite/stress/include suite/stress/t suite/stress/r \
-	suite/ndb suite/ndb/t suite/ndb/r suite/ndb/include suite/ndb/std_data \
+	suite/ndb suite/ndb/t suite/ndb/r suite/ndb/include suite/ndb/data \
 	suite/ndb_big \
 	suite/ndb_binlog suite/ndb_binlog/t suite/ndb_binlog/r \
 	suite/ndb_team suite/ndb_team/t suite/ndb_team/r \

=== modified file 'mysql-test/lib/My/SysInfo.pm'
--- a/mysql-test/lib/My/SysInfo.pm	2011-04-08 12:48:50 +0000
+++ b/mysql-test/lib/My/SysInfo.pm	2011-05-06 08:05:58 +0000
@@ -168,6 +168,28 @@ sub num_cpus {
 }
 
 
+# Return the number of cores found
+#  - if there is a "core_id" attribute in the
+#    cpuinfo, use it to filter out only the count of
+#    cores, else return count of cpus 
+sub num_cores {
+  my ($self)= @_;
+  
+  my $cores = 0;
+  my %seen = (); # Hash with the core id's already seen 
+  foreach my $cpu (@{$self->{cpus}}) {
+    my $core_id = $cpu->{core_id};
+    
+    next if (defined $core_id and $seen{$core_id}++);
+ 
+    # Unknown core id or not seen this core before, count it
+    $cores++;
+  }
+  return $cores or
+    confess "INTERNAL ERROR: No cores!";
+}
+
+
 # Return the smallest bogomips value amongst the processors
 sub min_bogomips {
   my ($self)= @_;

=== modified file 'mysql-test/mysql-test-run.pl'
--- a/mysql-test/mysql-test-run.pl	2011-04-27 06:52:34 +0000
+++ b/mysql-test/mysql-test-run.pl	2011-05-06 08:05:58 +0000
@@ -382,14 +382,11 @@ sub main {
     # Try to find a suitable value for number of workers
     my $sys_info= My::SysInfo->new();
 
-    $opt_parallel= $sys_info->num_cpus();
-    print "num_cpus: $opt_parallel, min_bogomips: " .
-      $sys_info->min_bogomips(). "\n";
+    $opt_parallel= $sys_info->num_cores();
     for my $limit (2000, 1500, 1000, 500){
       $opt_parallel-- if ($sys_info->min_bogomips() < $limit);
     }
     my $max_par= $ENV{MTR_MAX_PARALLEL} || 8;
-    print "max_par: $max_par\n";
     $opt_parallel= $max_par if ($opt_parallel > $max_par);
     $opt_parallel= $num_tests if ($opt_parallel > $num_tests);
     $opt_parallel= 1 if (IS_WINDOWS and $sys_info->isvm());

=== renamed directory 'mysql-test/suite/ndb/std_data' => 'mysql-test/suite/ndb/backups'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup50' => 'mysql-test/suite/ndb/backups/50'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51' => 'mysql-test/suite/ndb/backups/51'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_d2_be' => 'mysql-test/suite/ndb/backups/51_d2_be'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_d2_le' => 'mysql-test/suite/ndb/backups/51_d2_le'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_data_be' => 'mysql-test/suite/ndb/backups/51_data_be'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_data_le' => 'mysql-test/suite/ndb/backups/51_data_le'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_dd' => 'mysql-test/suite/ndb/backups/51_dd'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_undolog_be' => 'mysql-test/suite/ndb/backups/51_undolog_be'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_undolog_le' => 'mysql-test/suite/ndb/backups/51_undolog_le'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup_before_native_default' => 'mysql-test/suite/ndb/backups/before_native_default'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup_bug54613' => 'mysql-test/suite/ndb/backups/bug54613'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup_hashmap' => 'mysql-test/suite/ndb/backups/hashmap'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup_packed' => 'mysql-test/suite/ndb/backups/packed'
=== added directory 'mysql-test/suite/ndb/data'
=== renamed file 'mysql-test/suite/ndb/std_data/table_data10000.dat' => 'mysql-test/suite/ndb/data/table_data10000.dat'
=== renamed file 'mysql-test/suite/ndb/std_data/table_data100000.dat' => 'mysql-test/suite/ndb/data/table_data100000.dat'
=== removed file 'mysql-test/suite/ndb/include/add_six_nodes.inc'
--- a/mysql-test/suite/ndb/include/add_six_nodes.inc	2009-03-26 08:21:55 +0000
+++ b/mysql-test/suite/ndb/include/add_six_nodes.inc	1970-01-01 00:00:00 +0000
@@ -1,64 +0,0 @@
---perl
-
-my $vardir = $ENV{MYSQLTEST_VARDIR} or die "Need MYSQLTEST_VARDIR";
-my $file ="$vardir/my.cnf";
-my $file_new = "$vardir/my.cnf.new";
-
-open (IN, "$file") || die $!;
-open (OUT, ">$file_new") || die $!;
-
-while ($_ = <IN> ) {
-  if ($_ =~ /ndbd=localhost,localhost/i) 
-  {
-    # Replace text, all instances on a line (/g), case insensitive (/i)
-    $_ =~ s/ndbd=localhost,localhost/ndbd=localhost,localhost,localhost,localhost,localhost,localhost,localhost,localhost/gi;
-  }
-  print OUT "$_";
-  if ($_=~ /cluster_config.ndb_mgmd.1.1/i) 
-  {
-    print OUT "NodeId=3\n";
-  }
-}
-
-close IN;
-close OUT;
-
-open (OUT, ">>$file_new") || die $!;
-print OUT "[cluster_config.ndbd.3.1]\n";
-print OUT "Id=40\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.1\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "\n";
-print OUT "[cluster_config.ndbd.4.1]\n";
-print OUT "Id=41\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.2\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-print OUT "\n";
-print OUT "[cluster_config.ndbd.5.1]\n";
-print OUT "Id=42\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.1\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "\n";
-print OUT "[cluster_config.ndbd.6.1]\n";
-print OUT "Id=43\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.2\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-print OUT "\n";
-print OUT "[cluster_config.ndbd.7.1]\n";
-print OUT "Id=44\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.1\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "\n";
-print OUT "[cluster_config.ndbd.8.1]\n";
-print OUT "Id=45\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.2\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-
-close OUT;
-EOF
\ No newline at end of file

=== removed file 'mysql-test/suite/ndb/include/add_two_nodes.inc'
--- a/mysql-test/suite/ndb/include/add_two_nodes.inc	2009-03-26 08:21:55 +0000
+++ b/mysql-test/suite/ndb/include/add_two_nodes.inc	1970-01-01 00:00:00 +0000
@@ -1,39 +0,0 @@
---perl
-
-my $vardir = $ENV{MYSQLTEST_VARDIR} or die "Need MYSQLTEST_VARDIR";
-my $file ="$vardir/my.cnf";
-my $file_new = "$vardir/my.cnf.new";
-
-open (IN, "$file") || die $!;
-open (OUT, ">$file_new") || die $!;
-
-while ($_ = <IN> ) {
-  if ($_ =~ /ndbd=localhost,localhost/i) 
-  {
-    # Replace text, all instances on a line (/g), case insensitive (/i)
-    $_ =~ s/ndbd=localhost,localhost/ndbd=localhost,localhost,localhost,localhost/gi;
-  }
-  print OUT "$_";
-  if ($_=~ /cluster_config.ndb_mgmd.1.1/i) 
-  {
-    print OUT "NodeId=3\n";
-  }
-}
-
-close IN;
-close OUT;
-
-open (OUT, ">>$file_new") || die $!;
-print OUT "[cluster_config.ndbd.3.1]\n";
-print OUT "Id=40\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.1\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "\n";
-print OUT "[cluster_config.ndbd.4.1]\n";
-print OUT "Id=41\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.2\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-close OUT;
-EOF
\ No newline at end of file

=== removed file 'mysql-test/suite/ndb/include/reload_ndb_mgmd.inc'
--- a/mysql-test/suite/ndb/include/reload_ndb_mgmd.inc	2009-03-26 08:21:55 +0000
+++ b/mysql-test/suite/ndb/include/reload_ndb_mgmd.inc	1970-01-01 00:00:00 +0000
@@ -1,37 +0,0 @@
---perl
-
-use strict;
-use IO::Socket::INET;
-
-use lib "lib/";
-use My::Config;
-
-my $vardir = $ENV{MYSQLTEST_VARDIR} or die "Need MYSQLTEST_VARDIR";
-my $config= My::Config->new("$vardir/my.cnf");
-my $mgmd = $config->group("cluster_config.ndb_mgmd.1.1");
-my $server_port = $mgmd->value("PortNumber");
-#print "server_port: $server_port\n";
-
-my $server = new IO::Socket::INET
-(
- PeerAddr => 'localhost',
- PeerPort => $server_port,
- Proto    => 'tcp'
-);
-
-print $server "reload config\n";
-print $server "mycnf: 1\n";
-print $server "\n";
-
-my $result = "unkown error";
-while(my $line= <$server>){
-  if ($line =~ /result: (.*)/)
-  {
-    $result = $1;
-  }
-  last if ($line eq "\n");
-}
-die "reload failed, result: '$result'"
-    unless $result eq "Ok";
-
-EOF
\ No newline at end of file

=== removed file 'mysql-test/suite/ndb/r/add_node01.result'
--- a/mysql-test/suite/ndb/r/add_node01.result	2009-04-06 07:44:28 +0000
+++ b/mysql-test/suite/ndb/r/add_node01.result	1970-01-01 00:00:00 +0000
@@ -1,238 +0,0 @@
-result_format: 2
-## Make mtr.pl restart all servers after this test
-call mtr.force_restart();
-
-## Show cluster is started with one ndb_mgmd and two ndbd nodes
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	2 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-drop database if exists DB1;
-CREATE LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_1.dat'
-    INITIAL_SIZE 16M
-    UNDO_BUFFER_SIZE 2M
-    ENGINE NDB;
-
-ALTER LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_2.dat'
-    INITIAL_SIZE 12M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_1
-    ADD DATAFILE 'data_1.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_2
-    ADD DATAFILE 'data_2.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-create database DB1;
-use DB1;
-create table old_table1(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table2(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table3(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_1 STORAGE DISK
-engine=ndb;
-create table old_table4(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_2 STORAGE DISK
-engine=ndb;
-
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table1 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table2 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table3 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table4 fields terminated by ' ' lines terminated by '\n';
-
-## Add two nodes to my.cnf
-## Reload ndb_mgmd
-## Restart the "old" ndbd nodes
-## Restart mysqld nodes
-
-
-
-
-
-## Initial start of "new" data nodes
-## Wait for added nodes started
-## Create nodegroup for "new" nodes
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	4 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-id=40	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-id=41	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-use DB1;
-create table new_table1(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table new_table2(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_1 STORAGE DISK
-engine=ndb;
-insert into new_table1(id, data) VALUES(1,'new'), (2,'new'),(3,'new'),(4,'new'),(5,'new'),(6,'new'),(7,'new'),(8,'new'),(9,'new'),(10,'new');
-insert into new_table2(id, data) VALUES(1,'new'), (2,'new'),(3,'new'),(4,'new'),(5,'new'),(6,'new'),(7,'new'),(8,'new'),(9,'new'),(10,'new');
-
-## ndb_mgm dump shows old data resides only on "old" nodes and new data resides on all nodes in cluster log 
-alter online table old_table1 reorganize partition;
-alter online table old_table2 reorganize partition;
-alter online table old_table3 reorganize partition;
-alter online table old_table4 reorganize partition;
-
-select LOGFILE_GROUP_NAME,FILE_TYPE,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='undo_1.dat';
-LOGFILE_GROUP_NAME	FILE_TYPE	EXTRA
-lg_1	UNDO LOG	CLUSTER_NODE=1;UNDO_BUFFER_SIZE=2097152
-lg_1	UNDO LOG	CLUSTER_NODE=2;UNDO_BUFFER_SIZE=2097152
-lg_1	UNDO LOG	CLUSTER_NODE=40;UNDO_BUFFER_SIZE=2097152
-lg_1	UNDO LOG	CLUSTER_NODE=41;UNDO_BUFFER_SIZE=2097152
-select LOGFILE_GROUP_NAME,FILE_TYPE,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='undo_2.dat';
-LOGFILE_GROUP_NAME	FILE_TYPE	EXTRA
-lg_1	UNDO LOG	CLUSTER_NODE=1;UNDO_BUFFER_SIZE=2097152
-lg_1	UNDO LOG	CLUSTER_NODE=2;UNDO_BUFFER_SIZE=2097152
-lg_1	UNDO LOG	CLUSTER_NODE=40;UNDO_BUFFER_SIZE=2097152
-lg_1	UNDO LOG	CLUSTER_NODE=41;UNDO_BUFFER_SIZE=2097152
-select LOGFILE_GROUP_NAME,FILE_TYPE,TABLESPACE_NAME,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='data_1.dat';
-LOGFILE_GROUP_NAME	FILE_TYPE	TABLESPACE_NAME	EXTRA
-lg_1	DATAFILE	ts_1	CLUSTER_NODE=1
-lg_1	DATAFILE	ts_1	CLUSTER_NODE=2
-lg_1	DATAFILE	ts_1	CLUSTER_NODE=40
-lg_1	DATAFILE	ts_1	CLUSTER_NODE=41
-select LOGFILE_GROUP_NAME,FILE_TYPE,TABLESPACE_NAME,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='data_2.dat';
-LOGFILE_GROUP_NAME	FILE_TYPE	TABLESPACE_NAME	EXTRA
-lg_1	DATAFILE	ts_2	CLUSTER_NODE=1
-lg_1	DATAFILE	ts_2	CLUSTER_NODE=2
-lg_1	DATAFILE	ts_2	CLUSTER_NODE=40
-lg_1	DATAFILE	ts_2	CLUSTER_NODE=41
-
-## Drop nodegroup with "new" nodes is not allowed with data one those nodes
-## Nodegroup with "new" nodes still exist after dropping it as shown:
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	4 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-id=40	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-id=41	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-show databases;
-Database
-information_schema
-DB1
-mtr
-mysql
-test
-drop table old_table1,old_table2,old_table3,old_table4,new_table1,new_table2;
-drop database DB1;
-show databases;
-Database
-information_schema
-mtr
-mysql
-test
-
-## Drop nodegroup with "new" nodes
-## Nodegroup with "new" nodes still exists after dropping it as shown:
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	4 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-id=40	@127.0.0.1  (mysql ndb, no nodegroup)
-id=41	@127.0.0.1  (mysql ndb, no nodegroup)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-ALTER TABLESPACE ts_1
-    DROP DATAFILE 'data_1.dat'
-    ENGINE NDB;
-
-ALTER TABLESPACE ts_2
-    DROP DATAFILE 'data_2.dat'
-    ENGINE NDB;
-
-drop TABLESPACE ts_1 ENGINE NDB;
-drop TABLESPACE ts_2 ENGINE NDB;
-
-drop LOGFILE GROUP lg_1 ENGINE NDB;

=== removed file 'mysql-test/suite/ndb/r/add_node02.result'
--- a/mysql-test/suite/ndb/r/add_node02.result	2009-04-06 07:44:28 +0000
+++ b/mysql-test/suite/ndb/r/add_node02.result	1970-01-01 00:00:00 +0000
@@ -1,143 +0,0 @@
-result_format: 2
-## Make mtr.pl restart all servers after this test
-call mtr.force_restart();
-
-## Show cluster is started with one ndb_mgmd and two ndbd nodes
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	2 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-drop database if exists DB1;
-CREATE LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_1.dat'
-    INITIAL_SIZE 16M
-    UNDO_BUFFER_SIZE 2M
-    ENGINE NDB;
-
-ALTER LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_2.dat'
-    INITIAL_SIZE 12M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_1
-    ADD DATAFILE 'data_1.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_2
-    ADD DATAFILE 'data_2.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-create database DB1;
-use DB1;
-create table old_table1(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table2(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table3(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_1 STORAGE DISK
-engine=ndb;
-create table old_table4(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_2 STORAGE DISK
-engine=ndb;
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table1 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table2 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table3 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table4 fields terminated by ' ' lines terminated by '\n';
-
-## Add two nodes to my.cnf
-## Reload ndb_mgmd
-## Restart the "old" ndbd nodes
-## Restart mysqld nodes
-
-
-
-
-
-## Initial start of "new" data nodes
-## Wait for added nodes started
-## Create nodegroup for "new" nodes
-## Cluster running after adding two ndbd nodes
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	4 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-id=40	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-id=41	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-######################################################
-######################################################
-CREATE TEMPORARY TABLE test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
-
-LOAD DATA INFILE 'DUMP_FILE' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
-
-DROP TABLE test.backup_info;
-
-use DB1;
-drop table old_table1, old_table2, old_table3, old_table4;
-ALTER TABLESPACE ts_1
-    DROP DATAFILE 'data_1.dat'
-    ENGINE NDB;
-ALTER TABLESPACE ts_2
-    DROP DATAFILE 'data_2.dat'
-    ENGINE NDB;
-drop TABLESPACE ts_1 ENGINE NDB;
-drop TABLESPACE ts_2 ENGINE NDB;
-drop LOGFILE GROUP lg_1 ENGINE NDB;
-
-use DB1;
-drop table old_table1, old_table2, old_table3, old_table4;
-ALTER TABLESPACE ts_1
-    DROP DATAFILE 'data_1.dat'
-    ENGINE NDB;
-ALTER TABLESPACE ts_2
-    DROP DATAFILE 'data_2.dat'
-    ENGINE NDB;
-drop TABLESPACE ts_1 ENGINE NDB;
-drop TABLESPACE ts_2 ENGINE NDB;
-drop LOGFILE GROUP lg_1 ENGINE NDB;
-drop database DB1;

=== removed file 'mysql-test/suite/ndb/r/add_node03.result'
--- a/mysql-test/suite/ndb/r/add_node03.result	2009-04-06 07:44:28 +0000
+++ b/mysql-test/suite/ndb/r/add_node03.result	1970-01-01 00:00:00 +0000
@@ -1,76 +0,0 @@
-result_format: 2
-## Make mtr.pl restart all servers after this test
-call mtr.force_restart();
-
-## Show cluster is started with one ndb_mgmd and two ndbd nodes
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	2 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-## Add six nodes to my.cnf
-## Reload ndb_mgmd
-## Restart the "old" ndbd nodes
-## Restart mysqld nodes
-
-
-
-
-
-## Initial start of "new" data nodes
-## Wait for added nodes started
-## Create nodegroup for "new" nodes
-## Cluster running after adding six ndbd nodes:
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	8 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-id=40	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-id=41	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-id=42	@127.0.0.1  (mysql ndb, Nodegroup: 2)
-id=43	@127.0.0.1  (mysql ndb, Nodegroup: 2)
-id=44	@127.0.0.1  (mysql ndb, Nodegroup: 3)
-id=45	@127.0.0.1  (mysql ndb, Nodegroup: 3)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-

=== removed file 'mysql-test/suite/ndb/t/add_node01.test'
--- a/mysql-test/suite/ndb/t/add_node01.test	2010-03-17 10:50:18 +0000
+++ b/mysql-test/suite/ndb/t/add_node01.test	1970-01-01 00:00:00 +0000
@@ -1,150 +0,0 @@
--- source include/have_ndb.inc
--- source include/not_embedded.inc
---result_format 2
-
-## Make mtr.pl restart all servers after this test
-call mtr.force_restart(); 
-
-## Show cluster is started with one ndb_mgmd and two ndbd nodes
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-
---disable_warnings
-drop database if exists DB1;
---enable_warnings
-
-CREATE LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_1.dat'
-    INITIAL_SIZE 16M
-    UNDO_BUFFER_SIZE 2M
-    ENGINE NDB;
-
-ALTER LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_2.dat'
-    INITIAL_SIZE 12M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_1
-    ADD DATAFILE 'data_1.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_2
-    ADD DATAFILE 'data_2.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-create database DB1;
-use DB1;
-create table old_table1(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table2(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table3(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_1 STORAGE DISK
-engine=ndb;
-create table old_table4(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_2 STORAGE DISK
-engine=ndb;
-
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table1 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table2 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table3 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table4 fields terminated by ' ' lines terminated by '\n';
-
-## Add two nodes to my.cnf
-# Set ndb_mgmd with node id 3, otherwise the configuration will change and the
-# cluster may fail to restart
---source suite/ndb/include/add_two_nodes.inc
-
-## Reload ndb_mgmd
---source suite/ndb/include/reload_ndb_mgmd.inc
---exec $NDB_MGM -e show >> $NDB_TOOLS_OUTPUT
-
-## Restart the "old" ndbd nodes
---exec $NDB_MGM -e "1 restart" >> $NDB_TOOLS_OUTPUT
---exec $NDB_WAITER --nowait-nodes=40,41 >> $NDB_TOOLS_OUTPUT
---exec $NDB_MGM -e "2 restart" >> $NDB_TOOLS_OUTPUT
---exec $NDB_WAITER --nowait-nodes=40,41 >> $NDB_TOOLS_OUTPUT
-
-## Restart mysqld nodes
-let $mysqld_name=mysqld.1.1;
---source include/restart_mysqld.inc
-connect (mysqld_2_1,127.0.0.1,root,,test,$MASTER_MYPORT1,);
-connection mysqld_2_1;
-let $mysqld_name= mysqld.2.1;
---source include/restart_mysqld.inc
-connection default;
-
-## Initial start of "new" data nodes
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=40
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=41
-
-## Wait for added nodes started
---exec $NDB_WAITER --timeout=300 >> $NDB_TOOLS_OUTPUT
-
-## Create nodegroup for "new" nodes
---exec $NDB_MGM -e "create nodegroup 40,41" >> $NDB_TOOLS_OUTPUT
-
-# Cluster running after adding two ndbd nodes
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-
-use DB1;
-create table new_table1(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table new_table2(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_1 STORAGE DISK
-engine=ndb;
-insert into new_table1(id, data) VALUES(1,'new'), (2,'new'),(3,'new'),(4,'new'),(5,'new'),(6,'new'),(7,'new'),(8,'new'),(9,'new'),(10,'new');
-insert into new_table2(id, data) VALUES(1,'new'), (2,'new'),(3,'new'),(4,'new'),(5,'new'),(6,'new'),(7,'new'),(8,'new'),(9,'new'),(10,'new');
-
-## ndb_mgm dump shows old data resides only on "old" nodes and new data resides on all nodes in cluster log 
---exec $NDB_MGM -e "all dump 18" >> $NDB_TOOLS_OUTPUT
-
-alter online table old_table1 reorganize partition;
-alter online table old_table2 reorganize partition;
-alter online table old_table3 reorganize partition;
-alter online table old_table4 reorganize partition;
-
-select LOGFILE_GROUP_NAME,FILE_TYPE,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='undo_1.dat';
-select LOGFILE_GROUP_NAME,FILE_TYPE,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='undo_2.dat';
-select LOGFILE_GROUP_NAME,FILE_TYPE,TABLESPACE_NAME,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='data_1.dat';
-select LOGFILE_GROUP_NAME,FILE_TYPE,TABLESPACE_NAME,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='data_2.dat';
-
-## Drop nodegroup with "new" nodes is not allowed with data one those nodes
---error 255
---exec $NDB_MGM -e "drop nodegroup 1" >> $NDB_TOOLS_OUTPUT
-
-## Nodegroup with "new" nodes still exist after dropping it as shown:
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-
-show databases;
-drop table old_table1,old_table2,old_table3,old_table4,new_table1,new_table2;
-drop database DB1;
-show databases;
-
-## Drop nodegroup with "new" nodes
---exec $NDB_MGM -e "drop nodegroup 1" >> $NDB_TOOLS_OUTPUT
-
-## Nodegroup with "new" nodes still exists after dropping it as shown:
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-
-# Cleanup
-ALTER TABLESPACE ts_1
-    DROP DATAFILE 'data_1.dat'
-    ENGINE NDB;
-
-ALTER TABLESPACE ts_2
-    DROP DATAFILE 'data_2.dat'
-    ENGINE NDB;
-
-drop TABLESPACE ts_1 ENGINE NDB;
-drop TABLESPACE ts_2 ENGINE NDB;
-
-drop LOGFILE GROUP lg_1 ENGINE NDB;

=== removed file 'mysql-test/suite/ndb/t/add_node02.test'
--- a/mysql-test/suite/ndb/t/add_node02.test	2010-03-17 10:50:18 +0000
+++ b/mysql-test/suite/ndb/t/add_node02.test	1970-01-01 00:00:00 +0000
@@ -1,124 +0,0 @@
--- source include/have_ndb.inc
--- source include/not_embedded.inc
---result_format 2
-
-## Make mtr.pl restart all servers after this test
-call mtr.force_restart(); 
-
-## Show cluster is started with one ndb_mgmd and two ndbd nodes
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/ 
---exec $NDB_MGM -e show
-
---disable_warnings
-drop database if exists DB1;
---enable_warnings
-
-CREATE LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_1.dat'
-    INITIAL_SIZE 16M
-    UNDO_BUFFER_SIZE 2M
-    ENGINE NDB;
-
-ALTER LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_2.dat'
-    INITIAL_SIZE 12M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_1
-    ADD DATAFILE 'data_1.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_2
-    ADD DATAFILE 'data_2.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-create database DB1;
-use DB1;
-create table old_table1(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table2(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table3(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_1 STORAGE DISK
-engine=ndb;
-create table old_table4(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_2 STORAGE DISK
-engine=ndb;
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table1 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table2 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table3 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table4 fields terminated by ' ' lines terminated by '\n';
-
-## Add two nodes to my.cnf
-# Set ndb_mgmd with node id 3, otherwise the configuration will change and the
-# cluster may fail to restart
---source suite/ndb/include/add_two_nodes.inc
-
-## Reload ndb_mgmd
---source suite/ndb/include/reload_ndb_mgmd.inc
---exec $NDB_MGM -e show >> $NDB_TOOLS_OUTPUT
-
-## Restart the "old" ndbd nodes
---exec $NDB_MGM -e "1 restart" >> $NDB_TOOLS_OUTPUT
---exec $NDB_WAITER --nowait-nodes=40,41 >> $NDB_TOOLS_OUTPUT
---exec $NDB_MGM -e "2 restart" >> $NDB_TOOLS_OUTPUT
---exec $NDB_WAITER --nowait-nodes=40,41 >> $NDB_TOOLS_OUTPUT
-
-## Restart mysqld nodes
-let $mysqld_name=mysqld.1.1;
---source include/restart_mysqld.inc
-connect (mysqld_2_1,127.0.0.1,root,,test,$MASTER_MYPORT1,);
-connection mysqld_2_1;
-let $mysqld_name= mysqld.2.1;
---source include/restart_mysqld.inc
-connection default;
-
-## Initial start of "new" data nodes
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=40 --initial
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=41 --initial
-
-## Wait for added nodes started
---exec $NDB_WAITER --timeout=300 >> $NDB_TOOLS_OUTPUT
-
-## Create nodegroup for "new" nodes
---exec $NDB_MGM -e "create nodegroup 40,41" >> $NDB_TOOLS_OUTPUT
-
-## Cluster running after adding two ndbd nodes
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-
---source include/ndb_backup.inc
-use DB1;
-drop table old_table1, old_table2, old_table3, old_table4;
-ALTER TABLESPACE ts_1
-    DROP DATAFILE 'data_1.dat'
-    ENGINE NDB;
-ALTER TABLESPACE ts_2
-    DROP DATAFILE 'data_2.dat'
-    ENGINE NDB;
-drop TABLESPACE ts_1 ENGINE NDB;
-drop TABLESPACE ts_2 ENGINE NDB;
-drop LOGFILE GROUP lg_1 ENGINE NDB;
-
---exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT
-
-# Cleanup
-use DB1;
-drop table old_table1, old_table2, old_table3, old_table4;
-ALTER TABLESPACE ts_1
-    DROP DATAFILE 'data_1.dat'
-    ENGINE NDB;
-ALTER TABLESPACE ts_2
-    DROP DATAFILE 'data_2.dat'
-    ENGINE NDB;
-drop TABLESPACE ts_1 ENGINE NDB;
-drop TABLESPACE ts_2 ENGINE NDB;
-drop LOGFILE GROUP lg_1 ENGINE NDB;
-drop database DB1;

=== removed file 'mysql-test/suite/ndb/t/add_node03.test'
--- a/mysql-test/suite/ndb/t/add_node03.test	2010-03-17 10:50:18 +0000
+++ b/mysql-test/suite/ndb/t/add_node03.test	1970-01-01 00:00:00 +0000
@@ -1,67 +0,0 @@
--- source include/have_ndb.inc
--- source include/not_embedded.inc
---result_format 2
-
-## Make mtr.pl restart all servers after this test
-call mtr.force_restart(); 
-
-## Show cluster is started with one ndb_mgmd and two ndbd nodes
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-
-## Add six nodes to my.cnf
-# Set ndb_mgmd with node id 3, otherwise the configuration will change and the
-# cluster may fail to restart
---source suite/ndb/include/add_six_nodes.inc
-
-## Reload ndb_mgmd
---source suite/ndb/include/reload_ndb_mgmd.inc
---exec $NDB_MGM -e show >> $NDB_TOOLS_OUTPUT
-
-## Restart the "old" ndbd nodes
---exec $NDB_MGM -e "1 restart" >> $NDB_TOOLS_OUTPUT
---exec $NDB_WAITER --nowait-nodes=40,41,42,43,44,45 >> $NDB_TOOLS_OUTPUT
---exec $NDB_MGM -e "2 restart" >> $NDB_TOOLS_OUTPUT
---exec $NDB_WAITER --nowait-nodes=40,41,42,43,44,45 >> $NDB_TOOLS_OUTPUT
-
-## Restart mysqld nodes
-let $mysqld_name=mysqld.1.1;
---source include/restart_mysqld.inc
-connect (mysqld_2_1,127.0.0.1,root,,test,$MASTER_MYPORT1,);
-connection mysqld_2_1;
-let $mysqld_name= mysqld.2.1;
---source include/restart_mysqld.inc
-connection default;
-
-## Initial start of "new" data nodes
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=40 --initial
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=41 --initial
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=42 --initial
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=43 --initial
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=44 --initial
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=45 --initial
-
-## Wait for added nodes started
---exec $NDB_WAITER --timeout=300 >> $NDB_TOOLS_OUTPUT
-
-## Create nodegroup for "new" nodes
---exec $NDB_MGM -e "create nodegroup 40,41" >> $NDB_TOOLS_OUTPUT
---exec $NDB_MGM -e "create nodegroup 42,43" >> $NDB_TOOLS_OUTPUT
---exec $NDB_MGM -e "create nodegroup 44,45" >> $NDB_TOOLS_OUTPUT
-
-## Cluster running after adding six ndbd nodes:
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-

=== modified file 'mysql-test/suite/ndb/t/disabled.def'
--- a/mysql-test/suite/ndb/t/disabled.def	2010-04-28 10:33:09 +0000
+++ b/mysql-test/suite/ndb/t/disabled.def	2011-05-06 13:40:42 +0000
@@ -16,7 +16,3 @@ ndb_partition_error2 : Bug#40989 ndb_par
 ndb_cache_trans           : Bug#42197 Query cache and autocommit
 ndb_disconnect_ddl        : Bug#31853 flaky testcase...
 
-# the below testcase have detected the bugs that are still open
-add_node01    : disabled waiting for safe_process compatible spawn
-add_node02    : disabled waiting for safe_process compatible spawn
-add_node03    : disabled waiting for safe_process compatible spawn

=== modified file 'mysql-test/suite/ndb/t/ndb_addnode.test'
--- a/mysql-test/suite/ndb/t/ndb_addnode.test	2010-01-27 10:08:37 +0000
+++ b/mysql-test/suite/ndb/t/ndb_addnode.test	2011-05-06 13:48:00 +0000
@@ -20,8 +20,8 @@ create table t1(id int NOT NULL PRIMARY 
 create table t2(id int NOT NULL PRIMARY KEY, data char(8))
 TABLESPACE ts_1 STORAGE DISK engine=ndb;
 
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table t1 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table t2 fields terminated by ' ' lines terminated by '\n';
+load data local infile 'suite/ndb/data/table_data10000.dat' into table t1 fields terminated by ' ' lines terminated by '\n';
+load data local infile 'suite/ndb/data/table_data10000.dat' into table t2 fields terminated by ' ' lines terminated by '\n';
 
 ## Create nodegroup for "new" nodes
 --exec $NDB_MGM -e "create nodegroup 3,4"

=== modified file 'mysql-test/suite/ndb/t/ndb_alter_table_backup.test'
--- a/mysql-test/suite/ndb/t/ndb_alter_table_backup.test	2010-10-25 09:15:03 +0000
+++ b/mysql-test/suite/ndb/t/ndb_alter_table_backup.test	2011-05-06 14:11:46 +0000
@@ -7,7 +7,7 @@
 --source include/have_ndb.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 ##############################
 # mix endian restore section #
@@ -22,8 +22,8 @@ DROP TABLE IF EXISTS t1;
 --echo *********************************
 --echo * restore tables w/ new column from little endian
 --echo *********************************
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup51_d2_le >> $NDB_TOOLS_OUTPUT 2>&1
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup51_d2_le >> $NDB_TOOLS_OUTPUT 2>&1
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/51_d2_le >> $NDB_TOOLS_OUTPUT 2>&1
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/51_d2_le >> $NDB_TOOLS_OUTPUT 2>&1
 SHOW TABLES;
 SHOW CREATE TABLE t1;
 SELECT * FROM t1 WHERE a = 1 or a = 10 or a = 20 or a = 30 ORDER BY a;
@@ -35,8 +35,8 @@ DROP TABLE t1;
 --echo *********************************
 --echo * restore tables w/ new column from big endian
 --echo *********************************
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup51_d2_be >> $NDB_TOOLS_OUTPUT 2>&1
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup51_d2_be >> $NDB_TOOLS_OUTPUT 2>&1
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/51_d2_be >> $NDB_TOOLS_OUTPUT 2>&1
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/51_d2_be >> $NDB_TOOLS_OUTPUT 2>&1
 SHOW TABLES;
 SHOW CREATE TABLE t1;
 SELECT * FROM t1 WHERE a = 1 or a = 10 or a = 20 or a = 30 ORDER BY a;

=== modified file 'mysql-test/suite/ndb/t/ndb_dd_restore_compat.test'
--- a/mysql-test/suite/ndb/t/ndb_dd_restore_compat.test	2010-10-25 09:15:03 +0000
+++ b/mysql-test/suite/ndb/t/ndb_dd_restore_compat.test	2011-05-06 14:11:46 +0000
@@ -1,10 +1,10 @@
 -- source include/have_ndb.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -p 1 -m -r $backup_data_dir/ndb_backup51_dd >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -e -b 1 -n 2 -p 1 -r $backup_data_dir/ndb_backup51_dd >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -p 1 -m -r $backup_data_dir/51_dd >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -e -b 1 -n 2 -p 1 -r $backup_data_dir/51_dd >> $NDB_TOOLS_OUTPUT
 
 # (priviliges differ on embedded and server so replace)
 --replace_column 18 #

=== modified file 'mysql-test/suite/ndb/t/ndb_native_default_support.test'
--- a/mysql-test/suite/ndb/t/ndb_native_default_support.test	2010-11-10 13:39:11 +0000
+++ b/mysql-test/suite/ndb/t/ndb_native_default_support.test	2011-05-06 14:11:46 +0000
@@ -8,7 +8,7 @@
 -- source include/ndb_default_cluster.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 --disable_warnings
 DROP TABLE IF EXISTS t1,bit1;
@@ -189,8 +189,8 @@ DROP DATABASE mysqltest;
 --echo * Restore the backup from 6.3 or 6.4, which don't support native default value
 --echo ******************************************************************************
 
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup_before_native_default >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup_before_native_default >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/before_native_default >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/before_native_default >> $NDB_TOOLS_OUTPUT
 
 ####
 # Bug# 53539 Ndb : MySQLD default values in frm embedded in backup not endian-converted
@@ -572,8 +572,8 @@ SHOW CREATE TABLE t1;
 --let ndb_desc_opts= -d test t1
 --source suite/ndb/include/ndb_desc_print.inc
 
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -r --promote-attribute --exclude-missing-columns $backup_data_dir/ndb_backup_before_native_default >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r --promote-attribute --exclude-missing-columns $backup_data_dir/ndb_backup_before_native_default >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -r --promote-attribute --exclude-missing-columns $backup_data_dir/before_native_default >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r --promote-attribute --exclude-missing-columns $backup_data_dir/before_native_default >> $NDB_TOOLS_OUTPUT
 
 SELECT i, j, f, d, d2, ch, HEX(b), HEX(vb), HEX(blob1), text1, timestamp_c, newOne, newTwo from t1 order by i;
 

=== modified file 'mysql-test/suite/ndb/t/ndb_restore_compat_downward.test'
--- a/mysql-test/suite/ndb/t/ndb_restore_compat_downward.test	2011-02-22 03:29:24 +0000
+++ b/mysql-test/suite/ndb/t/ndb_restore_compat_downward.test	2011-05-06 14:11:46 +0000
@@ -6,7 +6,7 @@
 -- source include/have_case_sensitive_file_system.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 # This test currently requires case sensitive file system as the tables
 # are originally stored with uppercase
@@ -19,8 +19,8 @@ let $backup_data_dir=$MYSQL_TEST_DIR/sui
 DROP DATABASE IF EXISTS BANK;
 --enable_warnings
 CREATE DATABASE BANK default charset=latin1 default collate=latin1_bin;
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -p 1 -m -r $backup_data_dir/ndb_backup51 >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -e -b 1 -n 2 -p 1 -r $backup_data_dir/ndb_backup51 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -p 1 -m -r $backup_data_dir/51 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -e -b 1 -n 2 -p 1 -r $backup_data_dir/51 >> $NDB_TOOLS_OUTPUT
 USE BANK;
 --sorted_result
 SHOW TABLES;
@@ -56,8 +56,8 @@ TRUNCATE ACCOUNT_TYPE;
 --exec $NDB_DESC --no-defaults -d BANK ACCOUNT_TYPE | grep ForceVarPart
 
 # Restore data
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -p 1 -r $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -e -b 1 -n 2 -p 1 -r $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -p 1 -r $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -e -b 1 -n 2 -p 1 -r $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
 
 # Check data
 SELECT * FROM GL            ORDER BY TIME,ACCOUNT_TYPE;
@@ -65,8 +65,8 @@ SELECT * FROM ACCOUNT       ORDER BY ACC
 SELECT COUNT(*) FROM TRANSACTION;
 SELECT * FROM SYSTEM_VALUES ORDER BY SYSTEM_VALUES_ID;
 SELECT * FROM mysql.ndb_apply_status WHERE server_id=0;
---exec $NDB_RESTORE --no-defaults -b 2 -n 1 -m -p 1 -s -r $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 2 -n 2 -p 1 -s -r $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 2 -n 1 -m -p 1 -s -r $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 2 -n 2 -p 1 -s -r $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
 SELECT * FROM DESCRIPTION ORDER BY USERNAME;
 --exec $NDB_DESC --no-defaults -d BANK DESCRIPTION | grep SHORT_VAR
 --exec $NDB_DESC --no-defaults -d BANK DESCRIPTION | grep MEDIUM_VAR
@@ -78,8 +78,8 @@ DROP TABLE TRANSACTION;
 DROP TABLE SYSTEM_VALUES;
 DROP TABLE ACCOUNT_TYPE;
 
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -m $backup_data_dir/ndb_backup_packed >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -p 1 -r $backup_data_dir/ndb_backup_packed >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -m $backup_data_dir/packed >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -p 1 -r $backup_data_dir/packed >> $NDB_TOOLS_OUTPUT
 
 SELECT * FROM GL            ORDER BY TIME,ACCOUNT_TYPE;
 SELECT * FROM ACCOUNT       ORDER BY ACCOUNT_ID;
@@ -96,7 +96,7 @@ drop table t1;
 # bug#54613
 
 --error 1
---exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 2 -m --core=0 --include-databases=ham --skip-unknown-objects $backup_data_dir/ndb_backup_bug54613 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 2 -m --core=0 --include-databases=ham --skip-unknown-objects $backup_data_dir/bug54613 >> $NDB_TOOLS_OUTPUT
 
 --error 0
---exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 2 -m --core=0 --include-databases=ham --skip-unknown-objects --skip-broken-objects $backup_data_dir/ndb_backup_bug54613 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 2 -m --core=0 --include-databases=ham --skip-unknown-objects --skip-broken-objects $backup_data_dir/bug54613 >> $NDB_TOOLS_OUTPUT

=== modified file 'mysql-test/suite/ndb/t/ndb_restore_compat_endianness.test'
--- a/mysql-test/suite/ndb/t/ndb_restore_compat_endianness.test	2011-02-22 03:29:24 +0000
+++ b/mysql-test/suite/ndb/t/ndb_restore_compat_endianness.test	2011-05-06 14:11:46 +0000
@@ -6,7 +6,7 @@
 -- source include/ndb_default_cluster.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 #
 # Bug #27543 restore of backup from different endian does not work for blob column
@@ -151,8 +151,8 @@ let $backup_data_dir=$MYSQL_TEST_DIR/sui
 USE test;
 DROP TABLE IF EXISTS t_num,t_datetime,t_string_1,t_string_2,t_gis;
 --enable_warnings
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup51_data_le >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup51_data_le >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/51_data_le >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/51_data_le >> $NDB_TOOLS_OUTPUT
 --sorted_result
 SHOW TABLES;
 SHOW CREATE TABLE t_num;
@@ -173,8 +173,8 @@ SELECT AsText(t_geometrycollection), AsT
 #
 
 DROP TABLE t_num,t_datetime,t_string_1,t_string_2,t_gis;
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup51_data_be >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup51_data_be >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/51_data_be >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/51_data_be >> $NDB_TOOLS_OUTPUT
 --sorted_result
 SHOW TABLES;
 SHOW CREATE TABLE t_num;

=== modified file 'mysql-test/suite/ndb/t/ndb_restore_misc.test'
--- a/mysql-test/suite/ndb/t/ndb_restore_misc.test	2011-04-11 13:36:12 +0000
+++ b/mysql-test/suite/ndb/t/ndb_restore_misc.test	2011-05-06 14:11:46 +0000
@@ -6,7 +6,7 @@
 -- source include/ndb_default_cluster.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 #
 # Bug #27775 - mediumint auto inc not restored correctly
@@ -484,9 +484,9 @@ source include/ndb_backup_id.inc;
 #
 
 # ensure correct restore of epoch numbers in old versions
---exec $NDB_RESTORE --no-defaults --core=0 -e -b 1 -n 1 $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults --core=0 -e -b 1 -n 1 $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
 select epoch from mysql.ndb_apply_status where server_id=0;
---exec $NDB_RESTORE --no-defaults --core=0 -e -b 1 -n 1 $backup_data_dir/ndb_backup51 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults --core=0 -e -b 1 -n 1 $backup_data_dir/51 >> $NDB_TOOLS_OUTPUT
 select epoch from mysql.ndb_apply_status where server_id=0;
 # ensure correct restore of epoch numbers in current version
 # number hould be "big"
@@ -497,12 +497,12 @@ select epoch > (1 << 32) from mysql.ndb_
 #
 # Bug#40428 core dumped when restore backup log file(redo log)
 #
---exec $NDB_RESTORE --print --print_meta -b 1 -n 1 $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --print --print_meta -b 1 -n 1 $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
 
 #
 # Bug #33040 ndb_restore crashes with --print_log
 #
---exec $NDB_RESTORE --print_log -b 1 -n 1 $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --print_log -b 1 -n 1 $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
 
 #
 # Bug#48005 ndb backup / restore does not restore the auto_increment
@@ -554,7 +554,7 @@ drop table ndb_show_tables_results;
 #
 # Bug#51432
 #
---exec $NDB_RESTORE --no-defaults --core=0 -e -b 1 -n 2 -m $backup_data_dir/ndb_backup_hashmap >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults --core=0 -e -b 1 -n 2 -m $backup_data_dir/hashmap >> $NDB_TOOLS_OUTPUT
 
 #
 # Bug#56285

=== modified file 'mysql-test/suite/ndb/t/ndb_restore_undolog.test'
--- a/mysql-test/suite/ndb/t/ndb_restore_undolog.test	2010-10-25 09:15:03 +0000
+++ b/mysql-test/suite/ndb/t/ndb_restore_undolog.test	2011-05-06 14:11:46 +0000
@@ -2,7 +2,7 @@
 -- source include/ndb_default_cluster.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 #
 # The table structure and data list below
@@ -440,8 +440,8 @@ let $backup_data_dir=$MYSQL_TEST_DIR/sui
 USE test;
 DROP TABLE IF EXISTS t_num,t_datetime,t_string_1,t_string_2,t_gis,t_string_3,t_string_4,t_string_5;
 --enable_warnings
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup51_undolog_le >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup51_undolog_le >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/51_undolog_le >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/51_undolog_le >> $NDB_TOOLS_OUTPUT
 --sorted_result
 SHOW TABLES;
 SHOW CREATE TABLE t_num;
@@ -480,8 +480,8 @@ ENGINE =NDB;
 #
 
 USE test;
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup51_undolog_be >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup51_undolog_be >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/51_undolog_be >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/51_undolog_be >> $NDB_TOOLS_OUTPUT
 --sorted_result
 SHOW TABLES;
 SHOW CREATE TABLE t_num;

=== modified file 'mysql-test/suite/ndb_binlog/t/ndb_binlog_restore.test'
--- a/mysql-test/suite/ndb_binlog/t/ndb_binlog_restore.test	2010-10-25 09:15:03 +0000
+++ b/mysql-test/suite/ndb_binlog/t/ndb_binlog_restore.test	2011-05-06 14:11:46 +0000
@@ -2,7 +2,7 @@
 -- source include/have_binlog_format_mixed_or_row.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 --disable_warnings
 drop table if exists t1;
@@ -123,8 +123,8 @@ CREATE TABLE ACCOUNT_TYPE ( ACCOUNT_TYPE
 --echo #
 --echo # reset, restore and  binlog should _not_ happen
 reset master;
---exec $NDB_RESTORE --no-defaults --no-binlog -b 1 -n 1 -p 1 -r $backup_data_dir/ndb_backup51 >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults --no-binlog -b 1 -n 2 -p 1 -r $backup_data_dir/ndb_backup51 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults --no-binlog -b 1 -n 1 -p 1 -r $backup_data_dir/51 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults --no-binlog -b 1 -n 2 -p 1 -r $backup_data_dir/51 >> $NDB_TOOLS_OUTPUT
 
 select count(*) from TRANSACTION;
 --source include/show_binlog_events2.inc

=== modified file 'storage/ndb/CMakeLists.txt'
--- a/storage/ndb/CMakeLists.txt	2011-04-05 06:46:48 +0000
+++ b/storage/ndb/CMakeLists.txt	2011-05-11 13:37:37 +0000
@@ -17,6 +17,41 @@
 SET(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH}
     ${CMAKE_SOURCE_DIR}/cmake
     ${CMAKE_SOURCE_DIR}/storage/ndb/cmake)
+    
+# Check if this is MySQL Cluster build i.e the MySQL Server
+# version string ends in -ndb-Y.Y.Y[-status]    
+MACRO(NDB_CHECK_MYSQL_CLUSTER version_string)
+
+  IF(${version_string} MATCHES "(.*)-ndb-(.*)")
+    SET(mysql_version ${CMAKE_MATCH_1})
+    SET(cluster_version ${CMAKE_MATCH_2})
+    
+    MESSAGE(STATUS  "This is MySQL Cluster ${cluster_version}")
+    
+    # Sanity check that the mysql_version matches precalculated
+    # values from higher level scripts  
+    IF(NOT ${mysql_version} EQUAL "${MYSQL_NO_DASH_VERSION}")
+      MESSAGE(FATAL_ERROR "Sanity check of version_string failed!")
+    ENDIF()
+
+    # Split the cluster_version further into Y.Y.Y subcomponents
+    IF(${cluster_version} MATCHES "([0-9]+)\\.([0-9]+)\\.([0-9]+)")
+      SET(MYSQL_CLUSTER_VERSION_MAJOR ${CMAKE_MATCH_1} CACHE INTERNAL
+        "MySQL Cluster Major version calculated from MySQL version" FORCE)
+      SET(MYSQL_CLUSTER_VERSION_MINOR ${CMAKE_MATCH_2} CACHE INTERNAL
+        "MySQL Cluster Minor version calculated from MySQL version" FORCE)
+      SET(MYSQL_CLUSTER_VERSION_BUILD ${CMAKE_MATCH_3} CACHE INTERNAL
+        "MySQL Cluster Build version calculated from MySQL version" FORCE)
+    ENDIF()
+
+    # Finally set MYSQL_CLUSTER_VERSION to be used as an indicator
+    # that this is a MySQL Cluster build, yay!
+    SET(MYSQL_CLUSTER_VERSION ${cluster_version} CACHE INTERNAL
+      "This is MySQL Cluster" FORCE)
+
+  ENDIF()
+ENDMACRO()
+
 
 # Temporarily remove -Werror from compiler flags until
 # storage/ndb/ can be built with it
@@ -37,6 +72,9 @@ IF(SOURCE_SUBLIBS)
   # NDBCLUSTER_SOURCES and NDBCLUSTER_LIBS, don't configure
   # again
 ELSE()
+
+  NDB_CHECK_MYSQL_CLUSTER(${VERSION})
+
   INCLUDE(${CMAKE_CURRENT_SOURCE_DIR}/ndb_configure.cmake)
 
   INCLUDE_DIRECTORIES(
@@ -117,8 +155,16 @@ IF(EXISTS ${CMAKE_SOURCE_DIR}/storage/my
   MYSQL_STORAGE_ENGINE(NDBCLUSTER)
 ELSE()
   # New plugin support, cross-platform
+  
+  # NDB is DEFAULT plugin in MySQL Cluster
+  SET(is_default_plugin "")
+  IF(MYSQL_CLUSTER_VERSION)
+    SET(is_default_plugin "DEFAULT")
+  ENDIF()
+
   MYSQL_ADD_PLUGIN(ndbcluster ${NDBCLUSTER_SOURCES} STORAGE_ENGINE
-    DEFAULT STATIC_ONLY RECOMPILE_FOR_EMBEDDED LINK_LIBRARIES ndbclient)
+    ${is_default_plugin} STATIC_ONLY RECOMPILE_FOR_EMBEDDED
+    LINK_LIBRARIES ndbclient)
 
   IF (NOT MCP_BUG58158)
     IF(WITH_EMBEDDED_SERVER)

=== modified file 'storage/ndb/include/CMakeLists.txt'
--- a/storage/ndb/include/CMakeLists.txt	2011-02-03 14:20:36 +0000
+++ b/storage/ndb/include/CMakeLists.txt	2011-05-03 09:20:34 +0000
@@ -70,6 +70,18 @@ IF(NOT DEFINED NDB_VERSION_MAJOR OR
   MESSAGE(FATAL_ERROR "Couldn't parse version numbers from ndb_configure.m4")
 ENDIF()
 
+IF (DEFINED MYSQL_CLUSTER_VERSION)
+  # This is MySQL Cluster, the MySQL Cluster version must match NDB version
+  IF(NOT MYSQL_CLUSTER_VERSION_MAJOR EQUAL NDB_VERSION_MAJOR OR
+     NOT MYSQL_CLUSTER_VERSION_MINOR EQUAL NDB_VERSION_MINOR OR  
+     NOT MYSQL_CLUSTER_VERSION_BUILD EQUAL NDB_VERSION_BUILD)
+    MESSAGE(STATUS "MYSQL_CLUSTER_VERSION_MAJOR: ${MYSQL_CLUSTER_VERSION_MAJOR}")
+    MESSAGE(STATUS "MYSQL_CLUSTER_VERSION_MINOR: ${MYSQL_CLUSTER_VERSION_MINOR}")
+    MESSAGE(STATUS "MYSQL_CLUSTER_VERSION_BUILD: ${MYSQL_CLUSTER_VERSION_BUILD}")
+    MESSAGE(FATAL_ERROR "MySQL Cluster version does not match NDB version")
+  ENDIF()
+ENDIF()
+
 # Create ndb_version.h
 CONFIGURE_FILE(ndb_version.h.in
                ${CMAKE_CURRENT_SOURCE_DIR}/ndb_version.h

=== modified file 'storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp'
--- a/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp	2011-05-07 06:17:02 +0000
@@ -170,6 +170,7 @@ public:
     DumpBackup = 13000,
     DumpBackupSetCompressed = 13001,
     DumpBackupSetCompressedLCP = 13002,
+    BackupErrorInsert = 13003,
 
     DumpDbinfo = 14000,
     DbinfoListTables = 14001,

=== modified file 'storage/ndb/include/ndbapi/NdbReceiver.hpp'
--- a/storage/ndb/include/ndbapi/NdbReceiver.hpp	2011-02-09 14:59:39 +0000
+++ b/storage/ndb/include/ndbapi/NdbReceiver.hpp	2011-05-11 13:37:37 +0000
@@ -118,7 +118,7 @@ private:
   */
   void do_setup_ndbrecord(const NdbRecord *ndb_record, Uint32 batch_size,
                           Uint32 key_size, Uint32 read_range_no,
-                          Uint32 rowsize, char *buf, Uint32 column_count);
+                          Uint32 rowsize, char *buf);
 
   static
   Uint32 ndbrecord_rowsize(const NdbRecord *ndb_record,
@@ -137,33 +137,27 @@ private:
     new NdbRecord style operation.
   */
   bool m_using_ndb_record;
-  union {
-    /* members used for NdbRecAttr operation. */
-    struct {
-      Uint32 m_hidden_count;
-    } m_recattr;
-
-    /* members used for NdbRecord operation. */
-    struct {
-      Uint32 m_column_count;
-      const NdbRecord *m_ndb_record;
-      char *m_row;
-      /* Block of memory used to receive all rows in a batch during scan. */
-      char *m_row_buffer;
-      /*
-        Offsets between two rows in m_row_buffer.
-        This can be different from m_ndb_record->m_row_size, as we sometimes
-        store extra information after each row (range_no and keyinfo).
-        For non-scan operations, this is set to zero.
-      */
-      Uint32 m_row_offset;
-      /*
-        m_read_range_no is true if we are storing the range_no at the end of
-        each row during scans.
-      */
-      bool m_read_range_no;
-    } m_record;
-  };
+
+  /* members used for NdbRecord operation. */
+  struct {
+    const NdbRecord *m_ndb_record;
+    char *m_row;
+    /* Block of memory used to receive all rows in a batch during scan. */
+    char *m_row_buffer;
+    /*
+      Offsets between two rows in m_row_buffer.
+      This can be different from m_ndb_record->m_row_size, as we sometimes
+      store extra information after each row (range_no and keyinfo).
+      For non-scan operations, this is set to zero.
+    */
+    Uint32 m_row_offset;
+    /*
+      m_read_range_no is true if we are storing the range_no at the end of
+      each row during scans.
+    */
+    bool m_read_range_no;
+  } m_record;
+
   class NdbRecAttr* theFirstRecAttr;
   class NdbRecAttr* theCurrentRecAttr;
 
@@ -212,7 +206,6 @@ private:
 
   bool hasResults() const { return m_result_rows > 0; }
   bool nextResult() const { return m_current_row < m_result_rows; }
-  NdbRecAttr* copyout(NdbReceiver&);
   Uint32 receive_packed_recattr(NdbRecAttr**, Uint32 bmlen, 
                                 const Uint32* aDataPtr, Uint32 aLength);
   Uint32 receive_packed_ndbrecord(Uint32 bmlen,

=== modified file 'storage/ndb/src/common/portlib/CMakeLists.txt'
--- a/storage/ndb/src/common/portlib/CMakeLists.txt	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/src/common/portlib/CMakeLists.txt	2011-05-11 12:23:24 +0000
@@ -37,4 +37,8 @@ SET_TARGET_PROPERTIES(NdbDir-t
                       PROPERTIES COMPILE_FLAGS "-DTEST_NDBDIR")
 TARGET_LINK_LIBRARIES(NdbDir-t ndbportlib)
 
+ADD_EXECUTABLE(NdbGetInAddr-t NdbTCP.cpp)
+SET_TARGET_PROPERTIES(NdbGetInAddr-t
+                      PROPERTIES COMPILE_FLAGS "-DTEST_NDBGETINADDR")
+
 

=== modified file 'storage/ndb/src/common/portlib/Makefile.am'
--- a/storage/ndb/src/common/portlib/Makefile.am	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/common/portlib/Makefile.am	2011-05-11 12:23:24 +0000
@@ -33,7 +33,7 @@ EXTRA_PROGRAMS = memtest PortLibTest mun
 PortLibTest_SOURCES = NdbPortLibTest.cpp
 munmaptest_SOURCES = munmaptest.cpp
 
-noinst_PROGRAMS = NdbDir-t NdbNuma-t
+noinst_PROGRAMS = NdbDir-t NdbNuma-t NdbGetInAddr-t
 
 NdbDir_t_SOURCES = NdbDir.cpp \
        $(top_srcdir)/storage/ndb/src/common/util/basestring_vsnprintf.c
@@ -46,3 +46,5 @@ NdbNuma_t_SOURCES = NdbNuma.cpp 
 NdbNuma_t_CXXFLAGS = -DTEST_NDBNUMA
 NdbNuma_t_LDADD = $(top_builddir)/mysys/libmysyslt.la @LIBDL@
 
+NdbGetInAddr_t_SOURCES = NdbTCP.cpp
+NdbGetInAddr_t_CXXFLAGS = -DTEST_NDBGETINADDR

=== modified file 'storage/ndb/src/common/portlib/NdbTCP.cpp'
--- a/storage/ndb/src/common/portlib/NdbTCP.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/common/portlib/NdbTCP.cpp	2011-05-11 12:23:24 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -17,43 +17,159 @@
 
 
 #include <ndb_global.h>
-#include <my_net.h>
 #include <NdbTCP.h>
 
-
-
 extern "C"
-int 
+int
 Ndb_getInAddr(struct in_addr * dst, const char *address)
 {
+  struct addrinfo hints;
+  memset(&hints, 0, sizeof(hints));
+  hints.ai_family = AF_INET; // Only IPv4 address
+  hints.ai_socktype = SOCK_STREAM;
+  hints.ai_protocol = IPPROTO_TCP;
+
+  struct addrinfo* ai_list;
+  if (getaddrinfo(address, NULL, &hints, &ai_list) != 0)
+  {
+    dst->s_addr = INADDR_NONE;
+    return -1;
+  }
+
+  /* Return sin_addr for the first address returned */
+  struct sockaddr_in* sin = (struct sockaddr_in*)ai_list->ai_addr;
+  memcpy(dst, &sin->sin_addr, sizeof(struct in_addr));
+
+  freeaddrinfo(ai_list);
+  return 0;
+}
+
+#ifdef TEST_NDBGETINADDR
+#include <NdbTap.hpp>
+
+static void
+CHECK(const char* address, int expected_res, bool is_numeric= false)
+{
+  struct in_addr addr;
+
+  fprintf(stderr, "Checking '%s'\n", address);
+
+  int res= Ndb_getInAddr(&addr, address);
+
+  if (res != expected_res)
+  {
+    fprintf(stderr, "> unexpected result: %d, expected: %d\n",
+            res, expected_res);
+    abort();
+  }
+
+  if (res != 0)
   {
-    int tmp_errno;
-    struct hostent tmp_hostent, *hp;
-    char buff[GETHOSTBYNAME_BUFF_SIZE];
-    hp = my_gethostbyname_r(address,&tmp_hostent,buff,sizeof(buff),
-			    &tmp_errno);
-    if (hp)
+    fprintf(stderr, "> returned -1, checking INADDR_NONE\n");
+
+    // Should return INADDR_NONE when lookup fails
+    struct in_addr none;
+    none.s_addr = INADDR_NONE;
+    if (memcmp(&addr, &none, sizeof(none)) != 0)
     {
-      memcpy(dst, hp->h_addr, min(sizeof(*dst), (size_t) hp->h_length));
-      my_gethostbyname_r_free();
-      return 0;
+      fprintf(stderr, "> didn't return INADDR_NONE after failure, "
+             "got: '%s', expected: '%s'\n",
+             inet_ntoa(addr), inet_ntoa(none));
+      abort();
     }
-    my_gethostbyname_r_free();
+    fprintf(stderr, "> got INADDR_NONE\n");
+    return;
   }
-  /* Try it as aaa.bbb.ccc.ddd. */
-  dst->s_addr = inet_addr(address);
-  if (dst->s_addr != 
-#ifdef INADDR_NONE
-      INADDR_NONE
-#else
-      -1
+
+  fprintf(stderr, "> '%s' -> '%s'\n", address, inet_ntoa(addr));
+
+  if (is_numeric)
+  {
+    // Check that a numeric address always maps back to itself
+    // ie. compare to value returned by 'inet_addr'
+    fprintf(stderr, "> Checking numeric address against inet_addr\n");
+    struct in_addr addr2;
+    addr2.s_addr = inet_addr(address);
+    fprintf(stderr, "> inet_addr(%s) -> '%s'\n", address, inet_ntoa(addr2));
+
+    if (memcmp(&addr, &addr2, sizeof(struct in_addr)) != 0)
+    {
+      fprintf(stderr, "> numeric address '%s' didn't map to same value as "
+             "inet_addr: '%s'\n", address, inet_ntoa(addr2));
+      abort();
+    }
+    fprintf(stderr, "> ok\n");
+  }
+}
+
+
+/*
+  socket_library_init
+   - Normally done by ndb_init(), but to avoid
+     having to link with "everything", implement it locally
+*/
+
+static void
+socket_library_init(void)
+{
+#ifdef _WIN32
+  WORD requested_version = MAKEWORD( 2, 0 );
+  WSADATA wsa_data;
+  if (WSAStartup( requested_version, &wsa_data ))
+  {
+    fprintf(stderr, "failed to init Winsock\n");
+    abort();
+  }
+
+  // Confirm that the requested version of the library was loaded
+  if (wsa_data.wVersion != requested_version)
+  {
+    (void)WSACleanup();
+    fprintf(stderr, "Wrong version of Winsock loaded\n");
+    abort();
+  }
+#endif
+}
+
+
+static void
+socket_library_end()
+{
+#ifdef _WIN32
+  (void)WSACleanup();
 #endif
-      )
+}
+
+
+TAPTEST(NdbGetInAddr)
+{
+  socket_library_init();
+
+  CHECK("localhost", 0);
+  CHECK("127.0.0.1", 0, true);
+
+  char hostname_buf[256];
+  if (gethostname(hostname_buf, sizeof(hostname_buf)) == 0)
   {
-    return 0;
+    // Check this machines hostname
+    CHECK(hostname_buf, 0);
+
+    struct in_addr addr;
+    Ndb_getInAddr(&addr, hostname_buf);
+    // Convert hostname to dotted decimal string ip and check
+    CHECK(inet_ntoa(addr), 0, true);
   }
-  return -1;
+  CHECK("unknown_?host", -1); // Does not exist
+  CHECK("3ffe:1900:4545:3:200:f8ff:fe21:67cf", -1); // No IPv6
+  CHECK("fe80:0:0:0:200:f8ff:fe21:67cf", -1);
+  CHECK("fe80::200:f8ff:fe21:67cf", -1);
+  CHECK("::1", -1); // the loopback, but still No IPv6
+
+  socket_library_end();
+
+  return 1; // OK
 }
+#endif
 
 
 static inline

=== modified file 'storage/ndb/src/kernel/blocks/backup/Backup.cpp'
--- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp	2011-02-28 12:25:52 +0000
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp	2011-05-11 13:37:37 +0000
@@ -662,6 +662,16 @@ Backup::execDUMP_STATE_ORD(Signal* signa
     c_defaults.m_compressed_lcp= signal->theData[1];
     infoEvent("Compressed LCP: %d", c_defaults.m_compressed_lcp);
   }
+
+  if (signal->theData[0] == DumpStateOrd::BackupErrorInsert)
+  {
+    if (signal->getLength() == 1)
+      ndbout_c("BACKUP: setting error %u", signal->theData[1]);
+    else
+      ndbout_c("BACKUP: setting error %u, %u",
+               signal->theData[1], signal->theData[2]);
+    SET_ERROR_INSERT_VALUE2(signal->theData[1], signal->theData[2]);
+  }
 }
 
 void Backup::execDBINFO_SCANREQ(Signal *signal)
@@ -4578,6 +4588,13 @@ Backup::checkScan(Signal* signal, Backup
       sendSignal(ptr.p->masterRef, GSN_ABORT_BACKUP_ORD, signal, 
 		 AbortBackupOrd::SignalLength, JBB);
     }
+#ifdef ERROR_INSERT
+    else if (ERROR_INSERTED(10042) && filePtr.p->tableId ==c_error_insert_extra)
+    {
+      sendSignalWithDelay(lqhRef, GSN_SCAN_NEXTREQ, signal,
+			  10, ScanFragNextReq::SignalLength);
+    }
+#endif
     else
     {
       sendSignal(lqhRef, GSN_SCAN_NEXTREQ, signal, 

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-04-29 09:23:56 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-05-11 13:37:37 +0000
@@ -723,9 +723,10 @@ struct Fragrecord {
   Uint32 m_free_page_id_list;
   DynArr256::Head m_page_map;
   DLFifoList<Page>::Head thFreeFirst;   // pages with atleast 1 free record
-  
+
   Uint32 m_lcp_scan_op;
-  Uint32 m_lcp_keep_list;
+  Local_key m_lcp_keep_list_head;
+  Local_key m_lcp_keep_list_tail;
 
   enum FragState
   { FS_FREE
@@ -1439,9 +1440,8 @@ typedef Ptr<HostBuffer> HostBufferPtr;
     STATIC_CONST( MM_SHRINK   = 0x00200000 ); // Has MM part shrunk
     STATIC_CONST( MM_GROWN    = 0x00400000 ); // Has MM part grown
     STATIC_CONST( FREED       = 0x00800000 ); // Is freed
+    STATIC_CONST( FREE        = 0x00800000 ); // alias
     STATIC_CONST( LCP_SKIP    = 0x01000000 ); // Should not be returned in LCP
-    STATIC_CONST( LCP_KEEP    = 0x02000000 ); // Should be returned in LCP
-    STATIC_CONST( FREE        = 0x02800000 ); // Is free
     STATIC_CONST( VAR_PART    = 0x04000000 ); // Is there a varpart
     STATIC_CONST( REORG_MOVE  = 0x08000000 );
 
@@ -3215,6 +3215,8 @@ private:
   Uint32* get_default_ptr(const Tablerec*, Uint32&);
   Uint32 get_len(Ptr<Page>* pagePtr, Var_part_ref ref);
 
+  STATIC_CONST( COPY_TUPLE_HEADER32 = 4 );
+
   Tuple_header* alloc_copy_tuple(const Tablerec* tabPtrP, Local_key* ptr){
     Uint32 * dst = c_undo_buffer.alloc_copy_tuple(ptr,
                                                   tabPtrP->total_rec_size);
@@ -3224,7 +3226,7 @@ private:
     bzero(dst, tabPtrP->total_rec_size);
 #endif
     Uint32 count = tabPtrP->m_no_of_attributes;
-    ChangeMask * mask = (ChangeMask*)(dst);
+    ChangeMask * mask = (ChangeMask*)(dst + COPY_TUPLE_HEADER32);
     mask->m_cols = count;
     return (Tuple_header*)(mask->end_of_mask(count));
   }
@@ -3234,11 +3236,12 @@ private:
   }
 
   Tuple_header * get_copy_tuple(Uint32 * rawptr) {
-    return (Tuple_header*)(get_change_mask_ptr(rawptr)->end_of_mask());
+    return (Tuple_header*)
+      (get_change_mask_ptr(rawptr)->end_of_mask());
   }
 
   ChangeMask * get_change_mask_ptr(Uint32 * rawptr) {
-    return (ChangeMask*)(rawptr);
+    return (ChangeMask*)(rawptr + COPY_TUPLE_HEADER32);
   }
 
   Tuple_header* get_copy_tuple(const Local_key* ptr){
@@ -3250,7 +3253,7 @@ private:
     Uint32 * tmp = raw - (1 + ((tabP->m_no_of_attributes + 31) >> 5));
     ChangeMask* mask = (ChangeMask*)tmp;
     assert(mask->end_of_mask() == raw);
-    assert(get_copy_tuple(tmp) == copytuple);
+    assert(get_copy_tuple(tmp - COPY_TUPLE_HEADER32) == copytuple);
     return mask;
   }
 
@@ -3382,10 +3385,10 @@ private:
                          Page_cache_client::Request,
                          OperationrecPtr);
   int retrieve_log_page(Signal*, FragrecordPtr, OperationrecPtr);
-  
-  void dealloc_tuple(Signal* signal, Uint32, Page*, Tuple_header*, 
-		     Operationrec*, Fragrecord*, Tablerec*);
-  
+
+  void dealloc_tuple(Signal* signal, Uint32, Page*, Tuple_header*,
+		     KeyReqStruct*, Operationrec*, Fragrecord*, Tablerec*);
+
   int handle_size_change_after_update(KeyReqStruct* req_struct,
 				      Tuple_header* org,
 				      Operationrec*,
@@ -3411,7 +3414,31 @@ private:
   void check_page_map(Fragrecord*);
   bool find_page_id_in_list(Fragrecord*, Uint32 pid);
 #endif
-  void handle_lcp_keep(Signal*, Fragrecord*, ScanOp*, Uint32 rowid);
+  void handle_lcp_keep(Signal*, Fragrecord*, ScanOp*);
+  void handle_lcp_keep_commit(const Local_key*,
+                              KeyReqStruct *,
+                              Operationrec*, Fragrecord*, Tablerec*);
+
+  void setup_lcp_read_copy_tuple( KeyReqStruct *,
+                                  Operationrec*,
+                                  Fragrecord*,
+                                  Tablerec*);
+
+  bool isCopyTuple(Uint32 pageid, Uint32 pageidx) const {
+    return (pageidx & (Uint16(1) << 15)) != 0;
+  }
+
+  void setCopyTuple(Uint32& pageid, Uint16& pageidx) const {
+    assert(!isCopyTuple(pageid, pageidx));
+    pageidx |= (Uint16(1) << 15);
+    assert(isCopyTuple(pageid, pageidx));
+  }
+
+  void clearCopyTuple(Uint32& pageid, Uint16& pageidx) const {
+    assert(isCopyTuple(pageid, pageidx));
+    pageidx &= ~(Uint16(1) << 15);
+    assert(!isCopyTuple(pageid, pageidx));
+  }
 };
 
 #if 0

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp	2011-05-07 06:17:02 +0000
@@ -51,15 +51,8 @@ void Dbtup::execTUP_DEALLOCREQ(Signal* s
     PagePtr pagePtr;
     Tuple_header* ptr= (Tuple_header*)get_ptr(&pagePtr, &tmp, regTabPtr.p);
 
-    ndbassert(ptr->m_header_bits & Tuple_header::FREE);
+    ndbassert(ptr->m_header_bits & Tuple_header::FREED);
 
-    if (ptr->m_header_bits & Tuple_header::LCP_KEEP)
-    {
-      ndbassert(! (ptr->m_header_bits & Tuple_header::FREED));
-      ptr->m_header_bits |= Tuple_header::FREED;
-      return;
-    }
-    
     if (regTabPtr.p->m_attributes[MM].m_no_of_varsize +
         regTabPtr.p->m_attributes[MM].m_no_of_dynamic)
     {
@@ -157,12 +150,12 @@ Dbtup::dealloc_tuple(Signal* signal,
 		     Uint32 gci,
 		     Page* page,
 		     Tuple_header* ptr, 
+                     KeyReqStruct * req_struct,
 		     Operationrec* regOperPtr, 
 		     Fragrecord* regFragPtr, 
 		     Tablerec* regTabPtr)
 {
   Uint32 lcpScan_ptr_i= regFragPtr->m_lcp_scan_op;
-  Uint32 lcp_keep_list = regFragPtr->m_lcp_keep_list;
 
   Uint32 bits = ptr->m_header_bits;
   Uint32 extra_bits = Tuple_header::FREED;
@@ -189,9 +182,15 @@ Dbtup::dealloc_tuple(Signal* signal,
     if (!is_rowid_lcp_scanned(rowid, *scanOp.p))
     {
       jam();
-      extra_bits = Tuple_header::LCP_KEEP; // Note REMOVE FREE
-      ptr->m_operation_ptr_i = lcp_keep_list;
-      regFragPtr->m_lcp_keep_list = rowid.ref();
+
+      /**
+       * We're committing a delete, on a row that should
+       *   be part of LCP. Copy original row into copy-tuple
+       *   and add this copy-tuple to lcp-keep-list
+       *
+       */
+      handle_lcp_keep_commit(&rowid,
+                             req_struct, regOperPtr, regFragPtr, regTabPtr);
     }
   }
   
@@ -204,6 +203,69 @@ Dbtup::dealloc_tuple(Signal* signal,
   }
 }
 
+void
+Dbtup::handle_lcp_keep_commit(const Local_key* rowid,
+                              KeyReqStruct * req_struct,
+                              Operationrec * opPtrP,
+                              Fragrecord * regFragPtr,
+                              Tablerec * regTabPtr)
+{
+  bool disk = false;
+  Uint32 sizes[4];
+  Uint32 * copytuple = get_copy_tuple_raw(&opPtrP->m_copy_tuple_location);
+  Tuple_header * dst = get_copy_tuple(copytuple);
+  Tuple_header * org = req_struct->m_tuple_ptr;
+  if (regTabPtr->need_expand(disk))
+  {
+    setup_fixed_part(req_struct, opPtrP, regTabPtr);
+    req_struct->m_tuple_ptr = dst;
+    expand_tuple(req_struct, sizes, org, regTabPtr, disk);
+    shrink_tuple(req_struct, sizes+2, regTabPtr, disk);
+  }
+  else
+  {
+    memcpy(dst, org, 4*regTabPtr->m_offsets[MM].m_fix_header_size);
+  }
+  dst->m_header_bits |= Tuple_header::COPY_TUPLE;
+
+  /**
+   * Store original row-id in copytuple[0,1]
+   * Store next-ptr in copytuple[1,2] (set to RNIL/RNIL)
+   *
+   */
+  assert(sizeof(Local_key) == 8);
+  memcpy(copytuple+0, rowid, sizeof(Local_key));
+
+  Local_key nil;
+  nil.setNull();
+  memcpy(copytuple+2, &nil, sizeof(nil));
+
+  /**
+   * Link it to list
+   */
+  if (regFragPtr->m_lcp_keep_list_tail.isNull())
+  {
+    jam();
+    regFragPtr->m_lcp_keep_list_head = opPtrP->m_copy_tuple_location;
+  }
+  else
+  {
+    jam();
+    Uint32 * tail = get_copy_tuple_raw(&regFragPtr->m_lcp_keep_list_tail);
+    Local_key nextptr;
+    memcpy(&nextptr, tail+2, sizeof(Local_key));
+    ndbassert(nextptr.isNull());
+    nextptr = opPtrP->m_copy_tuple_location;
+    memcpy(tail+2, &nextptr, sizeof(Local_key));
+  }
+  regFragPtr->m_lcp_keep_list_tail = opPtrP->m_copy_tuple_location;
+
+  /**
+   * And finally clear m_copy_tuple_location so that it won't be freed
+   */
+  opPtrP->m_copy_tuple_location.setNull();
+}
+
 #if 0
 static void dump_buf_hex(unsigned char *p, Uint32 bytes)
 {
@@ -786,7 +848,7 @@ skip_disk:
 	ndbassert(tuple_ptr->m_header_bits & Tuple_header::DISK_PART);
       }
       dealloc_tuple(signal, gci_hi, page.p, tuple_ptr,
-		    regOperPtr.p, regFragPtr.p, regTabPtr.p); 
+		    &req_struct, regOperPtr.p, regFragPtr.p, regTabPtr.p);
     }
   } 
 

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2011-04-29 09:23:56 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2011-05-11 13:37:37 +0000
@@ -635,6 +635,17 @@ void Dbtup::execTUPKEYREQ(Signal* signal
      goto do_insert;
    }
 
+   if (unlikely(isCopyTuple(pageid, pageidx)))
+   {
+     /**
+      * Only LCP reads a copy-tuple "directly"
+      */
+     ndbassert(Roptype == ZREAD);
+     ndbassert(disk_page == RNIL);
+     setup_lcp_read_copy_tuple(&req_struct, regOperPtr, regFragPtr, regTabPtr);
+     goto do_read;
+   }
+
    /**
     * Get pointer to tuple
     */
@@ -652,6 +663,7 @@ void Dbtup::execTUPKEYREQ(Signal* signal
      if (setup_read(&req_struct, regOperPtr, regFragPtr, regTabPtr, 
 		    disk_page != RNIL))
      {
+   do_read:
        if(handleReadReq(signal, regOperPtr, regTabPtr, &req_struct) != -1) 
        {
 	 req_struct.log_size= 0;
@@ -847,6 +859,44 @@ Dbtup::setup_fixed_part(KeyReqStruct* re
   req_struct->attr_descr= tab_descr; 
 }
 
+void
+Dbtup::setup_lcp_read_copy_tuple(KeyReqStruct* req_struct,
+                                 Operationrec* regOperPtr,
+                                 Fragrecord* regFragPtr,
+                                 Tablerec* regTabPtr)
+{
+  Local_key tmp;
+  tmp.m_page_no = req_struct->frag_page_id;
+  tmp.m_page_idx = regOperPtr->m_tuple_location.m_page_idx;
+  clearCopyTuple(tmp.m_page_no, tmp.m_page_idx);
+
+  Uint32 * copytuple = get_copy_tuple_raw(&tmp);
+  Local_key rowid;
+  memcpy(&rowid, copytuple+0, sizeof(Local_key));
+
+  req_struct->frag_page_id = rowid.m_page_no;
+  regOperPtr->m_tuple_location.m_page_idx = rowid.m_page_idx;
+
+  Tuple_header * th = get_copy_tuple(copytuple);
+  req_struct->m_page_ptr.setNull();
+  req_struct->m_tuple_ptr = (Tuple_header*)th;
+  th->m_operation_ptr_i = RNIL;
+  ndbassert((th->m_header_bits & Tuple_header::COPY_TUPLE) != 0);
+
+  Uint32 num_attr= regTabPtr->m_no_of_attributes;
+  Uint32 descr_start= regTabPtr->tabDescriptor;
+  TableDescriptor *tab_descr= &tableDescriptor[descr_start];
+  ndbrequire(descr_start + (num_attr << ZAD_LOG_SIZE) <= cnoOfTabDescrRec);
+  req_struct->attr_descr= tab_descr;
+
+  bool disk = false;
+  if (regTabPtr->need_expand(disk))
+  {
+    jam();
+    prepare_read(req_struct, regTabPtr, disk);
+  }
+}
+
  /* ---------------------------------------------------------------- */
  /* ------------------------ CONFIRM REQUEST ----------------------- */
  /* ---------------------------------------------------------------- */
@@ -1904,6 +1954,13 @@ int Dbtup::handleDeleteReq(Signal* signa
                            KeyReqStruct *req_struct,
 			   bool disk)
 {
+  Tuple_header* dst = alloc_copy_tuple(regTabPtr,
+                                       &regOperPtr->m_copy_tuple_location);
+  if (dst == 0) {
+    terrorCode = ZMEM_NOMEM_ERROR;
+    goto error;
+  }
+
   // delete must set but not increment tupVersion
   if (!regOperPtr->is_first_operation())
   {
@@ -1911,24 +1968,25 @@ int Dbtup::handleDeleteReq(Signal* signa
     regOperPtr->tupVersion= prevOp->tupVersion;
     // make copy since previous op is committed before this one
     const Tuple_header* org = get_copy_tuple(&prevOp->m_copy_tuple_location);
-    Tuple_header* dst = alloc_copy_tuple(regTabPtr,
-                                         &regOperPtr->m_copy_tuple_location);
-    if (dst == 0) {
-      terrorCode = ZMEM_NOMEM_ERROR;
-      goto error;
-    }
-    Uint32 len = regTabPtr->total_rec_size - 
-      Uint32(((Uint32*)dst) - 
+    Uint32 len = regTabPtr->total_rec_size -
+      Uint32(((Uint32*)dst) -
              get_copy_tuple_raw(&regOperPtr->m_copy_tuple_location));
     memcpy(dst, org, 4 * len);
     req_struct->m_tuple_ptr = dst;
-    set_change_mask_info(regTabPtr, get_change_mask_ptr(regTabPtr, dst));
   }
-  else 
+  else
   {
     regOperPtr->tupVersion= req_struct->m_tuple_ptr->get_tuple_version();
+    if (regTabPtr->m_no_of_disk_attributes)
+    {
+      dst->m_header_bits = req_struct->m_tuple_ptr->m_header_bits;
+      memcpy(dst->get_disk_ref_ptr(regTabPtr),
+	     req_struct->m_tuple_ptr->get_disk_ref_ptr(regTabPtr),
+             sizeof(Local_key));
+    }
   }
   req_struct->changeMask.set();
+  set_change_mask_info(regTabPtr, get_change_mask_ptr(regTabPtr, dst));
 
   if(disk && regOperPtr->m_undo_buffer_space == 0)
   {

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp	2011-05-07 06:17:02 +0000
@@ -670,7 +670,6 @@ void Dbtup::initializeDefaultValuesFrag(
   DefaultValuesFragment.p->fragStatus = Fragrecord::FS_ONLINE;
   DefaultValuesFragment.p->m_undo_complete= false;
   DefaultValuesFragment.p->m_lcp_scan_op = RNIL;
-  DefaultValuesFragment.p->m_lcp_keep_list = RNIL;
   DefaultValuesFragment.p->noOfPages = 0;
   DefaultValuesFragment.p->noOfVarPages = 0;
   DefaultValuesFragment.p->m_max_page_no = 0;

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp	2011-04-18 15:36:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp	2011-05-07 06:17:02 +0000
@@ -703,7 +703,8 @@ void Dbtup::execTUPFRAGREQ(Signal* signa
   regFragPtr.p->m_tablespace_id= tablespace_id;
   regFragPtr.p->m_undo_complete= false;
   regFragPtr.p->m_lcp_scan_op = RNIL;
-  regFragPtr.p->m_lcp_keep_list = RNIL;
+  regFragPtr.p->m_lcp_keep_list_head.setNull();
+  regFragPtr.p->m_lcp_keep_list_tail.setNull();
   regFragPtr.p->noOfPages = 0;
   regFragPtr.p->noOfVarPages = 0;
   regFragPtr.p->m_max_page_no = 0;
@@ -1573,6 +1574,8 @@ Dbtup::computeTableMetaData(Tablerec *re
   /* Room for changemask */
   total_rec_size += 1 + ((regTabPtr->m_no_of_attributes + 31) >> 5);
 
+  total_rec_size += COPY_TUPLE_HEADER32;
+
   regTabPtr->total_rec_size= total_rec_size;
 
   setUpQueryRoutines(regTabPtr);

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp	2011-04-19 09:01:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp	2011-05-07 06:17:02 +0000
@@ -287,9 +287,8 @@ Dbtup::execACC_CHECK_SCAN(Signal* signal
   }
 
   const bool lcp = (scan.m_bits & ScanOp::SCAN_LCP);
-  Uint32 lcp_list = fragPtr.p->m_lcp_keep_list;
 
-  if (lcp && lcp_list != RNIL)
+  if (lcp && ! fragPtr.p->m_lcp_keep_list_head.isNull())
   {
     jam();
     /**
@@ -297,7 +296,7 @@ Dbtup::execACC_CHECK_SCAN(Signal* signal
      *   So that scan state is not alterer
      *   if lcp_keep rows are found in ScanOp::First
      */
-    handle_lcp_keep(signal, fragPtr.p, scanPtr.p, lcp_list);
+    handle_lcp_keep(signal, fragPtr.p, scanPtr.p);
     return;
   }
 
@@ -692,19 +691,18 @@ Dbtup::scanNext(Signal* signal, ScanOpPt
  
   const bool mm = (bits & ScanOp::SCAN_DD);
   const bool lcp = (bits & ScanOp::SCAN_LCP);
-  
-  Uint32 lcp_list = fragPtr.p->m_lcp_keep_list;
+
   const Uint32 size = ((bits & ScanOp::SCAN_VS) == 0) ?
     table.m_offsets[mm].m_fix_header_size : 1;
   const Uint32 first = ((bits & ScanOp::SCAN_VS) == 0) ? 0 : 1;
 
-  if (lcp && lcp_list != RNIL)
+  if (lcp && ! fragPtr.p->m_lcp_keep_list_head.isNull())
   {
     jam();
     /**
      * Handle lcp keep list here to, due to scanCont
      */
-    handle_lcp_keep(signal, fragPtr.p, scanPtr.p, lcp_list);
+    handle_lcp_keep(signal, fragPtr.p, scanPtr.p);
     return false;
   }
 
@@ -1130,57 +1128,40 @@ Dbtup::scanNext(Signal* signal, ScanOpPt
 void
 Dbtup::handle_lcp_keep(Signal* signal,
                        Fragrecord* fragPtrP,
-                       ScanOp* scanPtrP,
-                       Uint32 lcp_list)
+                       ScanOp* scanPtrP)
 {
   TablerecPtr tablePtr;
   tablePtr.i = scanPtrP->m_tableId;
   ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
 
-  Local_key tmp;
-  tmp.assref(lcp_list);
-  tmp.m_page_no = getRealpid(fragPtrP, tmp.m_page_no);
-  
-  Ptr<Page> pagePtr;
-  c_page_pool.getPtr(pagePtr, tmp.m_page_no);
-  Tuple_header* ptr = (Tuple_header*)
-    ((Fix_page*)pagePtr.p)->get_ptr(tmp.m_page_idx, 0);
-  Uint32 headerbits = ptr->m_header_bits;
-  ndbrequire(headerbits & Tuple_header::LCP_KEEP);
-  
-  Uint32 next = ptr->m_operation_ptr_i;
-  ptr->m_operation_ptr_i = RNIL;
-  ptr->m_header_bits = headerbits & ~(Uint32)Tuple_header::FREE;
-  
-  if (tablePtr.p->m_bits & Tablerec::TR_Checksum) {
+  ndbassert(!fragPtrP->m_lcp_keep_list_head.isNull());
+  Local_key tmp = fragPtrP->m_lcp_keep_list_head;
+  Uint32 * copytuple = get_copy_tuple_raw(&tmp);
+  memcpy(&fragPtrP->m_lcp_keep_list_head,
+         copytuple+2,
+         sizeof(Local_key));
+
+  if (fragPtrP->m_lcp_keep_list_head.isNull())
+  {
     jam();
-    setChecksum(ptr, tablePtr.p);
+    ndbassert(tmp.m_page_no == fragPtrP->m_lcp_keep_list_tail.m_page_no);
+    ndbassert(tmp.m_page_idx == fragPtrP->m_lcp_keep_list_tail.m_page_idx);
+    fragPtrP->m_lcp_keep_list_tail.setNull();
   }
 
+  Local_key save = tmp;
+  setCopyTuple(tmp.m_page_no, tmp.m_page_idx);
   NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
   conf->scanPtr = scanPtrP->m_userPtr;
   conf->accOperationPtr = (Uint32)-1;
   conf->fragId = fragPtrP->fragmentId;
-  conf->localKey[0] = Local_key::ref2page_id(lcp_list);
-  conf->localKey[1] = Local_key::ref2page_idx(lcp_list);
+  conf->localKey[0] = tmp.m_page_no;
+  conf->localKey[1] = tmp.m_page_idx;
   conf->gci = 0;
   Uint32 blockNo = refToMain(scanPtrP->m_userRef);
   EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 6);
-  
-  fragPtrP->m_lcp_keep_list = next;
-  ptr->m_header_bits |= Tuple_header::FREED; // RESTORE free flag
-  if (headerbits & Tuple_header::FREED)
-  {
-    if (tablePtr.p->m_attributes[MM].m_no_of_varsize +
-        tablePtr.p->m_attributes[MM].m_no_of_dynamic)
-    {
-      jam();
-      free_var_rec(fragPtrP, tablePtr.p, &tmp, pagePtr);
-    } else {
-      jam();
-      free_fix_rec(fragPtrP, tablePtr.p, &tmp, (Fix_page*)pagePtr.p);
-    }
-  }
+
+  c_undo_buffer.free_copy_tuple(&save);
 }
 
 void
@@ -1320,4 +1301,7 @@ Dbtup::execLCP_FRAG_ORD(Signal* signal)
   new (scanPtr.p) ScanOp;
   scanPtr.p->m_fragPtrI = fragPtr.i;
   scanPtr.p->m_state = ScanOp::First;
+
+  ndbassert(frag.m_lcp_keep_list_head.isNull());
+  ndbassert(frag.m_lcp_keep_list_tail.isNull());
 }

=== modified file 'storage/ndb/src/ndbapi/NdbOperationExec.cpp'
--- a/storage/ndb/src/ndbapi/NdbOperationExec.cpp	2011-04-29 09:23:56 +0000
+++ b/storage/ndb/src/ndbapi/NdbOperationExec.cpp	2011-05-11 13:37:37 +0000
@@ -1032,7 +1032,6 @@ NdbOperation::buildSignalsNdbRecord(Uint
       readMask.set(attrId);
       requestedCols++;
     }
-    theReceiver.m_record.m_column_count= requestedCols;
 
     /* Are there any columns to read via NdbRecord? */
     if (requestedCols > 0)

=== modified file 'storage/ndb/src/ndbapi/NdbQueryOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-05-05 11:10:57 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-05-11 13:37:37 +0000
@@ -4068,8 +4068,7 @@ NdbQueryOperationImpl::prepareReceiver()
                           0 /*key_size*/, 
                           0 /*read_range_no*/, 
                           getRowSize(),
-                          rowBuf,
-                          0);
+                          rowBuf);
     m_resultStreams[i]->getReceiver().prepareSend();
   }
  // So that we can test for buffer overrun.

=== modified file 'storage/ndb/src/ndbapi/NdbReceiver.cpp'
--- a/storage/ndb/src/ndbapi/NdbReceiver.cpp	2011-04-10 17:32:41 +0000
+++ b/storage/ndb/src/ndbapi/NdbReceiver.cpp	2011-05-11 13:37:37 +0000
@@ -60,7 +60,6 @@ NdbReceiver::init(ReceiverType type, boo
     m_record.m_row_buffer= NULL;
     m_record.m_row_offset= 0;
     m_record.m_read_range_no= false;
-    m_record.m_column_count= 0;
   }
   theFirstRecAttr = NULL;
   theCurrentRecAttr = NULL;
@@ -216,8 +215,7 @@ NdbReceiver::calculate_batch_size(Uint32
 void
 NdbReceiver::do_setup_ndbrecord(const NdbRecord *ndb_record, Uint32 batch_size,
                                 Uint32 key_size, Uint32 read_range_no,
-                                Uint32 rowsize, char *row_buffer,
-                                Uint32 column_count)
+                                Uint32 rowsize, char *row_buffer)
 {
   m_using_ndb_record= true;
   m_record.m_ndb_record= ndb_record;
@@ -225,7 +223,6 @@ NdbReceiver::do_setup_ndbrecord(const Nd
   m_record.m_row_buffer= row_buffer;
   m_record.m_row_offset= rowsize;
   m_record.m_read_range_no= read_range_no;
-  m_record.m_column_count= column_count;
 }
 
 //static
@@ -261,26 +258,6 @@ NdbReceiver::ndbrecord_rowsize(const Ndb
   return rowsize;
 }
 
-NdbRecAttr*
-NdbReceiver::copyout(NdbReceiver & dstRec){
-  assert(!m_using_ndb_record);
-  NdbRecAttr *src = m_rows[m_current_row++];
-  NdbRecAttr *dst = dstRec.theFirstRecAttr;
-  NdbRecAttr *start = src;
-  Uint32 tmp = m_recattr.m_hidden_count;
-  while(tmp--)
-    src = src->next();
-  
-  while(dst){
-    Uint32 len = src->get_size_in_bytes();
-    dst->receive_data((Uint32*)src->aRef(), len);
-    src = src->next();
-    dst = dst->next();
-  }
-
-  return start;
-}
-
 /**
  * pad
  * This function determines how much 'padding' should be applied

=== modified file 'storage/ndb/src/ndbapi/NdbScanOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp	2011-04-27 11:50:17 +0000
+++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp	2011-05-11 13:37:37 +0000
@@ -395,8 +395,6 @@ NdbScanOperation::generatePackedReadAIs(
     columnCount++;
   }
 
-  theReceiver.m_record.m_column_count= columnCount;
-
   int result= 0;
 
   /* Are there any columns to read via NdbRecord? 
@@ -2335,8 +2333,7 @@ int NdbScanOperation::prepareSendScan(Ui
   {
     m_receivers[i]->do_setup_ndbrecord(m_attribute_record, batch_size,
                                        key_size, m_read_range_no,
-                                       rowsize, buf,
-                                       theReceiver.m_record.m_column_count);
+                                       rowsize, buf);
     buf+= bufsize;
   }
 

=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2011-05-04 11:48:48 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2011-05-11 13:37:37 +0000
@@ -187,7 +187,7 @@ ErrorBundle ErrorCodes[] = {
   { 805,  DMEC, TR, "Out of attrinfo records in tuple manager" },
   { 830,  DMEC, TR, "Out of add fragment operation records" },
   { 873,  DMEC, TR, "Out of attrinfo records for scan in tuple manager" },
-  { 899,  DMEC, TR, "Rowid already allocated" },
+  { 899,  DMEC, IE, "Internal error: rowid already allocated" },
   { 1217, DMEC, TR, "Out of operation records in local data manager (increase MaxNoOfLocalOperations)" },
   { 1218, DMEC, TR, "Send Buffers overloaded in NDB kernel" },
   { 1220, DMEC, TR, "REDO log files overloaded (increase FragmentLogFileSize)" },

=== modified file 'storage/ndb/test/ndbapi/testBasic.cpp'
--- a/storage/ndb/test/ndbapi/testBasic.cpp	2011-04-07 07:22:49 +0000
+++ b/storage/ndb/test/ndbapi/testBasic.cpp	2011-05-07 06:17:02 +0000
@@ -2267,6 +2267,154 @@ runBug59496_case2(NDBT_Context* ctx, NDB
   return NDBT_OK;
 }
 
+#define CHK_RET_FAILED(x) if (!(x)) { ndbout_c("Failed on line: %u", __LINE__); return NDBT_FAILED; }
+
+int
+runTest899(NDBT_Context* ctx, NDBT_Step* step)
+{
+  Ndb* pNdb = GETNDB(step);
+  const NdbDictionary::Table* pTab = ctx->getTab();
+
+  const int rows = ctx->getNumRecords();
+  const int loops = ctx->getNumLoops();
+  const int batch = ctx->getProperty("Batch", Uint32(50));
+  const int until_stopped = ctx->getProperty("UntilStopped");
+
+  const NdbRecord * pRowRecord = pTab->getDefaultRecord();
+  CHK_RET_FAILED(pRowRecord != 0);
+
+  const Uint32 len = NdbDictionary::getRecordRowLength(pRowRecord);
+  Uint8 * pRow = new Uint8[len];
+
+  int count_ok = 0;
+  int count_failed = 0;
+  int count_899 = 0;
+  for (int i = 0; i < loops || (until_stopped && !ctx->isTestStopped()); i++)
+  {
+    ndbout_c("loop: %d",i);
+    int result = 0;
+    for (int rowNo = 0; rowNo < rows;)
+    {
+      NdbTransaction* pTrans = pNdb->startTransaction();
+      CHK_RET_FAILED(pTrans != 0);
+
+      for (int b = 0; rowNo < rows && b < batch; rowNo++, b++)
+      {
+        bzero(pRow, len);
+
+        HugoCalculator calc(* pTab);
+
+        NdbOperation::OperationOptions opts;
+        bzero(&opts, sizeof(opts));
+
+        const NdbOperation* pOp = 0;
+        switch(i % 2){
+        case 0:
+          calc.setValues(pRow, pRowRecord, rowNo, rand());
+          pOp = pTrans->writeTuple(pRowRecord, (char*)pRow,
+                                   pRowRecord, (char*)pRow,
+                                   0,
+                                   &opts,
+                                   sizeof(opts));
+          result = pTrans->execute(NoCommit);
+          break;
+        case 1:
+          calc.setValues(pRow, pRowRecord, rowNo, rand());
+          pOp = pTrans->deleteTuple(pRowRecord, (char*)pRow,
+                                    pRowRecord, (char*)pRow,
+                                    0,
+                                    &opts,
+                                    sizeof(opts));
+          result = pTrans->execute(NoCommit, AO_IgnoreError);
+          break;
+        }
+
+        CHK_RET_FAILED(pOp != 0);
+
+        if (result != 0)
+        {
+          goto found_error;
+        }
+      }
+      result = pTrans->execute(Commit);
+
+      if (result != 0)
+      {
+    found_error:
+        count_failed++;
+        NdbError err = pTrans->getNdbError();
+        if (! (err.status == NdbError::TemporaryError ||
+               err.classification == NdbError::NoDataFound ||
+               err.classification == NdbError::ConstraintViolation))
+        {
+          ndbout << err << endl;
+        }
+        CHK_RET_FAILED(err.status == NdbError::TemporaryError ||
+                       err.classification == NdbError::NoDataFound ||
+                       err.classification == NdbError::ConstraintViolation);
+        if (err.code == 899)
+        {
+          count_899++;
+          ndbout << err << endl;
+        }
+      }
+      else
+      {
+        count_ok++;
+      }
+      pTrans->close();
+    }
+  }
+
+  ndbout_c("count_ok: %d count_failed: %d (899: %d)",
+           count_ok, count_failed, count_899);
+  delete [] pRow;
+
+  return count_899 == 0 ? NDBT_OK : NDBT_FAILED;
+}
+
+int
+runInit899(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NdbRestarter restarter;
+  int val = DumpStateOrd::DihMinTimeBetweenLCP;
+  restarter.dumpStateAllNodes(&val, 1);
+
+  Ndb* pNdb = GETNDB(step);
+  const NdbDictionary::Table* pTab = ctx->getTab();
+  const NdbDictionary::Table * pTab2 = pNdb->getDictionary()->
+    getTable(pTab->getName());
+
+  int tableId = pTab2->getObjectId();
+  int val2[] = { DumpStateOrd::BackupErrorInsert, 10042, tableId };
+
+  for (int i = 0; i < restarter.getNumDbNodes(); i++)
+  {
+    if (i & 1)
+    {
+      int nodeId = restarter.getDbNodeId(i);
+      ndbout_c("Setting slow LCP of table %d on node %d",
+               tableId, nodeId);
+      restarter.dumpStateOneNode(nodeId, val2, 3);
+    }
+  }
+
+  return NDBT_OK;
+}
+
+int
+runEnd899(NDBT_Context* ctx, NDBT_Step* step)
+{
+  // reset LCP speed
+  NdbRestarter restarter;
+  int val[] = { DumpStateOrd::DihMinTimeBetweenLCP, 0 };
+  restarter.dumpStateAllNodes(val, 2);
+
+  restarter.insertErrorInAllNodes(0);
+  return NDBT_OK;
+}
+
+
 NDBT_TESTSUITE(testBasic);
 TESTCASE("PkInsert", 
 	 "Verify that we can insert and delete from this table using PK"
@@ -2618,6 +2766,13 @@ TESTCASE("Bug59496_case2", "")
   STEP(runBug59496_case2);
   STEPS(runBug59496_scan, 10);
 }
+TESTCASE("899", "")
+{
+  INITIALIZER(runLoadTable);
+  INITIALIZER(runInit899);
+  STEP(runTest899);
+  FINALIZER(runEnd899);
+}
 NDBT_TESTSUITE_END(testBasic);
 
 #if 0

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2011-05-04 05:33:14 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2011-05-11 13:37:37 +0000
@@ -32,6 +32,10 @@ max-time: 900
 cmd: testIndex
 args: -n NF_Mixed T1 T6 T13
 
+max-time: 900
+cmd: testBasic
+args: -r 5000 -n 899 T15 D1 D2
+
 max-time: 600
 cmd: atrt-testBackup
 args: -n NFMaster T1

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-5.1-telco-7.0-spj-scan-vs-scan branch(ole.john.aske:3492 to 3493) Ole John Aske11 May