List: Commits
From: Magnus Blåudd    Date: January 27 2011 2:20pm
Subject: bzr push into mysql-5.5-telco-7.0 branch (magnus.blaudd:3148 to 3160)
 3160 Magnus Blåudd	2011-01-27 [merge]
      Merge 7.0 -> 5.5-telco-7.0

    modified:
      mysql-test/suite/ndb/r/ndb_basic.result
      mysql-test/suite/ndb/t/ndb_basic.test
      sql/ha_ndbcluster.cc
      storage/ndb/include/ndbapi/NdbDictionary.hpp
      storage/ndb/src/ndbapi/NdbDictionary.cpp
      storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
      storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
 3159 Magnus Blåudd	2011-01-27
      ndb
       - remove unused includes

    modified:
      sql/ha_ndbcluster_binlog.cc
 3158 Magnus Blåudd	2011-01-27
      ndb
       - move comment to correct place

    modified:
      sql/ha_ndbcluster_binlog.cc
 3157 Magnus Blåudd	2011-01-27
      ndb
       - properly initialize mdl_request before opening the mysql.ndb_binlog_index
         table, by using the init_one_table function of TABLE_LIST
       - add comments
       - move the use_all_columns() call up one level to the writer function
       - add a call to release MDL locks on the opened table when the writer has
         finished with it
       - remove the simple_open_n_lock_tables glue, which is no longer needed
         (a short sketch of the open/release pattern follows the file list below)

    modified:
      sql/ha_ndbcluster_binlog.cc
      sql/ha_ndbcluster_glue.h
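
    A minimal sketch of the open/release pattern described above (the real code
    is in the sql/ha_ndbcluster_binlog.cc hunk further down; the helper name
    shown here is made up purely for illustration):

      static int open_ndb_binlog_index_for_write(THD *thd, TABLE **tab)
      {
        TABLE_LIST tables;
        tables.init_one_table(STRING_WITH_LEN(NDB_REP_DB),    // db
                              STRING_WITH_LEN(NDB_REP_TABLE), // table name
                              NDB_REP_TABLE,                  // alias
                              TL_WRITE);                      // opened for write
        tables.required_type= FRMTYPE_TABLE;   // only accept a real table

        if (open_and_lock_tables(thd, &tables, FALSE /* derived */,
                                 MYSQL_LOCK_IGNORE_TIMEOUT))
          return -1;                           // error already reported via thd

        *tab= tables.table;
        return 0;
      }

      /* ... and once the writer is done with the table: */
      close_thread_tables(thd);
      thd->mdl_context.release_transactional_locks();
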
 3156 Magnus Blåudd	2011-01-27
      ndb
       - move the setting of thd->proc_info out of the function's core logic
       - use the thd_proc_info macro for setting and getting the proc_info
         (a short sketch of the pattern follows the file list below)

    modified:
      sql/ha_ndbcluster_binlog.cc
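
    The proc_info handling boils down to a save/restore pair around the slow
    part (sketch only; thd_proc_info() sets the new state and returns the old
    one, so both the success and the error path can restore it):

      const char *save_proc_info=
        thd_proc_info(thd, "Opening " NDB_REP_DB "." NDB_REP_TABLE);

      /* ... open the table, possibly waiting for locks ... */

      thd_proc_info(thd, save_proc_info);   // restore on success and on error
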
 3155 Magnus Blåudd	2011-01-27
      ndb
       - move the thd->clear_error() call up one level in the call stack
       - add an assert to check that the function is normally not called with an
         error already set in thd (such an error should have been handled or
         reset where it occurred); a sketch of the check follows the file list below

    modified:
      sql/ha_ndbcluster_binlog.cc
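
    The convention introduced above, as it appears at the top of the writer
    function (sketch, condensed from the diff below):

      /*
        Assume the function is not called with an error already set in thd;
        assert in debug builds, but clear it for safety in release builds.
      */
      assert(!thd->is_error());
      thd->clear_error();
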
 3154 Magnus Blåudd	2011-01-27
      ndb
       - Explicitly commit or roll back the writes to the ndb_binlog_index table,
         even though a non-transactional engine is normally used for the
         ndb_binlog_index table (a sketch follows the file list below)

    modified:
      sql/ha_ndbcluster_binlog.cc
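
    The commit-or-rollback decision is driven by the statement's error state
    (sketch; see the matching hunk in sql/ha_ndbcluster_binlog.cc below):

      /* Commit on success, roll back if the write failed */
      thd->stmt_da->can_overwrite_status= TRUE;
      thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd);
      thd->stmt_da->can_overwrite_status= FALSE;
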
 3153 Magnus Blåudd	2011-01-27
      ndb
       - rename open_and_lock_ndb_binlog_index() to ndb_binlog_index_table__open()
       - update the comment to describe that the table is being opened for writing

    modified:
      sql/ha_ndbcluster_binlog.cc
 3152 Magnus Blåudd	2011-01-27
      ndb
       - rename ndb_add_ndb_binlog_index() to ndb_binlog_index_table__write_rows()
       - update the comment to describe that multiple rows are written

    modified:
      sql/ha_ndbcluster_binlog.cc
 3151 Magnus Blåudd	2011-01-26
      ndb
       - remove unused argument for ndb_binlog_thread_handle_error()

    modified:
      sql/ha_ndbcluster_binlog.cc
 3150 Magnus Blåudd	2011-01-26 [merge]
      Merge 7.0 -> 5.5-telco-7.0

    added:
      mysql-test/suite/ndb/r/ndb_statistics.result
      mysql-test/suite/ndb/t/ndb_statistics.test
    modified:
      configure.in
      mysql-test/suite/ndb/include/run_ndbapitest.inc
      mysql-test/suite/ndb/r/ndb_alter_table3.result
      mysql-test/suite/ndb/r/ndb_condition_pushdown.result
      mysql-test/suite/ndb/r/ndb_index_unique.result
      mysql-test/suite/ndb/r/ndb_read_multi_range.result
      mysql-test/suite/ndb/r/ndb_temporary.result
      mysql-test/suite/ndb/t/ndb_condition_pushdown.test
      mysql-test/suite/ndb/t/ndb_temporary.test
      mysql-test/suite/rpl_ndb/r/rpl_ndb_ddl.result
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster_cond.cc
      storage/ndb/include/CMakeLists.txt
      storage/ndb/include/kernel/signaldata/DisconnectRep.hpp
      storage/ndb/include/portlib/NdbMutex.h
      storage/ndb/ndb_configure.m4
      storage/ndb/src/common/portlib/NdbCondition.c
      storage/ndb/src/common/portlib/NdbMutex.c
      storage/ndb/src/kernel/CMakeLists.txt
      storage/ndb/src/kernel/blocks/CMakeLists.txt
      storage/ndb/src/kernel/blocks/backup/CMakeLists.txt
      storage/ndb/src/kernel/blocks/dbdict/CMakeLists.txt
      storage/ndb/src/kernel/blocks/dbdih/CMakeLists.txt
      storage/ndb/src/kernel/blocks/dblqh/CMakeLists.txt
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
      storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
      storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp
      storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
      storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
      storage/ndb/src/mgmclient/CMakeLists.txt
      storage/ndb/src/mgmsrv/CMakeLists.txt
      storage/ndb/src/mgmsrv/ConfigManager.cpp
      storage/ndb/src/mgmsrv/ConfigManager.hpp
      storage/ndb/src/mgmsrv/MgmtSrvr.cpp
      storage/ndb/src/ndbapi/ClusterMgr.cpp
      storage/ndb/src/ndbapi/ClusterMgr.hpp
      storage/ndb/src/ndbapi/TransporterFacade.cpp
      storage/ndb/src/ndbapi/trp_client.cpp
      storage/ndb/src/ndbapi/trp_client.hpp
      storage/ndb/src/ndbapi/trp_node.cpp
      storage/ndb/src/ndbapi/trp_node.hpp
      storage/ndb/test/ndbapi/testMgmd.cpp
      storage/ndb/test/run-test/atrt.hpp
      storage/ndb/test/run-test/main.cpp
      storage/ndb/tools/CMakeLists.txt
      storage/ndb/tools/restore/consumer_restore.cpp
 3149 Magnus Blåudd	2011-01-21
      ndb
       - fix mysql version string after merging in 7.0.21

    modified:
      configure.in
 3148 Magnus Blåudd	2011-01-21
      ndb
       - It's no longer allowed to push a warning(with push_warning() or push_warning_printf()) with warning level WARN_lEVEL_ERROR in 5.5. 
       - change ha_ndb* to use WARN_LEVEL_WARNING or WARN_LEVEL_NOTE instead.
       - update .result files

    modified:
      mysql-test/suite/ndb/r/ndb_bitfield.result
      mysql-test/suite/ndb/r/ndb_dd_basic.result
      mysql-test/suite/ndb/r/ndb_dd_ddl.result
      mysql-test/suite/ndb/r/ndb_hidden_pk.result
      mysql-test/suite/ndb/r/ndb_partition_error.result
      mysql-test/suite/ndb/r/ndb_single_user.result
      mysql-test/suite/ndb/r/ndbinfo.result
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster_binlog.cc
      sql/ha_ndbinfo.cc
      sql/sql_error.cc
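
    A sketch of the kind of call site involved: the condition is still returned
    to the client as an error via the handler return code, while the pushed
    warning uses WARN_LEVEL_WARN. The same pattern can be seen in the
    ha_ndbcluster.cc hunk further down (temporary-table check in create()):

      my_errno= ER_ILLEGAL_HA_CREATE_OPTION;
      push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                          ER_ILLEGAL_HA_CREATE_OPTION,
                          "Ndb doesn't support temporary tables");
      DBUG_RETURN(my_errno);
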
=== modified file 'configure.in'
--- a/configure.in	2010-11-16 12:37:26 +0000
+++ b/configure.in	2011-01-26 09:54:24 +0000
@@ -27,7 +27,7 @@ dnl
 dnl When changing the major version number please also check the switch
 dnl statement in mysqlbinlog::check_master_version().  You may also need
 dnl to update version.c in ndb.
-AC_INIT([MySQL Server], [5.5.7-ndb-7.0.20-alpha], [], [mysql])
+AC_INIT([MySQL Server], [5.5.7-ndb-7.0.22-alpha], [], [mysql])
 
 AC_CONFIG_SRCDIR([sql/mysqld.cc])
 AC_CANONICAL_SYSTEM

=== modified file 'mysql-test/suite/ndb/include/run_ndbapitest.inc'
--- a/mysql-test/suite/ndb/include/run_ndbapitest.inc	2010-01-07 17:39:03 +0000
+++ b/mysql-test/suite/ndb/include/run_ndbapitest.inc	2011-01-17 12:42:30 +0000
@@ -22,6 +22,11 @@ unless($test_bin)
   exit(0);
 }
 
+if ($ENV{'MYSQL_TMP_DIR'})
+{
+  $ENV{'NDBT_TMP_DIR'} = $ENV{'MYSQL_TMP_DIR'};
+}
+
 my $cmd = $test_bin;
 $cmd .= " $args" if $args;
 $cmd .= " 2>&1";

=== modified file 'mysql-test/suite/ndb/r/ndb_alter_table3.result'
--- a/mysql-test/suite/ndb/r/ndb_alter_table3.result	2010-10-27 11:32:32 +0000
+++ b/mysql-test/suite/ndb/r/ndb_alter_table3.result	2011-01-26 09:54:24 +0000
@@ -6,8 +6,8 @@ create index c on t1(c);
 show indexes from t1;
 Table	Non_unique	Key_name	Seq_in_index	Column_name	Collation	Cardinality	Sub_part	Packed	Null	Index_type	Comment
 t1	0	PRIMARY	1	a	A	3	NULL	NULL		BTREE	
-t1	1	b	1	b	A	3	NULL	NULL	YES	BTREE	
-t1	1	c	1	c	A	3	NULL	NULL	YES	BTREE	
+t1	1	b	1	b	A	NULL	NULL	NULL	YES	BTREE	
+t1	1	c	1	c	A	NULL	NULL	NULL	YES	BTREE	
 select * from t1 where c = 'two';
 a	b	c
 2	two	two
@@ -15,7 +15,7 @@ alter table t1 drop index c;
 show indexes from t1;
 Table	Non_unique	Key_name	Seq_in_index	Column_name	Collation	Cardinality	Sub_part	Packed	Null	Index_type	Comment
 t1	0	PRIMARY	1	a	A	3	NULL	NULL		BTREE	
-t1	1	b	1	b	A	3	NULL	NULL	YES	BTREE	
+t1	1	b	1	b	A	NULL	NULL	NULL	YES	BTREE	
 select * from t1 where c = 'two';
 a	b	c
 2	two	two

=== modified file 'mysql-test/suite/ndb/r/ndb_basic.result'
--- a/mysql-test/suite/ndb/r/ndb_basic.result	2011-01-21 13:28:09 +0000
+++ b/mysql-test/suite/ndb/r/ndb_basic.result	2011-01-27 14:16:33 +0000
@@ -1,12 +1,5 @@
 DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
 drop database if exists mysqltest;
-CREATE TABLE t1 (
-pk1 INT NOT NULL PRIMARY KEY,
-attr1 INT NOT NULL,
-attr2 INT,
-attr3 VARCHAR(10)
-) ENGINE=ndbcluster;
-drop table t1;
 SHOW GLOBAL STATUS LIKE 'ndb\_%';
 Variable_name	Value
 Ndb_cluster_connection_pool	#
@@ -880,4 +873,16 @@ id	parent_id
 2	2
 3	3
 DROP TABLE child, parent;
+CREATE TABLE t1 (a INT PRIMARY KEY, b TEXT)
+ENGINE=ndb PARTITION BY KEY(a) PARTITIONS 24;
+ERROR HY000: Can't create table 'test.t1' (errno: 140)
+show warnings;
+Level	Code	Message
+Error	1296	Got error 1224 'Too many fragments' from NDB
+Error	1005	Can't create table 'test.t1' (errno: 140)
+CREATE TABLE t1 (a INT PRIMARY KEY, b TEXT)
+ENGINE=ndb;
+show warnings;
+Level	Code	Message
+drop table t1;
 End of 5.1 tests

=== modified file 'mysql-test/suite/ndb/r/ndb_condition_pushdown.result'
--- a/mysql-test/suite/ndb/r/ndb_condition_pushdown.result	2011-01-21 12:43:19 +0000
+++ b/mysql-test/suite/ndb/r/ndb_condition_pushdown.result	2011-01-26 09:54:24 +0000
@@ -2279,5 +2279,18 @@ select * from t where exists
 pk	i
 1	3
 drop table t,subq;
+create table t (pk1 int, pk2 int, primary key(pk1,pk2)) engine = ndb;
+insert into t values (1,0), (2,0), (3,0), (4,0);
+set engine_condition_pushdown=1;
+select table1.pk1, table2.pk1, table1.pk2, table2.pk2
+from t as table1, t as table2
+where table2.pk1 in (0,3) and
+(table1.pk1 = 7 or table2.pk1 = 3);
+pk1	pk1	pk2	pk2
+1	3	0	0
+2	3	0	0
+3	3	0	0
+4	3	0	0
+drop table t;
 set engine_condition_pushdown = @old_ecpd;
 DROP TABLE t1,t2,t3,t4,t5;

=== modified file 'mysql-test/suite/ndb/r/ndb_index_unique.result'
--- a/mysql-test/suite/ndb/r/ndb_index_unique.result	2011-01-21 12:43:19 +0000
+++ b/mysql-test/suite/ndb/r/ndb_index_unique.result	2011-01-26 09:54:24 +0000
@@ -185,7 +185,7 @@ set @old_ecpd = @@session.engine_conditi
 set engine_condition_pushdown = true;
 explain select * from t2 where (b = 3 OR b = 5) AND c IS NULL AND a < 9 order by a;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
-1	SIMPLE	t2	range	PRIMARY,b	PRIMARY	4	NULL	1	Using where with pushed condition
+1	SIMPLE	t2	range	PRIMARY,b	b	9	NULL	2	Using where with pushed condition; Using filesort
 select * from t2 where (b = 3 OR b = 5) AND c IS NULL AND a < 9 order by a;
 a	b	c
 3	3	NULL

=== modified file 'mysql-test/suite/ndb/r/ndb_read_multi_range.result'
--- a/mysql-test/suite/ndb/r/ndb_read_multi_range.result	2010-11-24 13:16:09 +0000
+++ b/mysql-test/suite/ndb/r/ndb_read_multi_range.result	2011-01-18 07:49:14 +0000
@@ -605,7 +605,7 @@ SELECT DISTINCT STRAIGHT_JOIN t1.pk FROM
 t1 LEFT JOIN t2 ON t2.a = t1.a AND t2.pk != 6;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	SIMPLE	t1	ALL	NULL	NULL	NULL	NULL	3000	Using temporary
-1	SIMPLE	t2	range	PRIMARY	PRIMARY	4	NULL	2	Using where; Distinct
+1	SIMPLE	t2	range	PRIMARY	PRIMARY	4	NULL	20	Using where; Distinct
 SELECT DISTINCT STRAIGHT_JOIN t1.pk FROM 
 t1 LEFT JOIN t2 ON t2.a = t1.a AND t2.pk != 6;
 drop table t1, t2;

=== added file 'mysql-test/suite/ndb/r/ndb_statistics.result'
--- a/mysql-test/suite/ndb/r/ndb_statistics.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb/r/ndb_statistics.result	2011-01-18 11:49:03 +0000
@@ -0,0 +1,61 @@
+drop table if exists t1, t2, t3, t4;
+CREATE TABLE t10(
+K INT NOT NULL AUTO_INCREMENT,
+I INT, J INT,
+PRIMARY KEY(K),
+KEY(I,J),
+UNIQUE KEY(J,K)
+) ENGINE=ndbcluster;
+INSERT INTO t10(I,J) VALUES (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(0,0);
+CREATE TABLE t100 LIKE t10;
+INSERT INTO t100(I,J)
+SELECT X.J, X.J+(10*Y.J) FROM t10 AS X,t10 AS Y;
+CREATE TABLE t10000 LIKE t10;
+INSERT INTO t10000(I,J)
+SELECT X.J, X.J+(100*Y.J) FROM t100 AS X,t100 AS Y
+WHERE X.J<50;
+INSERT INTO t10000(I,J)
+SELECT X.J, X.J+(100*Y.J) FROM t100 AS X,t100 AS Y
+WHERE X.J>=50;
+ANALYZE TABLE t10,t100,t10000;
+Table	Op	Msg_type	Msg_text
+test.t10	analyze	status	OK
+test.t100	analyze	status	OK
+test.t10000	analyze	status	OK
+SELECT COUNT(*) FROM t10;
+COUNT(*)
+10
+SELECT COUNT(*) FROM t100;
+COUNT(*)
+100
+SELECT COUNT(*) FROM t10000;
+COUNT(*)
+10000
+EXPLAIN
+SELECT * FROM t10000 WHERE k = 42;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t10000	const	PRIMARY	PRIMARY	4	const	1	
+EXPLAIN
+SELECT * FROM t10000 WHERE k >= 42 and k < 10000;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t10000	range	PRIMARY	PRIMARY	4	NULL	10	Using where with pushed condition
+EXPLAIN
+SELECT * FROM t10000 WHERE k BETWEEN 42 AND 10000;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t10000	range	PRIMARY	PRIMARY	4	NULL	10	Using where with pushed condition
+EXPLAIN
+SELECT * FROM t10000 WHERE k < 42;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t10000	range	PRIMARY	PRIMARY	4	NULL	10	Using where with pushed condition
+EXPLAIN
+SELECT * FROM t10000 WHERE k > 42;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t10000	range	PRIMARY	PRIMARY	4	NULL	10	Using where with pushed condition
+EXPLAIN
+SELECT * FROM t10000 AS X JOIN t10000 AS Y
+ON Y.I=X.I AND Y.J = X.I;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	X	ALL	I	NULL	NULL	NULL	10000	
+1	SIMPLE	Y	ref	J,I	I	10	test.X.I,test.X.I	11	Using where
+DROP TABLE t10,t100,t10000;
+End of 5.1 tests

=== modified file 'mysql-test/suite/ndb/r/ndb_temporary.result'
--- a/mysql-test/suite/ndb/r/ndb_temporary.result	2007-08-02 22:14:05 +0000
+++ b/mysql-test/suite/ndb/r/ndb_temporary.result	2011-01-25 09:45:42 +0000
@@ -5,6 +5,10 @@ create temporary table t1 (a int key) en
 alter table t1 engine=ndb;
 ERROR HY000: Table storage engine 'ndbcluster' does not support the create option 'TEMPORARY'
 drop table t1;
+CREATE TABLE bar ( id TINYINT NOT NULL AUTO_INCREMENT PRIMARY KEY ) ENGINE=NDBCluster ;
+CREATE TEMPORARY TABLE foo LIKE bar ;
+ERROR HY000: Can't create table 'test.foo' (errno: 1478)
+DROP TABLE bar;
 SET SESSION storage_engine=NDBCLUSTER;
 create table t1 (a int key);
 select engine from information_schema.tables where table_name = 't1';

=== modified file 'mysql-test/suite/ndb/t/ndb_basic.test'
--- a/mysql-test/suite/ndb/t/ndb_basic.test	2011-01-21 13:05:52 +0000
+++ b/mysql-test/suite/ndb/t/ndb_basic.test	2011-01-27 13:31:04 +0000
@@ -5,17 +5,6 @@ DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t
 drop database if exists mysqltest;
 --enable_warnings
 
-# workaround for bug#16445
-# remove to reproduce bug and run tests from ndb start
-# and with ndb_autodiscover disabled. Fails on Linux 50 % of the times
-CREATE TABLE t1 (
-  pk1 INT NOT NULL PRIMARY KEY,
-  attr1 INT NOT NULL,
-  attr2 INT,
-  attr3 VARCHAR(10)
-) ENGINE=ndbcluster;
-drop table t1;
-
 #
 # Basic test to show that the NDB 
 # table handler is working
@@ -796,4 +785,16 @@ SELECT * FROM child ORDER BY id;
 
 DROP TABLE child, parent;
 
+#
+# bug#59756
+#
+--error 1005
+CREATE TABLE t1 (a INT PRIMARY KEY, b TEXT)
+ENGINE=ndb PARTITION BY KEY(a) PARTITIONS 24;
+show warnings;
+CREATE TABLE t1 (a INT PRIMARY KEY, b TEXT)
+ENGINE=ndb;
+show warnings;
+drop table t1;
+
 --echo End of 5.1 tests

=== modified file 'mysql-test/suite/ndb/t/ndb_condition_pushdown.test'
--- a/mysql-test/suite/ndb/t/ndb_condition_pushdown.test	2011-01-14 15:25:27 +0000
+++ b/mysql-test/suite/ndb/t/ndb_condition_pushdown.test	2011-01-26 09:54:24 +0000
@@ -2346,6 +2346,28 @@ select * from t where exists
 drop table t,subq;
 
 
+
+# Bug#58791 Incorrect result as Cluster may fail to reject an unpushable condition
+
+create table t (pk1 int, pk2 int, primary key(pk1,pk2)) engine = ndb;
+insert into t values (1,0), (2,0), (3,0), (4,0);
+
+set engine_condition_pushdown=1;
+
+# Multiple instances of same table (t as table<n>, ) confused 
+# ha_ndbcluster::cond_push() which accepted
+# '(table1.pk1 = 7 or table2.pk1 = 3)' as a pushable cond.
+# for 'table2'
+#
+
+--sorted_result
+select table1.pk1, table2.pk1, table1.pk2, table2.pk2
+ from t as table1, t as table2
+ where table2.pk1 in (0,3) and
+   (table1.pk1 = 7 or table2.pk1 = 3);
+
+drop table t;
+
 set engine_condition_pushdown = @old_ecpd;
 DROP TABLE t1,t2,t3,t4,t5;
 

=== added file 'mysql-test/suite/ndb/t/ndb_statistics.test'
--- a/mysql-test/suite/ndb/t/ndb_statistics.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb/t/ndb_statistics.test	2011-01-18 11:49:03 +0000
@@ -0,0 +1,68 @@
+-- source include/have_ndb.inc
+
+--disable_warnings
+drop table if exists t1, t2, t3, t4;
+--enable_warnings
+
+CREATE TABLE t10(
+  K INT NOT NULL AUTO_INCREMENT,
+  I INT, J INT,
+  PRIMARY KEY(K),
+  KEY(I,J),
+  UNIQUE KEY(J,K)
+) ENGINE=ndbcluster;
+
+INSERT INTO t10(I,J) VALUES (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(0,0);
+
+CREATE TABLE t100 LIKE t10;
+INSERT INTO t100(I,J)
+  SELECT X.J, X.J+(10*Y.J) FROM t10 AS X,t10 AS Y;
+
+CREATE TABLE t10000 LIKE t10;
+
+# Insert into t10000 in two chunks to not
+#  exhaust MaxNoOfConcurrentOperations
+INSERT INTO t10000(I,J)
+  SELECT X.J, X.J+(100*Y.J) FROM t100 AS X,t100 AS Y
+  WHERE X.J<50;
+INSERT INTO t10000(I,J)
+  SELECT X.J, X.J+(100*Y.J) FROM t100 AS X,t100 AS Y
+  WHERE X.J>=50;
+
+ANALYZE TABLE t10,t100,t10000;
+
+SELECT COUNT(*) FROM t10;
+SELECT COUNT(*) FROM t100;
+SELECT COUNT(*) FROM t10000;
+
+#
+# Bug #59517: Incorrect detection of single row access in
+#             ha_ndbcluster::records_in_range()
+
+# Expect a single row (or const) when PK is excact specified
+EXPLAIN
+SELECT * FROM t10000 WHERE k = 42;
+
+# All queries below should *not* return a single row
+EXPLAIN
+SELECT * FROM t10000 WHERE k >= 42 and k < 10000;
+EXPLAIN
+SELECT * FROM t10000 WHERE k BETWEEN 42 AND 10000;
+EXPLAIN
+SELECT * FROM t10000 WHERE k < 42;
+EXPLAIN
+SELECT * FROM t10000 WHERE k > 42;
+
+#
+# Bug #59519 ::set_rec_per_key() assumes ORDER_INDEX to be unique
+#
+
+# 'REF' join of 'Y' should match >1 rows
+EXPLAIN
+SELECT * FROM t10000 AS X JOIN t10000 AS Y
+  ON Y.I=X.I AND Y.J = X.I;
+
+
+DROP TABLE t10,t100,t10000;
+
+--echo End of 5.1 tests

=== modified file 'mysql-test/suite/ndb/t/ndb_temporary.test'
--- a/mysql-test/suite/ndb/t/ndb_temporary.test	2007-11-29 10:29:35 +0000
+++ b/mysql-test/suite/ndb/t/ndb_temporary.test	2011-01-25 09:50:48 +0000
@@ -21,6 +21,14 @@ drop table t1;
 
 
 #
+# create temporary like on an ndb table should give an error (bug#57437)
+#
+CREATE TABLE bar ( id TINYINT NOT NULL AUTO_INCREMENT PRIMARY KEY ) ENGINE=NDBCluster ;
+--error ER_CANT_CREATE_TABLE
+CREATE TEMPORARY TABLE foo LIKE bar ;
+DROP TABLE bar;
+
+#
 # if default storage engine=ndb, temporary tables
 # without explicit engine= should be created as myisam
 #

=== modified file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_ddl.result'
--- a/mysql-test/suite/rpl_ndb/r/rpl_ndb_ddl.result	2010-10-27 11:32:32 +0000
+++ b/mysql-test/suite/rpl_ndb/r/rpl_ndb_ddl.result	2011-01-26 09:54:24 +0000
@@ -928,12 +928,12 @@ TEST-INFO: SLAVE:  The INSERT is committ
 -------- switch to master -------
 SHOW INDEX FROM mysqltest1.t5;
 Table	Non_unique	Key_name	Seq_in_index	Column_name	Collation	Cardinality	Sub_part	Packed	Null	Index_type	Comment
-t5	1	my_idx5	1	f1	A	0	NULL	NULL	YES	BTREE	
+t5	1	my_idx5	1	f1	A	NULL	NULL	NULL	YES	BTREE	
 
 -------- switch to slave --------
 SHOW INDEX FROM mysqltest1.t5;
 Table	Non_unique	Key_name	Seq_in_index	Column_name	Collation	Cardinality	Sub_part	Packed	Null	Index_type	Comment
-t5	1	my_idx5	1	f1	A	0	NULL	NULL	YES	BTREE	
+t5	1	my_idx5	1	f1	A	NULL	NULL	NULL	YES	BTREE	
 
 -------- switch to master -------
 

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2011-01-21 14:27:30 +0000
+++ b/sql/ha_ndbcluster.cc	2011-01-27 14:16:33 +0000
@@ -959,9 +959,35 @@ Ndb *ha_ndbcluster::get_ndb(THD *thd)
 void ha_ndbcluster::set_rec_per_key()
 {
   DBUG_ENTER("ha_ndbcluster::set_rec_per_key");
+  /*
+    Set up the 'rec_per_key[]' for keys which we have good knowledge
+    about the distribution. 'rec_per_key[]' is init'ed to '0' by 
+    open_binary_frm(), which is interpreted as 'unknown' by optimizer.
+    -> Not setting 'rec_per_key[]' will force the optimizer to use
+    its own heuristic to estimate 'records pr. key'.
+  */
   for (uint i=0 ; i < table_share->keys ; i++)
   {
-    table->key_info[i].rec_per_key[table->key_info[i].key_parts-1]= 1;
+    switch (get_index_type(i))
+    {
+    case UNIQUE_ORDERED_INDEX:
+    case PRIMARY_KEY_ORDERED_INDEX:
+    case UNIQUE_INDEX:
+    case PRIMARY_KEY_INDEX:
+    {
+      // Index is unique when all 'key_parts' are specified,
+      // else distribution is unknown and not specified here.
+      KEY* key_info= table->key_info + i;
+      key_info->rec_per_key[key_info->key_parts-1]= 1;
+      break;
+    }
+    case ORDERED_INDEX:
+      // 'Records pr. key' are unknown for non-unique indexes.
+      // (May change when we get better index statistics.)
+      break;
+    default:
+      DBUG_ASSERT(false);
+    }
   }
   DBUG_VOID_RETURN;
 }
@@ -7639,6 +7665,7 @@ int ha_ndbcluster::create(const char *na
   size_t pack_length, length;
   uint i, pk_length= 0;
   uchar *data= NULL, *pack_data= NULL;
+  bool create_temporary= (create_info->options & HA_LEX_CREATE_TMP_TABLE);
   bool create_from_engine= (create_info->table_options & HA_OPTION_CREATE_FROM_ENGINE);
   bool is_truncate= (thd->lex->sql_command == SQLCOM_TRUNCATE);
   const char *tablespace= create_info->tablespace;
@@ -7647,10 +7674,24 @@ int ha_ndbcluster::create(const char *na
   bool ndb_sys_table= FALSE;
   partition_info *part_info;
   int result= 0;
+  NdbDictionary::ObjectId objId;
 
   DBUG_ENTER("ha_ndbcluster::create");
   DBUG_PRINT("enter", ("name: %s", name));
 
+  if (create_temporary)
+  {
+    /*
+      Ndb does not support temporary tables
+     */
+    my_errno= ER_ILLEGAL_HA_CREATE_OPTION;
+    DBUG_PRINT("info", ("Ndb doesn't support temporary tables"));
+    push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+                        ER_ILLEGAL_HA_CREATE_OPTION,
+                        "Ndb doesn't support temporary tables");
+    DBUG_RETURN(my_errno);
+  }
+
   DBUG_ASSERT(*fn_rext((char*)name) == 0);
   set_dbname(name);
   set_tabname(name);
@@ -7660,7 +7701,6 @@ int ha_ndbcluster::create(const char *na
   
   Ndb *ndb= get_ndb(thd);
   NDBDICT *dict= ndb->getDictionary();
-  Ndb_table_guard ndbtab_g(dict);
 
 #ifndef NDB_WITHOUT_TABLESPACE_IN_FRM
   DBUG_PRINT("info", ("Tablespace %s,%s", form->s->tablespace, create_info->tablespace));
@@ -7714,6 +7754,7 @@ int ha_ndbcluster::create(const char *na
 
   if (is_truncate)
   {
+    Ndb_table_guard ndbtab_g(dict);
     ndbtab_g.init(m_tabname);
     if (!(m_table= ndbtab_g.get_table()))
       ERR_RETURN(dict->getNdbError());
@@ -7741,7 +7782,9 @@ int ha_ndbcluster::create(const char *na
   {
     if (THDVAR(thd, table_temporary))
     {
+#ifdef DOES_NOT_WORK_CURRENTLY
       tab.setTemporary(TRUE);
+#endif
       tab.setLogging(FALSE);
     }
     else if (THDVAR(thd, table_no_logging))
@@ -8015,7 +8058,7 @@ int ha_ndbcluster::create(const char *na
   }
 
   // Create the table in NDB     
-  if (dict->createTable(tab) != 0) 
+  if (dict->createTable(tab, &objId) != 0)
   {
     const NdbError err= dict->getNdbError();
     set_ndb_err(thd, err);
@@ -8023,26 +8066,14 @@ int ha_ndbcluster::create(const char *na
     goto abort;
   }
 
-  ndbtab_g.init(m_tabname);
-  // temporary set m_table during create
-  // reset at return
-  m_table= ndbtab_g.get_table();
-  // TODO check also that we have the same frm...
-  if (!m_table)
-  {
-    /* purecov: begin deadcode */
-    const NdbError err= dict->getNdbError();
-    set_ndb_err(thd, err);
-    my_errno= ndb_to_mysql_error(&err);
-    goto abort;
-    /* purecov: end */
-  }
-
   DBUG_PRINT("info", ("Table %s/%s created successfully", 
                       m_dbname, m_tabname));
 
   // Create secondary indexes
+  tab.assignObjId(objId);
+  m_table= &tab;
   my_errno= create_indexes(thd, ndb, form);
+  m_table= 0;
 
   if (!my_errno)
   {
@@ -8078,6 +8109,13 @@ err_return:
     m_table= 0;
     ERR_RETURN(dict->getNdbError());
   }
+
+  /**
+   * createTable/index schema transaction OK
+   */
+  Ndb_table_guard ndbtab_g(dict, m_tabname);
+  m_table= ndbtab_g.get_table();
+
   if (my_errno)
   {
     /*
@@ -10582,8 +10620,10 @@ ha_ndbcluster::records_in_range(uint inx
   // Read from hash index with full key
   // This is a "const" table which returns only one record!      
   if ((idx_type != ORDERED_INDEX) &&
-      ((min_key && min_key->length == key_length) || 
-       (max_key && max_key->length == key_length)))
+      ((min_key && min_key->length == key_length) &&
+       (max_key && max_key->length == key_length) &&
+       (min_key->key==max_key->key ||
+        memcmp(min_key->key, max_key->key, key_length)==0)))
     DBUG_RETURN(1);
   
   if ((idx_type == PRIMARY_KEY_ORDERED_INDEX ||
@@ -12783,12 +12823,24 @@ Item*
 ha_ndbcluster::cond_push(const Item *cond) 
 { 
   DBUG_ENTER("cond_push");
+
+  if (cond->used_tables() & ~table->map)
+  {
+    /**
+     * 'cond' refers fields from other tables, or other instances 
+     * of this table, -> reject it.
+     * (Optimizer need to have a better understanding of what is 
+     *  pushable by each handler.)
+     */
+    DBUG_EXECUTE("where",print_where((Item *)cond, "Rejected cond_push", QT_ORDINARY););
+    DBUG_RETURN(cond);
+  }
   if (!m_cond) 
     m_cond= new ha_ndbcluster_cond;
   if (!m_cond)
   {
     my_errno= HA_ERR_OUT_OF_MEM;
-    DBUG_RETURN(NULL);
+    DBUG_RETURN(cond);
   }
   DBUG_EXECUTE("where",print_where((Item *)cond, m_tabname, QT_ORDINARY););
   DBUG_RETURN(m_cond->cond_push(cond, table, (NDBTAB *)m_table));

=== modified file 'sql/ha_ndbcluster_binlog.cc'
--- a/sql/ha_ndbcluster_binlog.cc	2011-01-21 14:27:30 +0000
+++ b/sql/ha_ndbcluster_binlog.cc	2011-01-27 14:05:37 +0000
@@ -32,8 +32,6 @@
 #include "ha_ndbcluster_binlog.h"
 #include <ndbapi/NdbDictionary.hpp>
 #include <ndbapi/ndb_cluster_connection.hpp>
-#include <util/NdbAutoPtr.hpp>
-#include <portlib/NdbTick.h>
 
 #ifdef ndb_dynamite
 #undef assert
@@ -3396,22 +3394,30 @@ struct ndb_binlog_index_row {
   struct ndb_binlog_index_row *next;
 };
 
+
 /*
-  Open the ndb_binlog_index table
+  Open the ndb_binlog_index table for writing
 */
-static int open_and_lock_ndb_binlog_index(THD *thd, TABLE_LIST *tables,
-                                          TABLE **ndb_binlog_index)
+static int
+ndb_binlog_index_table__open(THD *thd,
+                             TABLE **ndb_binlog_index)
 {
-  const char *save_proc_info= thd->proc_info;
+  const char *save_proc_info=
+    thd_proc_info(thd, "Opening " NDB_REP_DB "." NDB_REP_TABLE);
 
-  bzero((char*) tables, sizeof(*tables));
-  tables->db= repdb;
-  tables->alias= tables->table_name= reptable;
-  tables->lock_type= TL_WRITE;
-  thd->proc_info= "Opening " NDB_REP_DB "." NDB_REP_TABLE;
-  tables->required_type= FRMTYPE_TABLE;
-  thd->clear_error();
-  if (simple_open_n_lock_tables(thd, tables))
+  TABLE_LIST tables;
+  tables.init_one_table(STRING_WITH_LEN(NDB_REP_DB),    // db
+                        STRING_WITH_LEN(NDB_REP_TABLE), // name
+                        NDB_REP_TABLE,                  // alias
+                        TL_WRITE);                      // for write
+
+  /* Only allow real table to be opened */
+  tables.required_type= FRMTYPE_TABLE;
+
+  const bool derived = false;
+  const uint flags =
+    MYSQL_LOCK_IGNORE_TIMEOUT; /* Wait for lock "infinitely" */
+  if (open_and_lock_tables(thd, &tables, derived, flags))
   {
     if (thd->killed)
       sql_print_error("NDB Binlog: Opening ndb_binlog_index: killed");
@@ -3419,26 +3425,32 @@ static int open_and_lock_ndb_binlog_inde
       sql_print_error("NDB Binlog: Opening ndb_binlog_index: %d, '%s'",
                       thd_stmt_da(thd)->sql_errno(),
                       thd_stmt_da(thd)->message());
-    thd->proc_info= save_proc_info;
+    thd_proc_info(thd, save_proc_info);
     return -1;
   }
-  *ndb_binlog_index= tables->table;
-  thd->proc_info= save_proc_info;
-  (*ndb_binlog_index)->use_all_columns();
+  *ndb_binlog_index= tables.table;
+  thd_proc_info(thd, save_proc_info);
   return 0;
 }
 
+
 /*
-  Insert one row in the ndb_binlog_index
+  Write rows to the ndb_binlog_index table
 */
-
 static int
-ndb_add_ndb_binlog_index(THD *thd, ndb_binlog_index_row *row)
+ndb_binlog_index_table__write_rows(THD *thd,
+                                   ndb_binlog_index_row *row)
 {
   int error= 0;
   ndb_binlog_index_row *first= row;
   TABLE *ndb_binlog_index= 0;
-  TABLE_LIST binlog_tables;
+
+  /*
+    Assume this function is not called with an error set in thd
+    (but clear for safety in release version)
+   */
+  assert(!thd->is_error());
+  thd->clear_error();
 
   /*
     Turn of binlogging to prevent the table changes to be written to
@@ -3446,20 +3458,22 @@ ndb_add_ndb_binlog_index(THD *thd, ndb_b
   */
   tmp_disable_binlog(thd);
 
-  if (open_and_lock_ndb_binlog_index(thd, &binlog_tables, &ndb_binlog_index))
+  if (ndb_binlog_index_table__open(thd, &ndb_binlog_index))
   {
     sql_print_error("NDB Binlog: Unable to lock table ndb_binlog_index");
     error= -1;
     goto add_ndb_binlog_index_err;
   }
 
-  /*
-    Intialize ndb_binlog_index->record[0]
-  */
+  // Set all columns to be written
+  ndb_binlog_index->use_all_columns();
+
   do
   {
     ulonglong epoch= 0, orig_epoch= 0;
     uint orig_server_id= 0;
+
+    // Intialize ndb_binlog_index->record[0]
     empty_record(ndb_binlog_index);
 
     ndb_binlog_index->field[0]->store(first->master_log_pos, true);
@@ -3512,7 +3526,20 @@ ndb_add_ndb_binlog_index(THD *thd, ndb_b
   } while (row);
 
 add_ndb_binlog_index_err:
+  /*
+    Explicitly commit or rollback the writes(although we normally
+    use a non transactional engine for the ndb_binlog_index table)
+  */
+  thd->stmt_da->can_overwrite_status= TRUE;
+  thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd);
+  thd->stmt_da->can_overwrite_status= FALSE;
+
+  // Close the tables this thread has opened
   close_thread_tables(thd);
+
+  // Release MDL locks on the opened table
+  thd->mdl_context.release_transactional_locks();
+
   reenable_binlog(thd);
   return error;
 }
@@ -5248,8 +5275,9 @@ static void ndb_unpack_record(TABLE *tab
 /*
   Handle error states on events from the storage nodes
 */
-static int ndb_binlog_thread_handle_error(Ndb *ndb, NdbEventOperation *pOp,
-                                          ndb_binlog_index_row &row)
+static int
+ndb_binlog_thread_handle_error(Ndb *ndb,
+                               NdbEventOperation *pOp)
 {
   Ndb_event_data *event_data= (Ndb_event_data *) pOp->getCustomData();
   NDB_SHARE *share= event_data->share;
@@ -6570,7 +6598,7 @@ restart_cluster_failure:
           event_count++;
 #endif
           if (pOp->hasError() &&
-              ndb_binlog_thread_handle_error(i_ndb, pOp, *rows) < 0)
+              ndb_binlog_thread_handle_error(i_ndb, pOp) < 0)
             goto err;
 
 #ifndef DBUG_OFF
@@ -6698,7 +6726,7 @@ restart_cluster_failure:
           DBUG_PRINT("info", ("COMMIT gci: %lu", (ulong) gci));
           if (opt_ndb_log_binlog_index)
           {
-            if (ndb_add_ndb_binlog_index(thd, rows))
+            if (ndb_binlog_index_table__write_rows(thd, rows))
             {
               /* 
                  Writing to ndb_binlog_index failed, check if we are
@@ -6710,7 +6738,7 @@ restart_cluster_failure:
                 volatile THD::killed_state killed= thd->killed;
                 /* We are cleaning up, allow for flushing last epoch */
                 thd->killed= THD::NOT_KILLED;
-                ndb_add_ndb_binlog_index(thd, rows);
+                ndb_binlog_index_table__write_rows(thd, rows);
                 /* Restore kill flag */
                 thd->killed= killed;
                 (void) mysql_mutex_unlock(&LOCK_thread_count);

=== modified file 'sql/ha_ndbcluster_cond.cc'
--- a/sql/ha_ndbcluster_cond.cc	2011-01-14 15:25:27 +0000
+++ b/sql/ha_ndbcluster_cond.cc	2011-01-26 09:54:24 +0000
@@ -972,7 +972,7 @@ ha_ndbcluster_cond::cond_push(const Item
   if (ndb_cond == NULL)
   {
     my_errno= HA_ERR_OUT_OF_MEM;
-    DBUG_RETURN(NULL);
+    DBUG_RETURN(cond);
   }
   if (m_cond_stack)
     ndb_cond->next= m_cond_stack;

=== modified file 'sql/ha_ndbcluster_glue.h'
--- a/sql/ha_ndbcluster_glue.h	2011-01-21 12:43:19 +0000
+++ b/sql/ha_ndbcluster_glue.h	2011-01-27 13:25:19 +0000
@@ -66,12 +66,6 @@ bool close_cached_tables(THD *thd, TABLE
   return close_cached_tables(thd, tables, wait_for_refresh, LONG_TIMEOUT);
 }
 
-/* simple_open_n_lock_tables has been removed */
-inline int simple_open_n_lock_tables(THD *thd, TABLE_LIST *tables)
-{
-  return open_and_lock_tables(thd, tables, FALSE, 0);
-}
-
 /* Online alter table not supported */
 #define NDB_WITHOUT_ONLINE_ALTER
 

=== modified file 'storage/ndb/include/CMakeLists.txt'
--- a/storage/ndb/include/CMakeLists.txt	2010-08-04 14:00:24 +0000
+++ b/storage/ndb/include/CMakeLists.txt	2011-01-26 08:45:32 +0000
@@ -26,29 +26,49 @@ CONFIGURE_FILE(ndb_types.h.in
                @ONLY)
 
 #
+# Read a value for variable from ndb_configure.m4
+#
+MACRO(NDB_GET_CONFIG_VALUE keyword var)
+ IF(NOT ${var})
+   # Read the line which contains the keyword
+   FILE (STRINGS ${NDB_SOURCE_DIR}/ndb_configure.m4 str
+         REGEX "^[ ]*${keyword}=")
+   IF(str)
+     # Remove the keyword=
+     STRING(REPLACE "${keyword}=" "" str ${str})
+     # Remove whitespace
+     STRING(REGEX REPLACE "[ ].*" "" str "${str}")
+     SET(${var} ${str})
+   ENDIF()
+ ENDIF()
+ENDMACRO()
+
+#
 # Read ndb_configure.m4 and extract the NDB_VERSION_XX=YY variables
 #
-FILE(READ "${CMAKE_SOURCE_DIR}/storage/ndb/ndb_configure.m4" CONFIGURE_IN)
 
-# NDB_VERSION_MAJOR
-STRING(REGEX REPLACE ".*NDB_VERSION_MAJOR=([0-9]+).*"
-        "\\1" PARSE_NDB_VERSION_MAJOR "${CONFIGURE_IN}")
-SET(NDB_VERSION_MAJOR "${PARSE_NDB_VERSION_MAJOR}" CACHE STRING "NDB Major Version" FORCE)
-
-# NDB_VERSION_MINOR
-STRING(REGEX REPLACE ".*NDB_VERSION_MINOR=([0-9]+).*"
-        "\\1" PARSE_NDB_VERSION_MINOR "${CONFIGURE_IN}")
-SET(NDB_VERSION_MINOR "${PARSE_NDB_VERSION_MINOR}" CACHE STRING "NDB Minor Version" FORCE)
-
-# NDB_VERSION_BUILD
-STRING(REGEX REPLACE ".*NDB_VERSION_BUILD=([0-9]+).*"
-        "\\1" PARSE_NDB_VERSION_BUILD "${CONFIGURE_IN}")
-SET(NDB_VERSION_BUILD "${PARSE_NDB_VERSION_BUILD}" CACHE STRING "NDB Build Version" FORCE)
-
-# NDB_VERSION_STATUS               
-STRING(REGEX REPLACE ".*NDB_VERSION_STATUS=\"([^\"]+)\".*"
-        "\\1" PARSE_NDB_VERSION_STATUS "${CONFIGURE_IN}")
-SET(NDB_VERSION_STATUS "${NDB_VERSION_STATUS}" CACHE STRING "NDB Status Version" FORCE)
+NDB_GET_CONFIG_VALUE(NDB_VERSION_MAJOR major)
+SET(NDB_VERSION_MAJOR "${major}" CACHE INTERNAL "NDB Major Version" FORCE)
+
+NDB_GET_CONFIG_VALUE(NDB_VERSION_MINOR minor)
+SET(NDB_VERSION_MINOR "${minor}" CACHE INTERNAL "NDB Minor Version" FORCE)
+
+NDB_GET_CONFIG_VALUE(NDB_VERSION_BUILD build)
+SET(NDB_VERSION_BUILD "${build}" CACHE INTERNAL "NDB Build Version" FORCE)
+
+NDB_GET_CONFIG_VALUE(NDB_VERSION_STATUS status)
+# Trim surrounding "'s from status
+STRING(REGEX REPLACE "\"" "" status "${status}")
+SET(NDB_VERSION_STATUS "${status}" CACHE INTERNAL "NDB Status Version" FORCE)
+
+IF(NOT DEFINED NDB_VERSION_MAJOR OR
+   NOT DEFINED NDB_VERSION_MINOR OR
+   NOT DEFINED NDB_VERSION_BUILD)
+  MESSAGE(STATUS "NDB_VERSION_MAJOR: ${NDB_VERSION_MAJOR}")
+  MESSAGE(STATUS "NDB_VERSION_MINOR: ${NDB_VERSION_MINOR}")
+  MESSAGE(STATUS "NDB_VERSION_BUILD: ${NDB_VERSION_BUILD}")
+  MESSAGE(FATAL_ERROR "Couldn't parse version numbers from ndb_configure.m4")
+ENDIF()
 
 # Create ndb_version.h
 CONFIGURE_FILE(ndb_version.h.in

=== modified file 'storage/ndb/include/kernel/signaldata/DisconnectRep.hpp'
--- a/storage/ndb/include/kernel/signaldata/DisconnectRep.hpp	2009-05-27 15:21:45 +0000
+++ b/storage/ndb/include/kernel/signaldata/DisconnectRep.hpp	2011-01-18 07:39:47 +0000
@@ -24,12 +24,14 @@
 /**
  * 
  */
-class DisconnectRep {
+struct DisconnectRep
+{
   /**
    * Receiver(s)
    */
   friend class Qmgr;
   friend class Cmvmi; // Cmvmi
+  friend class ClusterMgr;
 
   /**
    * Senders
@@ -43,7 +45,6 @@ class DisconnectRep {
    */
   friend bool printDISCONNECT_REP(FILE *, const Uint32 *, Uint32, Uint16);
 
-public:
   STATIC_CONST( SignalLength = 2 );
 
   enum ErrCode {
@@ -54,8 +55,6 @@ public:
     TcReportNodeFailed = 0xFF000001
   };
 
-private:
-  
   Uint32 nodeId;
   Uint32 err;
 };

=== modified file 'storage/ndb/include/ndbapi/NdbDictionary.hpp'
--- a/storage/ndb/include/ndbapi/NdbDictionary.hpp	2010-10-13 09:33:02 +0000
+++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp	2011-01-27 13:23:07 +0000
@@ -1041,6 +1041,14 @@ public:
      *   passing NULL pointer will equal to bitmap with all columns set
      */
     int checkColumns(const Uint32* bitmap, unsigned len_in_bytes) const;
+
+    /**
+     * Set tableId,tableVersion on a table...
+     *   this is a "work-around" since createIndex can't (currently)
+     *   accept an ObjectId instead of table-object in createIndex
+     *   this as way way too much stuff is pushed into NdbDictInterface
+     */
+    void assignObjId(const ObjectId &);
 #endif
 
     // these 2 are not de-doxygenated
@@ -2211,6 +2219,14 @@ public:
     int createTable(const Table &table);
 
     /**
+     * Create defined table given defined Table instance
+     *   return ObjectId
+     * @param table Table to create
+     * @return 0 if successful otherwise -1.
+     */
+    int createTable(const Table &table, ObjectId * objid);
+
+    /**
      * Start table optimization given defined table object
      * @param t Object of table to optimize
      * @param Pre-allocated OptimizeTableHandle

=== modified file 'storage/ndb/include/portlib/NdbMutex.h'
--- a/storage/ndb/include/portlib/NdbMutex.h	2009-12-17 00:05:47 +0000
+++ b/storage/ndb/include/portlib/NdbMutex.h	2011-01-26 08:34:31 +0000
@@ -30,7 +30,25 @@ extern "C" {
 #else
 #include <pthread.h>
 #endif
+#ifndef NDB_MUTEX_STAT
 typedef pthread_mutex_t NdbMutex;
+#else
+typedef struct {
+  pthread_mutex_t mutex;
+  unsigned cnt_lock;
+  unsigned cnt_lock_contention;
+  unsigned cnt_trylock_ok;
+  unsigned cnt_trylock_nok;
+  unsigned long long min_lock_wait_time_ns;
+  unsigned long long sum_lock_wait_time_ns;
+  unsigned long long max_lock_wait_time_ns;
+  unsigned long long min_hold_time_ns;
+  unsigned long long sum_hold_time_ns;
+  unsigned long long max_hold_time_ns;
+  unsigned long long lock_start_time_ns;
+  char name[32];
+} NdbMutex;
+#endif
 
 /**
  * Create a mutex
@@ -40,6 +58,7 @@ typedef pthread_mutex_t NdbMutex;
  * returnvalue: pointer to the mutex structure
  */
 NdbMutex* NdbMutex_Create(void);
+NdbMutex* NdbMutex_CreateWithName(const char * name);
 
 /**
  * Initialize a mutex created with file-storage or on the stack
@@ -48,6 +67,7 @@ NdbMutex* NdbMutex_Create(void);
  * * returnvalue: 0 = succeeded, -1 = failed
  */
 int NdbMutex_Init(NdbMutex* p_mutex);
+int NdbMutex_InitWithName(NdbMutex* p_mutex, const char * name);
 
 /**
  * Destroy a mutex

=== modified file 'storage/ndb/ndb_configure.m4'
--- a/storage/ndb/ndb_configure.m4	2011-01-14 15:25:27 +0000
+++ b/storage/ndb/ndb_configure.m4	2011-01-26 09:54:24 +0000
@@ -2,7 +2,7 @@
 # Should be updated when creating a new NDB version
 NDB_VERSION_MAJOR=7
 NDB_VERSION_MINOR=0
-NDB_VERSION_BUILD=21
+NDB_VERSION_BUILD=22
 NDB_VERSION_STATUS=""
 
 dnl for build ndb docs

=== modified file 'storage/ndb/src/common/portlib/NdbCondition.c'
--- a/storage/ndb/src/common/portlib/NdbCondition.c	2010-09-22 11:52:37 +0000
+++ b/storage/ndb/src/common/portlib/NdbCondition.c	2011-01-26 08:34:31 +0000
@@ -127,7 +127,11 @@ NdbCondition_Wait(struct NdbCondition* p
   if (p_cond == NULL || p_mutex == NULL)
     return 1;
   
+#ifdef NDB_MUTEX_STAT
+  result = pthread_cond_wait(&p_cond->cond, &p_mutex->mutex);
+#else
   result = pthread_cond_wait(&p_cond->cond, p_mutex);
+#endif
   
   return result;
 }
@@ -182,13 +186,14 @@ NdbCondition_WaitTimeoutAbs(struct NdbCo
 {
 #ifdef NDB_WIN
   struct timespec tmp = *abstime;
+  abstime = &tmp;
 #endif
 
   if (p_cond == NULL || p_mutex == NULL)
     return 1;
 
-#ifdef NDB_WIN
-  return pthread_cond_timedwait(&p_cond->cond, p_mutex, &tmp);
+#ifdef NDB_MUTEX_STAT
+  return pthread_cond_timedwait(&p_cond->cond, &p_mutex->mutex, abstime);
 #else
   return pthread_cond_timedwait(&p_cond->cond, p_mutex, abstime);
 #endif

=== modified file 'storage/ndb/src/common/portlib/NdbMutex.c'
--- a/storage/ndb/src/common/portlib/NdbMutex.c	2011-01-09 16:31:17 +0000
+++ b/storage/ndb/src/common/portlib/NdbMutex.c	2011-01-26 08:34:31 +0000
@@ -22,7 +22,16 @@
 #include <NdbMutex.h>
 #include <NdbMem.h>
 
-NdbMutex* NdbMutex_Create(void)
+#ifdef NDB_MUTEX_STAT
+static FILE * statout = 0;
+#endif
+
+NdbMutex* NdbMutex_Create()
+{
+  return NdbMutex_CreateWithName(0);
+}
+
+NdbMutex* NdbMutex_CreateWithName(const char * name)
 {
   NdbMutex* pNdbMutex;
   int result;
@@ -32,7 +41,7 @@ NdbMutex* NdbMutex_Create(void)
   if (pNdbMutex == NULL)
     return NULL;
 
-  result = NdbMutex_Init(pNdbMutex);
+  result = NdbMutex_InitWithName(pNdbMutex, name);
   if (result == 0)
   {
     return pNdbMutex;
@@ -43,9 +52,39 @@ NdbMutex* NdbMutex_Create(void)
 
 int NdbMutex_Init(NdbMutex* pNdbMutex)
 {
+  return NdbMutex_InitWithName(pNdbMutex, 0);
+}
+
+int NdbMutex_InitWithName(NdbMutex* pNdbMutex, const char * name)
+{
   int result;
+  pthread_mutex_t * p;
   DBUG_ENTER("NdbMutex_Init");
-  
+
+#ifdef NDB_MUTEX_STAT
+  bzero(pNdbMutex, sizeof(NdbMutex));
+  pNdbMutex->min_lock_wait_time_ns = ~(Uint64)0;
+  pNdbMutex->min_hold_time_ns = ~(Uint64)0;
+  p = &pNdbMutex->mutex;
+  if (name == 0)
+  {
+    snprintf(pNdbMutex->name, sizeof(pNdbMutex->name), "%p",
+             pNdbMutex);
+  }
+  else
+  {
+    snprintf(pNdbMutex->name, sizeof(pNdbMutex->name), "%p:%s",
+             pNdbMutex, name);
+  }
+  if (getenv("NDB_MUTEX_STAT") != 0)
+  {
+    statout = stdout;
+  }
+#else
+  p = pNdbMutex;
+  (void)name;
+#endif
+
 #if defined(VM_TRACE) && \
   defined(HAVE_PTHREAD_MUTEXATTR_INIT) && \
   defined(HAVE_PTHREAD_MUTEXATTR_SETTYPE)
@@ -53,11 +92,11 @@ int NdbMutex_Init(NdbMutex* pNdbMutex)
   pthread_mutexattr_t t;
   pthread_mutexattr_init(&t);
   pthread_mutexattr_settype(&t, PTHREAD_MUTEX_ERRORCHECK);
-  result = pthread_mutex_init(pNdbMutex, &t);
+  result = pthread_mutex_init(p, &t);
   assert(result == 0);
   pthread_mutexattr_destroy(&t);
 #else
-  result = pthread_mutex_init(pNdbMutex, 0);
+  result = pthread_mutex_init(p, 0);
 #endif
   DBUG_RETURN(result);
 }
@@ -69,13 +108,66 @@ int NdbMutex_Destroy(NdbMutex* p_mutex)
   if (p_mutex == NULL)
     return -1;
 
+#ifdef NDB_MUTEX_STAT
+  result = pthread_mutex_destroy(&p_mutex->mutex);
+#else
   result = pthread_mutex_destroy(p_mutex);
+#endif
 
   NdbMem_Free(p_mutex);
 
   return result;
 }
 
+#ifdef NDB_MUTEX_STAT
+static
+inline
+Uint64
+now()
+{
+  struct timespec ts;
+  clock_gettime(CLOCK_MONOTONIC, &ts);
+  return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
+}
+
+static
+void
+dumpstat(NdbMutex* p)
+{
+  if (statout != 0)
+  {
+    fprintf(statout,
+            "%s : "
+            " lock [ cnt: %u con: %u wait: [ min: %llu avg: %llu max: %llu ] ]"
+            " trylock [ ok: %u nok: %u ]"
+            " hold: [ min: %llu avg: %llu max: %llu ]\n",
+            p->name,
+            p->cnt_lock,
+            p->cnt_lock_contention,
+            p->min_lock_wait_time_ns,
+            p->cnt_lock_contention ?
+            p->sum_lock_wait_time_ns / p->cnt_lock_contention : 0,
+            p->max_lock_wait_time_ns,
+            p->cnt_trylock_ok,
+            p->cnt_trylock_nok,
+            p->min_hold_time_ns,
+            (p->cnt_lock + p->cnt_trylock_ok) ?
+            p->sum_hold_time_ns / (p->cnt_lock + p->cnt_trylock_ok) : 0,
+            p->max_hold_time_ns);
+  }
+  p->cnt_lock = 0;
+  p->cnt_lock_contention = 0;
+  p->cnt_trylock_ok = 0;
+  p->cnt_trylock_nok = 0;
+  p->min_lock_wait_time_ns = ~(Uint64)0;
+  p->sum_lock_wait_time_ns = 0;
+  p->max_lock_wait_time_ns = 0;
+  p->min_hold_time_ns = ~(Uint64)0;
+  p->sum_hold_time_ns = 0;
+  p->max_hold_time_ns = 0;
+}
+
+#endif
 
 int NdbMutex_Lock(NdbMutex* p_mutex)
 {
@@ -84,7 +176,33 @@ int NdbMutex_Lock(NdbMutex* p_mutex)
   if (p_mutex == NULL)
     return -1;
 
+#ifdef NDB_MUTEX_STAT
+  {
+    Uint64 stop;
+    if ((result = pthread_mutex_trylock(&p_mutex->mutex)) == 0)
+    {
+      stop = now();
+    }
+    else
+    {
+      Uint64 start = now();
+      assert(result == EBUSY);
+      result = pthread_mutex_lock(&p_mutex->mutex);
+      stop = now();
+      p_mutex->cnt_lock_contention++;
+      Uint64 t = (stop - start);
+      p_mutex->sum_lock_wait_time_ns += t;
+      if (t < p_mutex->min_lock_wait_time_ns)
+        p_mutex->min_lock_wait_time_ns = t;
+      if (t > p_mutex->max_lock_wait_time_ns)
+        p_mutex->max_lock_wait_time_ns = t;
+    }
+    p_mutex->cnt_lock++;
+    p_mutex->lock_start_time_ns = stop;
+  }
+#else
   result = pthread_mutex_lock(p_mutex);
+#endif
   assert(result == 0);
 
   return result;
@@ -98,7 +216,26 @@ int NdbMutex_Unlock(NdbMutex* p_mutex)
   if (p_mutex == NULL)
     return -1;
 
+#ifdef NDB_MUTEX_STAT
+  {
+    Uint64 stop = now() - p_mutex->lock_start_time_ns;
+    p_mutex->sum_hold_time_ns += stop;
+    if (stop < p_mutex->min_hold_time_ns)
+      p_mutex->min_hold_time_ns = stop;
+    if (stop > p_mutex->max_hold_time_ns)
+      p_mutex->max_hold_time_ns = stop;
+    result = pthread_mutex_unlock(&p_mutex->mutex);
+    if (((p_mutex->sum_hold_time_ns + p_mutex->sum_lock_wait_time_ns)
+         >= 3*1000000000ULL) ||
+        p_mutex->cnt_lock >= 16384 ||
+        p_mutex->cnt_trylock_ok >= 16384)
+    {
+      dumpstat(p_mutex);
+    }
+  }
+#else
   result = pthread_mutex_unlock(p_mutex);
+#endif
   assert(result == 0);
 
   return result;
@@ -107,12 +244,26 @@ int NdbMutex_Unlock(NdbMutex* p_mutex)
 
 int NdbMutex_Trylock(NdbMutex* p_mutex)
 {
-  int result = -1;
+  int result;
+
+  if (p_mutex == NULL)
+    return -1;
 
-  if (p_mutex != NULL) {
-    result = pthread_mutex_trylock(p_mutex);
-    assert(result == 0 || result == EBUSY);
+#ifdef NDB_MUTEX_STAT
+  result = pthread_mutex_trylock(&p_mutex->mutex);
+  if (result == 0)
+  {
+    p_mutex->cnt_trylock_ok++;
+    p_mutex->lock_start_time_ns = now();
+  }
+  else
+  {
+    __sync_fetch_and_add(&p_mutex->cnt_trylock_nok, 1);
   }
+#else
+  result = pthread_mutex_trylock(p_mutex);
+#endif
+  assert(result == 0 || result == EBUSY);
 
   return result;
 }

=== modified file 'storage/ndb/src/kernel/CMakeLists.txt'
--- a/storage/ndb/src/kernel/CMakeLists.txt	2010-11-10 14:45:18 +0000
+++ b/storage/ndb/src/kernel/CMakeLists.txt	2011-01-26 08:54:07 +0000
@@ -52,14 +52,16 @@ IF(WIN32)
     ${CMAKE_SOURCE_DIR}/storage/ndb/src/common/logger/msg00001.bin)
 ENDIF()
 
-ADD_EXECUTABLE(ndbd
-  main.cpp ndbd.cpp angel.cpp SimBlockList.cpp ${NDBD_EXTRA_SRC})
+MYSQL_ADD_EXECUTABLE(ndbd
+  main.cpp ndbd.cpp angel.cpp SimBlockList.cpp ${NDBD_EXTRA_SRC}
+  DESTINATION ${INSTALL_SBINDIR}
+  COMPONENT ClusterDataNode)
 TARGET_LINK_LIBRARIES(ndbd ${NDBD_LIBS} ndbsched ${LIBDL})
-INSTALL(TARGETS ndbd DESTINATION libexec)
 
 IF(NDB_BUILD_NDBMTD)
-  ADD_EXECUTABLE(ndbmtd
-    main.cpp ndbd.cpp angel.cpp SimBlockList.cpp ${NDBD_EXTRA_SRC})
+  MYSQL_ADD_EXECUTABLE(ndbmtd
+    main.cpp ndbd.cpp angel.cpp SimBlockList.cpp ${NDBD_EXTRA_SRC}
+    DESTINATION ${INSTALL_SBINDIR}
+    COMPONENT ClusterDataNode)
   TARGET_LINK_LIBRARIES(ndbmtd ${NDBD_LIBS} ndbsched_mt ${LIBDL})
-  INSTALL(TARGETS ndbmtd DESTINATION libexec)
 ENDIF()

=== modified file 'storage/ndb/src/kernel/blocks/CMakeLists.txt'
--- a/storage/ndb/src/kernel/blocks/CMakeLists.txt	2010-11-08 16:26:18 +0000
+++ b/storage/ndb/src/kernel/blocks/CMakeLists.txt	2011-01-26 08:54:07 +0000
@@ -72,9 +72,9 @@ ADD_LIBRARY(ndbblocks STATIC
     dbtup/DbtupClient.cpp
     ${EXTRA_SRC})
 
-ADD_EXECUTABLE(ndb_print_file
-    print_file.cpp
-    diskpage.cpp
-    dbtup/tuppage.cpp
-)
+MYSQL_ADD_EXECUTABLE(ndb_print_file
+  print_file.cpp
+  diskpage.cpp
+  dbtup/tuppage.cpp
+  COMPONENT ClusterTools)
 TARGET_LINK_LIBRARIES(ndb_print_file ndbclient)

=== modified file 'storage/ndb/src/kernel/blocks/backup/CMakeLists.txt'
--- a/storage/ndb/src/kernel/blocks/backup/CMakeLists.txt	2010-11-26 10:42:53 +0000
+++ b/storage/ndb/src/kernel/blocks/backup/CMakeLists.txt	2011-01-26 08:54:07 +0000
@@ -13,6 +13,8 @@
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 
-ADD_EXECUTABLE(ndb_print_backup_file read.cpp)
+MYSQL_ADD_EXECUTABLE(ndb_print_backup_file
+  read.cpp
+  COMPONENT ClusterTools)
 TARGET_LINK_LIBRARIES(ndb_print_backup_file
     ndbtrace ndblogger ndbgeneral ndbportlib)

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/CMakeLists.txt'
--- a/storage/ndb/src/kernel/blocks/dbdict/CMakeLists.txt	2010-11-26 10:42:53 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/CMakeLists.txt	2011-01-26 08:54:07 +0000
@@ -13,7 +13,8 @@
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 
-ADD_EXECUTABLE(ndb_print_schema_file
-               printSchemaFile.cpp)
+MYSQL_ADD_EXECUTABLE(ndb_print_schema_file
+  printSchemaFile.cpp
+  COMPONENT ClusterTools)
 TARGET_LINK_LIBRARIES(ndb_print_schema_file
     ndbtrace ndblogger ndbgeneral ndbportlib)

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/CMakeLists.txt'
--- a/storage/ndb/src/kernel/blocks/dbdih/CMakeLists.txt	2010-11-26 10:42:53 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/CMakeLists.txt	2011-01-26 08:54:07 +0000
@@ -14,6 +14,8 @@
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 
 
-ADD_EXECUTABLE(ndb_print_sys_file printSysfile.cpp)
+MYSQL_ADD_EXECUTABLE(ndb_print_sys_file
+  printSysfile.cpp
+  COMPONENT ClusterTools)
 TARGET_LINK_LIBRARIES(ndb_print_sys_file
     ndbtrace ndblogger ndbgeneral ndbportlib)

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/CMakeLists.txt'
--- a/storage/ndb/src/kernel/blocks/dblqh/CMakeLists.txt	2010-11-26 10:42:53 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/CMakeLists.txt	2011-01-26 08:54:07 +0000
@@ -14,8 +14,9 @@
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 
 
-ADD_EXECUTABLE(ndb_redo_log_reader
-    redoLogReader/records.cpp
-    redoLogReader/reader.cpp)
+MYSQL_ADD_EXECUTABLE(ndb_redo_log_reader
+  redoLogReader/records.cpp
+  redoLogReader/reader.cpp
+  COMPONENT ClusterTools)
 TARGET_LINK_LIBRARIES(ndb_redo_log_reader
     ndbtrace ndblogger ndbgeneral ndbportlib)

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-01-03 15:08:41 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-01-17 15:00:13 +0000
@@ -4598,11 +4598,11 @@ void Dblqh::execLQHKEYREQ(Signal* signal
   
   if (LqhKeyReq::getRowidFlag(Treqinfo))
   {
-    ndbassert(refToBlock(senderRef) != DBTC);
+    ndbassert(refToMain(senderRef) != DBTC);
   }
   else if(op == ZINSERT)
   {
-    ndbassert(refToBlock(senderRef) == DBTC);
+    ndbassert(refToMain(senderRef) == DBTC);
   }
   
   if ((LqhKeyReq::FixedSignalLength + nextPos + TreclenAiLqhkey) != 

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2010-12-02 11:02:29 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2011-01-17 15:00:13 +0000
@@ -1284,7 +1284,7 @@ public:
   typedef Ptr<TcFailRecord> TcFailRecordPtr;
 
 public:
-  Dbtc(Block_context&);
+  Dbtc(Block_context&, Uint32 instanceNumber = 0);
   virtual ~Dbtc();
 
 private:
@@ -2051,6 +2051,8 @@ private:
 
   Uint32 c_gcp_ref;
 
+  Uint32 c_sttor_ref;
+
 #ifdef ERROR_INSERT
   // Used with ERROR_INSERT 8078 + 8079 to check API_FAILREQ handling
   Uint32 c_lastFailedApi;

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp	2010-02-22 14:05:33 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp	2011-01-17 15:00:13 +0000
@@ -173,8 +173,8 @@ Dbtc::getParam(const char* name, Uint32*
   return false;
 }
 
-Dbtc::Dbtc(Block_context& ctx):
-  SimulatedBlock(DBTC, ctx),
+Dbtc::Dbtc(Block_context& ctx, Uint32 instanceNo):
+  SimulatedBlock(DBTC, ctx, instanceNo),
   c_theDefinedTriggers(c_theDefinedTriggerPool),
   c_firedTriggerHash(c_theFiredTriggerPool),
   c_maxNumberOfDefinedTriggers(0),

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2011-01-14 12:05:51 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2011-01-17 15:00:13 +0000
@@ -654,6 +654,7 @@ void Dbtc::execSTTOR(Signal* signal)
                                                      /* START CASE */
   tphase = signal->theData[1];
   csignalKey = signal->theData[6];
+  c_sttor_ref = signal->getSendersBlockRef();
   switch (tphase) {
   case ZSPH1:
     jam();
@@ -673,7 +674,7 @@ void Dbtc::sttorryLab(Signal* signal)
   signal->theData[2] = 2;    /* SIGNAL VERSION NUMBER */
   signal->theData[3] = ZSPH1;
   signal->theData[4] = 255;
-  sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB);
+  sendSignal(c_sttor_ref, GSN_STTORRY, signal, 5, JBB);
 }//Dbtc::sttorryLab()
 
 /* ***************************************************************************/
@@ -689,6 +690,7 @@ void Dbtc::execNDB_STTOR(Signal* signal)
   tnodeid = signal->theData[1];
   tndbstartphase = signal->theData[2];   /* START PHASE      */
   tstarttype = signal->theData[3];       /* START TYPE       */
+  c_sttor_ref = signal->getSendersBlockRef();
   switch (tndbstartphase) {
   case ZINTSPH1:
     jam();
@@ -724,7 +726,7 @@ void Dbtc::execNDB_STTOR(Signal* signal)
 void Dbtc::ndbsttorry010Lab(Signal* signal) 
 {
   signal->theData[0] = cownref;
-  sendSignal(cndbcntrblockref, GSN_NDB_STTORRY, signal, 1, JBB);
+  sendSignal(c_sttor_ref, GSN_NDB_STTORRY, signal, 1, JBB);
 }//Dbtc::ndbsttorry010Lab()
 
 void
@@ -789,7 +791,7 @@ void Dbtc::startphase1x010Lab(Signal* si
 void Dbtc::intstartphase1x010Lab(Signal* signal) 
 {
   cownNodeid = tnodeid;
-  cownref =          calcTcBlockRef(cownNodeid);
+  cownref =          reference();
   clqhblockref =     calcLqhBlockRef(cownNodeid);
   cdihblockref =     calcDihBlockRef(cownNodeid);
   cdictblockref =    calcDictBlockRef(cownNodeid);
@@ -1240,7 +1242,8 @@ void Dbtc::execTCSEIZEREQ(Signal* signal
     apiConnectptr.p->ndbapiBlockref = tapiBlockref;
     signal->theData[0] = apiConnectptr.p->ndbapiConnect;
     signal->theData[1] = apiConnectptr.i;
-    sendSignal(tapiBlockref, GSN_TCSEIZECONF, signal, 2, JBB);
+    signal->theData[2] = reference();
+    sendSignal(tapiBlockref, GSN_TCSEIZECONF, signal, 3, JBB);
     return;
   }
 
@@ -11847,7 +11850,7 @@ void Dbtc::initialiseRecordsLab(Signal*
   signal->theData[2] = 0;
   signal->theData[3] = retRef;
   signal->theData[4] = retData;
-  sendSignal(DBTC_REF, GSN_CONTINUEB, signal, 5, JBB);
+  sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB);
 }
 
 /* ========================================================================= */

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp	2010-12-04 11:20:36 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp	2011-01-17 15:00:13 +0000
@@ -539,7 +539,7 @@ Dbtup::checkImmediateTriggersAfterInsert
                                          Tablerec *regTablePtr,
                                          bool disk)
 {
-  if(refToBlock(req_struct->TC_ref) != DBTC) {
+  if (refToMain(req_struct->TC_ref) != DBTC) {
     return;
   }
 
@@ -559,7 +559,7 @@ Dbtup::checkImmediateTriggersAfterUpdate
                                          Tablerec* regTablePtr,
                                          bool disk)
 {
-  if(refToBlock(req_struct->TC_ref) != DBTC) {
+  if (refToMain(req_struct->TC_ref) != DBTC) {
     return;
   }
 
@@ -587,7 +587,7 @@ Dbtup::checkImmediateTriggersAfterDelete
                                          Tablerec* regTablePtr,
                                          bool disk)
 {
-  if(refToBlock(req_struct->TC_ref) != DBTC) {
+  if (refToMain(req_struct->TC_ref) != DBTC) {
     return;
   }
 

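The refToBlock() -> refToMain() switch in the trigger checks matters for multi-threaded data nodes, where a TC reference can point at a particular DBTC instance; the intent appears to be that only the main block number is compared, so the check still recognizes DBTC regardless of which instance sent the request. A toy illustration of that idea follows; the mask, the shift and the block number below are assumptions for the sketch, not the kernel's real bit layout.

#include <cstdint>
#include <cassert>

// Assumed-for-illustration layout: low bits = main block number,
// some higher bits = instance.  The real macros live in the NDB headers.
constexpr uint32_t kMainBlockMask = 0x0fff;   // placeholder mask
constexpr uint32_t kDbtcMain      = 0x00f5;   // placeholder DBTC number

inline uint32_t refToMainSketch(uint32_t blockNo) {
  return blockNo & kMainBlockMask;            // drop the instance part
}

int main() {
  uint32_t dbtcInstance3 = kDbtcMain | (3u << 12);   // "DBTC, instance 3"
  assert(refToMainSketch(dbtcInstance3) == kDbtcMain);
  return 0;
}
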
=== modified file 'storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp'
--- a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp	2010-10-21 12:02:45 +0000
+++ b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp	2011-01-17 15:00:13 +0000
@@ -328,7 +328,8 @@ DbUtil::execTCSEIZECONF(Signal* signal){
   ptr.i = signal->theData[0] >> 1;
   c_seizingTransactions.getPtr(ptr, signal->theData[0] >> 1);
   ptr.p->connectPtr = signal->theData[1];
-  
+  ptr.p->connectRef = signal->theData[2];
+
   c_seizingTransactions.release(ptr);
 
   if (c_seizingTransactions.isEmpty())
@@ -2281,7 +2282,7 @@ DbUtil::runOperation(Signal* signal, Tra
   printTCKEYREQ(stdout, signal->getDataPtr(), pop->tckeyLenInBytes >> 2,0);
 #endif
   Uint32 sigLen = pop->tckeyLen + (keyLen > 8 ? 8 : keyLen);
-  sendSignal(DBTC_REF, GSN_TCKEYREQ, signal, sigLen, JBB);
+  sendSignal(transPtr.p->connectRef, GSN_TCKEYREQ, signal, sigLen, JBB);
   
   /**
    * More the 8 words of key info not implemented
@@ -2295,7 +2296,8 @@ DbUtil::runOperation(Signal* signal, Tra
   keyInfo->connectPtr = transPtr.p->connectPtr;
   keyInfo->transId[0] = transPtr.p->transId[0];
   keyInfo->transId[1] = transPtr.p->transId[1];
-  sendKeyInfo(signal, keyInfo, op->keyInfo, kit);
+  sendKeyInfo(signal, transPtr.p->connectRef,
+              keyInfo, op->keyInfo, kit);
 
   /**
    * AttrInfo
@@ -2307,14 +2309,17 @@ DbUtil::runOperation(Signal* signal, Tra
 
   AttrInfoIterator ait;
   pop->attrInfo.first(ait);
-  sendAttrInfo(signal, attrInfo, pop->attrInfo, ait);
+  sendAttrInfo(signal, transPtr.p->connectRef,
+               attrInfo, pop->attrInfo, ait);
   
   op->attrInfo.first(ait);
-  sendAttrInfo(signal, attrInfo, op->attrInfo, ait);
+  sendAttrInfo(signal, transPtr.p->connectRef,
+               attrInfo, op->attrInfo, ait);
 }
 
 void
 DbUtil::sendKeyInfo(Signal* signal, 
+                    Uint32 tcRef,
 		    KeyInfo* keyInfo,
 		    const KeyInfoBuffer & keyBuf,
 		    KeyInfoIterator & kit)
@@ -2330,13 +2335,14 @@ DbUtil::sendKeyInfo(Signal* signal,
 #if 0 //def EVENT_DEBUG
     printf("DbUtil::sendKeyInfo: sendSignal(DBTC_REF, GSN_KEYINFO, signal, %d , JBB)\n", KeyInfo::HeaderLength + keyDataLen);
 #endif
-    sendSignal(DBTC_REF, GSN_KEYINFO, signal, 
+    sendSignal(tcRef, GSN_KEYINFO, signal,
 	       KeyInfo::HeaderLength + keyDataLen, JBB);
   }
 }
 
 void
 DbUtil::sendAttrInfo(Signal* signal, 
+                     Uint32 tcRef,
 		     AttrInfo* attrInfo, 
 		     const AttrInfoBuffer & attrBuf,
 		     AttrInfoIterator & ait)
@@ -2351,7 +2357,7 @@ DbUtil::sendAttrInfo(Signal* signal,
 #if 0 //def EVENT_DEBUG
     printf("DbUtil::sendAttrInfo: sendSignal(DBTC_REF, GSN_ATTRINFO, signal, %d , JBB)\n", AttrInfo::HeaderLength + i);
 #endif
-    sendSignal(DBTC_REF, GSN_ATTRINFO, signal, 
+    sendSignal(tcRef, GSN_ATTRINFO, signal,
 	       AttrInfo::HeaderLength + i, JBB);
   }
 }
@@ -2491,6 +2497,9 @@ DbUtil::execTCKEYCONF(Signal* signal){
     gci_lo = keyConf->operations[ops].apiOperationPtr;
   }
 
+  TransactionPtr transPtr;
+  c_runningTransactions.getPtr(transPtr, transI);
+
   /**
    * Check commit ack marker flag
    */
@@ -2499,11 +2508,9 @@ DbUtil::execTCKEYCONF(Signal* signal){
     jam();
     signal->theData[0] = transId1;
     signal->theData[1] = transId2;
-    sendSignal(DBTC_REF, GSN_TC_COMMIT_ACK, signal, 2, JBB);    
+    sendSignal(transPtr.p->connectRef, GSN_TC_COMMIT_ACK, signal, 2, JBB);    
   }//if
 
-  TransactionPtr transPtr;
-  c_runningTransactions.getPtr(transPtr, transI);
   ndbrequire(transId1 == transPtr.p->transId[0] && 
 	     transId2 == transPtr.p->transId[1]);
 

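DbUtil is the consuming side of the extended TCSEIZECONF: word 1 is still the connect pointer, the new word 2 carries the block reference of the TC that answered, and that reference is stored in the new Transaction::connectRef field and used as the destination for the subsequent TCKEYREQ / KEYINFO / ATTRINFO / TC_COMMIT_ACK traffic instead of the fixed DBTC_REF. A small model of that receiving pattern follows; names are illustrative, not the real classes.

#include <cstdint>
#include <cstdio>

struct TransactionModel {
  uint32_t connectPtr = 0;
  uint32_t connectRef = 0;    // mirrors the new Transaction::connectRef member
};

// Called on TCSEIZECONF: remember both the connect pointer and the TC ref.
inline void onTcSeizeConf(TransactionModel& t, const uint32_t* theData) {
  t.connectPtr = theData[1];
  t.connectRef = theData[2];
}

// Later requests are routed to the recorded TC reference.
inline void sendTcKeyReq(const TransactionModel& t) {
  printf("TCKEYREQ -> block ref 0x%08x (connect %u)\n",
         (unsigned)t.connectRef, (unsigned)t.connectPtr);
}

int main() {
  const uint32_t conf[3] = { 7, 42, 0x00f50002 };   // made-up signal words
  TransactionModel t;
  onTcSeizeConf(t, conf);
  sendTcKeyReq(t);
  return 0;
}
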
=== modified file 'storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp'
--- a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp	2010-10-21 12:02:45 +0000
+++ b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp	2011-01-17 15:00:13 +0000
@@ -348,6 +348,7 @@ public:
     };
     
     Uint32 connectPtr;
+    Uint32 connectRef;
     Uint32 transId[2];
     SLList<Operation> operations;
 
@@ -408,11 +409,11 @@ public:
   void initResultSet(ResultSetBuffer &, const ResultSetInfoBuffer &);
   void runTransaction(Signal* signal, TransactionPtr);
   void runOperation(Signal* signal, TransactionPtr &, OperationPtr &, Uint32);
-  void sendKeyInfo(Signal* signal, 
+  void sendKeyInfo(Signal* signal, Uint32 ref,
 		   KeyInfo* keyInfo,
 		   const KeyInfoBuffer & keyBuf,
 		   KeyInfoIterator & kit);
-  void sendAttrInfo(Signal*, 
+  void sendAttrInfo(Signal*, Uint32 ref,
 		    AttrInfo* attrInfo, 
 		    const AttrInfoBuffer &,
 		    AttrInfoIterator & ait);

=== modified file 'storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp'
--- a/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp	2010-06-24 21:42:03 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp	2011-01-17 15:00:13 +0000
@@ -325,6 +325,7 @@ private:
   UintR cnoWaitrep6;
   UintR cnoWaitrep7;
   UintR ctcConnectionP;
+  Uint32 ctcReference;
   UintR ctcReqInfo;
   Uint8 ctransidPhase;
   Uint16 cresponses;

=== modified file 'storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp	2010-12-13 15:34:50 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp	2011-01-17 15:00:13 +0000
@@ -2519,6 +2519,7 @@ void Ndbcntr::execTCSEIZECONF(Signal* si
 {
   jamEntry();
   ctcConnectionP = signal->theData[1];
+  ctcReference = signal->theData[2];
   crSystab7Lab(signal);
   return;
 }//Ndbcntr::execTCSEIZECONF()
@@ -2582,7 +2583,7 @@ void Ndbcntr::crSystab7Lab(Signal* signa
     AttributeHeader::init(&tAIDataPtr[2], 1, 2 << 2);
     tAIDataPtr[3]                = (tkey << 16);
     tAIDataPtr[4]                = 1;    
-    sendSignal(DBTC_REF, GSN_TCKEYREQ, signal, 
+    sendSignal(ctcReference, GSN_TCKEYREQ, signal,
 	       TcKeyReq::StaticLength + 6, JBB);
   }//for
   ckey = ckey + RowsPerCommit;
@@ -2605,7 +2606,7 @@ void Ndbcntr::execTCKEYCONF(Signal* sign
     Uint32 transId2 = keyConf->transId2;
     signal->theData[0] = transId1;
     signal->theData[1] = transId2;
-    sendSignal(DBTC_REF, GSN_TC_COMMIT_ACK, signal, 2, JBB);    
+    sendSignal(ctcReference, GSN_TC_COMMIT_ACK, signal, 2, JBB);
   }//if
   
   cresponses = cresponses + TcKeyConf::getNoOfOperations(confInfo);
@@ -2634,7 +2635,7 @@ void Ndbcntr::crSystab8Lab(Signal* signa
   signal->theData[0] = ctcConnectionP;
   signal->theData[1] = reference();
   signal->theData[2] = 0;
-  sendSignal(DBTC_REF, GSN_TCRELEASEREQ, signal, 2, JBB);
+  sendSignal(ctcReference, GSN_TCRELEASEREQ, signal, 2, JBB);
   return;
 }//Ndbcntr::crSystab8Lab()
 

=== modified file 'storage/ndb/src/mgmclient/CMakeLists.txt'
--- a/storage/ndb/src/mgmclient/CMakeLists.txt	2010-11-10 09:42:49 +0000
+++ b/storage/ndb/src/mgmclient/CMakeLists.txt	2011-01-26 08:54:07 +0000
@@ -18,8 +18,9 @@ INCLUDE_DIRECTORIES(${NDB_SOURCE_DIR}/sr
 ADD_LIBRARY(ndbmgmclient STATIC
             CommandInterpreter.cpp)
 
-ADD_EXECUTABLE(ndb_mgm
-               main.cpp)
+MYSQL_ADD_EXECUTABLE(ndb_mgm
+  main.cpp
+  COMPONENT ClusterManagementClient)
 
 TARGET_LINK_LIBRARIES(ndb_mgm
                ndbmgmclient
@@ -34,5 +35,3 @@ TARGET_LINK_LIBRARIES(ndb_mgm
 IF(UNIX)
   TARGET_LINK_LIBRARIES(ndb_mgm ${READLINE_LIBRARY})
 ENDIF(UNIX)
-
-INSTALL(TARGETS ndb_mgm DESTINATION bin)

=== modified file 'storage/ndb/src/mgmsrv/CMakeLists.txt'
--- a/storage/ndb/src/mgmsrv/CMakeLists.txt	2010-11-11 15:08:52 +0000
+++ b/storage/ndb/src/mgmsrv/CMakeLists.txt	2011-01-26 08:54:07 +0000
@@ -48,15 +48,16 @@ IF(WIN32)
     ${CMAKE_SOURCE_DIR}/storage/ndb/src/common/logger/msg00001.bin)
 ENDIF()
 
-ADD_EXECUTABLE(ndb_mgmd
-  MgmtSrvr.cpp main.cpp Services.cpp ConfigManager.cpp ${NDB_MGMD_EXTRA_SRC})
+MYSQL_ADD_EXECUTABLE(ndb_mgmd
+  MgmtSrvr.cpp main.cpp Services.cpp ConfigManager.cpp
+  ${NDB_MGMD_EXTRA_SRC}
+  DESTINATION ${INSTALL_SBINDIR}
+  COMPONENT ClusterManagementServer)
 TARGET_LINK_LIBRARIES(ndb_mgmd ndbconf ndbclient ndbmgmclient)
 IF(UNIX)
   TARGET_LINK_LIBRARIES(ndb_mgmd ${READLINE_LIBRARY})
 ENDIF(UNIX)
 
-INSTALL(TARGETS ndb_mgmd DESTINATION libexec)
-
 ADD_EXECUTABLE(MgmConfig-t
                testConfig.cpp)
 TARGET_LINK_LIBRARIES(MgmConfig-t

=== modified file 'storage/ndb/src/mgmsrv/ConfigManager.cpp'
--- a/storage/ndb/src/mgmsrv/ConfigManager.cpp	2011-01-11 19:13:37 +0000
+++ b/storage/ndb/src/mgmsrv/ConfigManager.cpp	2011-01-18 09:52:49 +0000
@@ -1752,8 +1752,6 @@ ConfigManager::run()
   // Build bitmaks of all mgm nodes in config
   m_config->get_nodemask(m_all_mgm, NDB_MGM_NODE_TYPE_MGM);
 
-  m_started.set(m_facade->ownId());
-
   // exclude nowait-nodes from config change protcol
   m_all_mgm.bitANDC(m_opts.nowait_nodes);
   m_all_mgm.set(m_facade->ownId()); // Never exclude own node
@@ -2199,10 +2197,9 @@ ConfigManager::load_saved_config(const B
   return conf;
 }
 
-
 bool
 ConfigManager::get_packed_config(ndb_mgm_node_type nodetype,
-                                 BaseString& buf64, BaseString& error)
+                                 BaseString* buf64, BaseString& error)
 {
   Guard g(m_config_mutex);
 
@@ -2248,23 +2245,26 @@ ConfigManager::get_packed_config(ndb_mgm
   }
 
   require(m_config != 0);
-  if (!m_packed_config.length())
+  if (buf64)
   {
-    // No packed config exist, generate a new one
-    Config config_copy(m_config);
-    if (!m_dynamic_ports.set_in_config(&config_copy))
-    {
-      error.assign("get_packed_config, failed to set dynamic ports in config");
-      return false;
-    }
-
-    if (!config_copy.pack64(m_packed_config))
+    if (!m_packed_config.length())
     {
-      error.assign("get_packed_config, failed to pack config_copy");
-      return false;
+      // No packed config exist, generate a new one
+      Config config_copy(m_config);
+      if (!m_dynamic_ports.set_in_config(&config_copy))
+      {
+        error.assign("get_packed_config, failed to set dynamic ports in config");
+        return false;
+      }
+      
+      if (!config_copy.pack64(m_packed_config))
+      {
+        error.assign("get_packed_config, failed to pack config_copy");
+        return false;
+      }
     }
+    buf64->assign(m_packed_config, m_packed_config.length());
   }
-  buf64.assign(m_packed_config, m_packed_config.length());
   return true;
 }
 

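Changing buf64 from BaseString& to BaseString* lets a caller pass a null buffer when it only wants to know whether a confirmed configuration exists yet, without forcing the config to be packed; MgmtSrvr::alloc_node_id further down uses exactly that (get_packed_config(type, 0, msg)) to wait for the config before handing out a node id. A condensed model of the new contract follows; the types and names are illustrative, not the real ConfigManager.

#include <string>

struct ConfigManagerModel {
  bool confirmed = false;        // set once the config change protocol agrees
  std::string packedCache;       // base64-packed config, built lazily

  // Returns false until a confirmed config exists.  A null buf64 means
  // "check only"; a non-null buf64 additionally receives the packed config.
  bool get_packed_config(std::string* buf64, std::string& error) {
    if (!confirmed) {
      error = "configuration not yet confirmed";
      return false;
    }
    if (buf64) {
      if (packedCache.empty())
        packedCache = "base64-of-current-config";   // stands in for pack64()
      *buf64 = packedCache;
    }
    return true;
  }
};

Callers that only poll for readiness pass a null pointer; callers that actually need the configuration pass a buffer and get the cached packed form.
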
=== modified file 'storage/ndb/src/mgmsrv/ConfigManager.hpp'
--- a/storage/ndb/src/mgmsrv/ConfigManager.hpp	2010-09-17 14:30:59 +0000
+++ b/storage/ndb/src/mgmsrv/ConfigManager.hpp	2011-01-18 09:52:49 +0000
@@ -250,7 +250,7 @@ public:
     Retrieve the current configuration in base64 packed format
    */
   bool get_packed_config(ndb_mgm_node_type nodetype,
-                         BaseString& buf64, BaseString& error);
+                         BaseString * buf64, BaseString& error);
 
   static Config* load_config(const char* config_filename, bool mycnf,
                              BaseString& msg);

=== modified file 'storage/ndb/src/mgmsrv/MgmtSrvr.cpp'
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2011-01-10 13:30:14 +0000
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2011-01-18 09:52:49 +0000
@@ -434,14 +434,6 @@ MgmtSrvr::start_transporter(const Config
     DBUG_RETURN(false);
   }
 
-  /**
-   * Wait for loopback interface to be enabled
-   */
-  while (!theFacade->ext_isConnected(_ownNodeId))
-  {
-    NdbSleep_MilliSleep(20);
-  }
-
   _ownReference = numberToRef(_blockNumber, _ownNodeId);
 
   /*
@@ -744,7 +736,7 @@ bool
 MgmtSrvr::get_packed_config(ndb_mgm_node_type node_type,
                             BaseString& buf64, BaseString& error)
 {
-  return m_config_manager->get_packed_config(node_type, buf64, error);
+  return m_config_manager->get_packed_config(node_type, &buf64, error);
 }
 
 
@@ -2857,6 +2849,10 @@ MgmtSrvr::try_alloc(unsigned id, const c
                     const struct sockaddr *client_addr,
                     Uint32 timeout_ms)
 {
+  if (theFacade && theFacade->ext_isConnected(id))
+  {
+    return -1;
+  }
   if (client_addr != 0)
   {
     int res = alloc_node_id_req(id, type, timeout_ms);
@@ -2939,6 +2935,23 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId,
   }
 
   Uint32 timeout_ms = Uint32(1000 * timeout_s);
+  Uint64 stop = NdbTick_CurrentMillisecond() + timeout_ms;
+  BaseString getconfig_message;
+  while (!m_config_manager->get_packed_config(type, 0, getconfig_message))
+  {
+    /**
+     * Wait for config to get confirmed before allocating node id
+     */
+    if (NdbTick_CurrentMillisecond() > stop)
+    {
+      error_code = NDB_MGM_ALLOCID_ERROR;
+      error_string.append("Unable to allocate nodeid as configuration"
+                          " not yet confirmed");
+      DBUG_RETURN(false);
+    }
+
+    NdbSleep_MilliSleep(20);
+  }
 
   Guard g(m_node_id_mutex);
 

=== modified file 'storage/ndb/src/ndbapi/ClusterMgr.cpp'
--- a/storage/ndb/src/ndbapi/ClusterMgr.cpp	2011-01-10 13:30:14 +0000
+++ b/storage/ndb/src/ndbapi/ClusterMgr.cpp	2011-01-25 12:22:49 +0000
@@ -141,12 +141,6 @@ ClusterMgr::configure(Uint32 nodeId,
       theNodes[i]= Node();
   }
 
-  /* Init own node info */
-  trp_node &node= theNodes[getOwnNodeId()];
-  assert(node.defined);
-  node.set_connected(true);
-  node.set_confirmed(true);
-
 #if 0
   print_nodes("init");
 #endif
@@ -181,12 +175,19 @@ void
 ClusterMgr::startThread() {
   Guard g(clusterMgrThreadMutex);
 
-  theStop = 0;
+  theStop = -1;
   theClusterMgrThread = NdbThread_Create(runClusterMgr_C,
                                          (void**)this,
                                          0, // default stack size
                                          "ndb_clustermgr",
                                          NDB_THREAD_PRIO_HIGH);
+  Uint32 cnt = 0;
+  while (theStop == -1 && cnt < 60)
+  {
+    NdbCondition_WaitTimeout(waitForHBCond, clusterMgrThreadMutex, 1000);
+  }
+
+  assert(theStop == 0);
 }
 
 void
@@ -262,7 +263,6 @@ ClusterMgr::forceHB()
   req->mysql_version = NDB_MYSQL_VERSION_D;
 
   {
-    Guard g(clusterMgrThreadMutex);
     lock();
     int nodeId= 0;
     for(int i=0;
@@ -289,15 +289,39 @@ ClusterMgr::forceHB()
 }
 
 void
-ClusterMgr::force_update_connections()
+ClusterMgr::startup()
 {
-  theFacade.lock_mutex();
-  theFacade.theTransporterRegistry->update_connections();
-  theFacade.unlock_mutex();
+  assert(theStop == -1);
+  Uint32 nodeId = getOwnNodeId();
+  Node & cm_node = theNodes[nodeId];
+  trp_node & theNode = cm_node;
+  assert(theNode.defined);
+
+  lock();
+  theFacade.doConnect(nodeId);
+  unlock();
+
+  for (Uint32 i = 0; i<3000; i++)
+  {
+    lock();
+    theFacade.theTransporterRegistry->update_connections();
+    unlock();
+    if (theNode.is_connected())
+      break;
+    NdbSleep_MilliSleep(20);
+  }
+
+  assert(theNode.is_connected());
+  Guard g(clusterMgrThreadMutex);
+  theStop = 0;
+  NdbCondition_Broadcast(waitForHBCond);
 }
 
 void
-ClusterMgr::threadMain( ){
+ClusterMgr::threadMain()
+{
+  startup();
+
   NdbApiSignal signal(numberToRef(API_CLUSTERMGR, theFacade.ownId()));
   
   signal.theVerId_signalNumber   = GSN_API_REGREQ;
@@ -309,6 +333,12 @@ ClusterMgr::threadMain( ){
   req->version = NDB_VERSION;
   req->mysql_version = NDB_MYSQL_VERSION_D;
   
+  NdbApiSignal nodeFail_signal(numberToRef(API_CLUSTERMGR, getOwnNodeId()));
+  nodeFail_signal.theVerId_signalNumber = GSN_NODE_FAILREP;
+  nodeFail_signal.theReceiversBlockNumber = API_CLUSTERMGR;
+  nodeFail_signal.theTrace  = 0;
+  nodeFail_signal.theLength = NodeFailRep::SignalLengthLong;
+
   NDB_TICKS timeSlept = 100;
   NDB_TICKS now = NdbTick_CurrentMillisecond();
 
@@ -346,7 +376,13 @@ ClusterMgr::threadMain( ){
       m_cluster_state = CS_waiting_for_first_connect;
     }
 
-    lock();
+
+    NodeFailRep * nodeFailRep = CAST_PTR(NodeFailRep,
+                                         nodeFail_signal.getDataPtrSend());
+    nodeFailRep->noOfNodes = 0;
+    NodeBitmask::clear(nodeFailRep->theNodes);
+
+    trp_client::lock();
     for (int i = 1; i < MAX_NODES; i++){
       /**
        * Send register request (heartbeat) to all available nodes 
@@ -358,9 +394,6 @@ ClusterMgr::threadMain( ){
       Node & cm_node = theNodes[nodeId];
       trp_node & theNode = cm_node;
 
-      if (nodeId == getOwnNodeId())
-        continue;
-
       if (!theNode.defined)
 	continue;
 
@@ -373,6 +406,15 @@ ClusterMgr::threadMain( ){
 	continue;
       }
       
+      if (nodeId == getOwnNodeId() && theNode.is_confirmed())
+      {
+        /**
+         * Don't send HB to self more than once
+         * (once needed to avoid weird special cases in e.g ConfigManager)
+         */
+        continue;
+      }
+
       cm_node.hbCounter += (Uint32)timeSlept;
       if (cm_node.hbCounter >= m_max_api_reg_req_interval ||
           cm_node.hbCounter >= cm_node.hbFrequency)
@@ -386,7 +428,7 @@ ClusterMgr::threadMain( ){
           cm_node.hbCounter = 0;
 	}
 
-        if(theNode.m_info.m_type == NodeInfo::MGM)
+        if (theNode.m_info.m_type != NodeInfo::DB)
           signal.theReceiversBlockNumber = API_CLUSTERMGR;
         else
           signal.theReceiversBlockNumber = QMGR;
@@ -397,11 +439,18 @@ ClusterMgr::threadMain( ){
 	raw_sendSignal(&signal, nodeId);
       }//if
       
-      if (cm_node.hbMissed == 4 && cm_node.hbFrequency > 0){
-	reportNodeFailed(i);
-      }//if
+      if (cm_node.hbMissed == 4 && cm_node.hbFrequency > 0)
+      {
+        nodeFailRep->noOfNodes++;
+        NodeBitmask::set(nodeFailRep->theNodes, nodeId);
+      }
     }
-    unlock();
+
+    if (nodeFailRep->noOfNodes)
+    {
+      raw_sendSignal(&nodeFail_signal, getOwnNodeId());
+    }
+    trp_client::unlock();
   }
 }
 
@@ -418,20 +467,15 @@ ClusterMgr::trp_deliver_signal(const Ndb
     break;
 
   case GSN_API_REGCONF:
-  {
-    execAPI_REGCONF(theData);
-
-    // Distribute signal to all threads/blocks
-    theFacade.for_each(this, sig, ptr);
+     execAPI_REGCONF(sig, ptr);
     break;
-  }
 
   case GSN_API_REGREF:
     execAPI_REGREF(theData);
     break;
 
   case GSN_NODE_FAILREP:
-    execNODE_FAILREP(theData);
+    execNODE_FAILREP(sig, ptr);
     break;
 
   case GSN_NF_COMPLETEREP:
@@ -499,6 +543,16 @@ ClusterMgr::trp_deliver_signal(const Ndb
     theFacade.for_each(this, sig, ptr);
     return;
   }
+  case GSN_CONNECT_REP:
+  {
+    execCONNECT_REP(sig, ptr);
+    return;
+  }
+  case GSN_DISCONNECT_REP:
+  {
+    execDISCONNECT_REP(sig, ptr);
+    return;
+  }
   default:
     break;
 
@@ -628,8 +682,11 @@ ClusterMgr::execAPI_REGREQ(const Uint32
 }
 
 void
-ClusterMgr::execAPI_REGCONF(const Uint32 * theData){
-  const ApiRegConf * const apiRegConf = (ApiRegConf *)&theData[0];
+ClusterMgr::execAPI_REGCONF(const NdbApiSignal * signal,
+                            const LinearSectionPtr ptr[])
+{
+  const ApiRegConf * apiRegConf = CAST_CONSTPTR(ApiRegConf,
+                                                signal->getDataPtr());
   const NodeId nodeId = refToNode(apiRegConf->qmgrRef);
   
 #ifdef DEBUG_REG
@@ -677,17 +734,30 @@ ClusterMgr::execAPI_REGCONF(const Uint32
     memcpy(&node.m_state, &apiRegConf->nodeState, sizeof(node.m_state) - 24);
   }
   
-  if (node.compatible && (node.m_state.startLevel == NodeState::SL_STARTED  ||
-			  node.m_state.getSingleUserMode())){
-    set_node_alive(node, true);
-  } else {
-    set_node_alive(node, false);
-  }//if
+  if (node.m_info.m_type == NodeInfo::DB)
+  {
+    /**
+     * Only set DB nodes to "alive"
+     */
+    if (node.compatible && (node.m_state.startLevel == NodeState::SL_STARTED ||
+                            node.m_state.getSingleUserMode()))
+    {
+      set_node_alive(node, true);
+    }
+    else
+    {
+      set_node_alive(node, false);
+    }
+  }
 
   cm_node.hbMissed = 0;
   cm_node.hbCounter = 0;
   cm_node.hbFrequency = (apiRegConf->apiHeartbeatFrequency * 10) - 50;
 
+  // Distribute signal to all threads/blocks
+  // TODO only if state changed...
+  theFacade.for_each(this, signal, ptr);
+
   check_wait_for_hb(nodeId);
 }
 
@@ -742,17 +812,6 @@ ClusterMgr::execAPI_REGREF(const Uint32
   check_wait_for_hb(nodeId);
 }
 
-
-void
-ClusterMgr::execNODE_FAILREP(const Uint32 * theData){
-  const NodeFailRep * const nodeFail = (NodeFailRep *)&theData[0];
-  for(int i = 1; i < MAX_NDB_NODES; i++){
-    if(NdbNodeBitmask::get(nodeFail->theNodes, i)){
-      reportNodeFailed(i);
-    }
-  }
-}
-
 void
 ClusterMgr::execNF_COMPLETEREP(const NdbApiSignal* signal,
                                const LinearSectionPtr ptr[3])
@@ -781,6 +840,10 @@ ClusterMgr::reportConnected(NodeId nodeI
    * us with the real time-out period to use.
    */
   assert(nodeId > 0 && nodeId < MAX_NODES);
+  if (nodeId == getOwnNodeId())
+  {
+    noOfConnectedNodes--; // Don't count self...
+  }
 
   noOfConnectedNodes++;
 
@@ -791,6 +854,8 @@ ClusterMgr::reportConnected(NodeId nodeI
   cm_node.hbCounter = 0;
   cm_node.hbFrequency = 0;
 
+  assert(theNode.is_connected() == false);
+
   /**
    * make sure the node itself is marked connected even
    * if first API_REGCONF has not arrived
@@ -800,63 +865,112 @@ ClusterMgr::reportConnected(NodeId nodeI
   theNode.m_info.m_version = 0;
   theNode.compatible = true;
   theNode.nfCompleteRep = true;
+  theNode.m_node_fail_rep = false;
   theNode.m_state.startLevel = NodeState::SL_NOTHING;
   theNode.minDbVersion = 0;
   
+  /**
+   * We know that we have clusterMgrThreadMutex and trp_client::mutex
+   *   but we don't know if we are polling...and for_each can
+   *   only be used by a poller...
+   *
+   * Send signal to self, so that we can do this when receiving a signal
+   */
   NdbApiSignal signal(numberToRef(API_CLUSTERMGR, getOwnNodeId()));
   signal.theVerId_signalNumber = GSN_CONNECT_REP;
-  signal.theReceiversBlockNumber = 0;
+  signal.theReceiversBlockNumber = API_CLUSTERMGR;
   signal.theTrace  = 0;
   signal.theLength = 1;
   signal.getDataPtrSend()[0] = nodeId;
-  theFacade.for_each(this, &signal, 0);
+  raw_sendSignal(&signal, getOwnNodeId());
   DBUG_VOID_RETURN;
 }
 
 void
+ClusterMgr::execCONNECT_REP(const NdbApiSignal* sig,
+                            const LinearSectionPtr ptr[])
+{
+  theFacade.for_each(this, sig, 0);
+}
+
+void
+ClusterMgr::set_node_dead(trp_node& theNode)
+{
+  set_node_alive(theNode, false);
+  theNode.set_confirmed(false);
+  theNode.m_state.m_connected_nodes.clear();
+  theNode.m_state.startLevel = NodeState::SL_NOTHING;
+  theNode.m_info.m_connectCount ++;
+  theNode.nfCompleteRep = false;
+}
+
+void
 ClusterMgr::reportDisconnected(NodeId nodeId)
 {
   assert(nodeId > 0 && nodeId < MAX_NODES);
   assert(noOfConnectedNodes > 0);
 
-  reportNodeFailed(nodeId, true);
+  /**
+   * We know that we have clusterMgrThreadMutex and trp_client::mutex
+   *   but we don't know if we are polling...and for_each can
+   *   only be used by a poller...
+   *
+   * Send signal to self, so that we can do this when receiving a signal
+   */
+  NdbApiSignal signal(numberToRef(API_CLUSTERMGR, getOwnNodeId()));
+  signal.theVerId_signalNumber = GSN_DISCONNECT_REP;
+  signal.theReceiversBlockNumber = API_CLUSTERMGR;
+  signal.theTrace  = 0;
+  signal.theLength = DisconnectRep::SignalLength;
+
+  DisconnectRep * rep = CAST_PTR(DisconnectRep, signal.getDataPtrSend());
+  rep->nodeId = nodeId;
+  rep->err = 0;
+  raw_sendSignal(&signal, getOwnNodeId());
 }
 
 void
-ClusterMgr::reportNodeFailed(NodeId nodeId, bool disconnect)
+ClusterMgr::execDISCONNECT_REP(const NdbApiSignal* sig,
+                               const LinearSectionPtr ptr[])
 {
-  // Check array bounds + don't allow node 0 to be touched
+  const DisconnectRep * rep = CAST_CONSTPTR(DisconnectRep, sig->getDataPtr());
+  Uint32 nodeId = rep->nodeId;
+
   assert(nodeId > 0 && nodeId < MAX_NODES);
   Node & cm_node = theNodes[nodeId];
   trp_node & theNode = cm_node;
 
-  set_node_alive(theNode, false);
-  theNode.m_info.m_connectCount ++;
+  bool node_failrep = theNode.m_node_fail_rep;
+  set_node_dead(theNode);
+  theNode.set_connected(false);
 
-  if (disconnect)
-  {
-    noOfConnectedNodes--;
-    theNode.set_confirmed(false);
-    theNode.set_connected(false);
-    theNode.m_state.m_connected_nodes.clear();
-  }
-  
-  if (theNode.is_connected())
+  noOfConnectedNodes--;
+  if (noOfConnectedNodes == 0)
   {
-    theFacade.doDisconnect(nodeId);
+    if (!global_flag_skip_invalidate_cache &&
+        theFacade.m_globalDictCache)
+    {
+      theFacade.m_globalDictCache->lock();
+      theFacade.m_globalDictCache->invalidate_all();
+      theFacade.m_globalDictCache->unlock();
+      m_connect_count ++;
+      m_cluster_state = CS_waiting_for_clean_cache;
+    }
+
+    if (m_auto_reconnect == 0)
+    {
+      theStop = 2;
+    }
   }
 
-  if (theNode.m_info.getType() == NodeInfo::DB)
-    recalcMinDbVersion();
-  
-  const bool report = (theNode.m_state.startLevel != NodeState::SL_NOTHING);  
-  theNode.m_state.startLevel = NodeState::SL_NOTHING;
-  
-  if (disconnect || report)
+  if (node_failrep == false)
   {
+    /**
+     * Inform API
+     */
     NdbApiSignal signal(numberToRef(API_CLUSTERMGR, getOwnNodeId()));
     signal.theVerId_signalNumber = GSN_NODE_FAILREP;
-    signal.theReceiversBlockNumber = 0;
+    signal.theReceiversBlockNumber = API_CLUSTERMGR;
     signal.theTrace  = 0;
     signal.theLength = NodeFailRep::SignalLengthLong;
 
@@ -866,27 +980,58 @@ ClusterMgr::reportNodeFailed(NodeId node
     rep->noOfNodes = 1;
     NodeBitmask::clear(rep->theNodes);
     NodeBitmask::set(rep->theNodes, nodeId);
-    theFacade.for_each(this, &signal, 0);
+    execNODE_FAILREP(&signal, 0);
   }
+}
+
+void
+ClusterMgr::execNODE_FAILREP(const NdbApiSignal* sig,
+                             const LinearSectionPtr ptr[])
+{
+  const NodeFailRep * rep = CAST_CONSTPTR(NodeFailRep, sig->getDataPtr());
+
+  NdbApiSignal signal(sig->theSendersBlockRef);
+  signal.theVerId_signalNumber = GSN_NODE_FAILREP;
+  signal.theReceiversBlockNumber = API_CLUSTERMGR;
+  signal.theTrace  = 0;
+  signal.theLength = NodeFailRep::SignalLengthLong;
   
-  if (noOfConnectedNodes == 0)
-  {
-    if (!global_flag_skip_invalidate_cache &&
-        theFacade.m_globalDictCache)
+  NodeFailRep * copy = CAST_PTR(NodeFailRep, signal.getDataPtrSend());
+  copy->failNo = 0;
+  copy->masterNodeId = 0;
+  copy->noOfNodes = 0;
+  NodeBitmask::clear(copy->theNodes);
+
+  for (Uint32 i = NdbNodeBitmask::find_first(rep->theNodes);
+       i != NdbNodeBitmask::NotFound;
+       i = NdbNodeBitmask::find_next(rep->theNodes, i + 1))
+  {
+    Node & cm_node = theNodes[i];
+    trp_node & theNode = cm_node;
+
+    bool node_failrep = theNode.m_node_fail_rep;
+    bool connected = theNode.is_connected();
+    set_node_dead(theNode);
+
+    if (node_failrep == false)
     {
-      theFacade.m_globalDictCache->lock();
-      theFacade.m_globalDictCache->invalidate_all();
-      theFacade.m_globalDictCache->unlock();
-      m_connect_count ++;
-      m_cluster_state = CS_waiting_for_clean_cache;
+      theNode.m_node_fail_rep = true;
+      NodeBitmask::set(copy->theNodes, i);
+      copy->noOfNodes++;
     }
 
-    if (m_auto_reconnect == 0)
+    if (connected)
     {
-      theStop = 2;
+      theFacade.doDisconnect(i);
     }
   }
-  theNode.nfCompleteRep = false;
+
+  recalcMinDbVersion();
+  if (copy->noOfNodes)
+  {
+    theFacade.for_each(this, &signal, 0); // report GSN_NODE_FAILREP
+  }
+
   if (noOfAliveNodes == 0)
   {
     NdbApiSignal signal(numberToRef(API_CLUSTERMGR, getOwnNodeId()));
@@ -913,7 +1058,6 @@ ClusterMgr::reportNodeFailed(NodeId node
   }
 }
 
-
 void
 ClusterMgr::print_nodes(const char* where, NdbOut& out)
 {
@@ -1287,7 +1431,6 @@ ArbitMgr::sendSignalToQmgr(ArbitSignal&
 #endif
 
   {
-    Guard g(m_clusterMgr.clusterMgrThreadMutex);
     m_clusterMgr.lock();
     m_clusterMgr.raw_sendSignal(&signal, aSignal.data.sender);
     m_clusterMgr.unlock();

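Most of the ClusterMgr rework above replaces direct calls made from transporter callbacks (for_each(), doDisconnect(), the old reportNodeFailed()) with signals sent to the block's own API_CLUSTERMGR address (GSN_CONNECT_REP, GSN_DISCONNECT_REP, and a batched GSN_NODE_FAILREP from the heartbeat loop), so the actual processing happens in the signal-delivery/poll context where those calls are safe. A stand-alone model of that "send to self, handle on delivery" technique follows; the types, names and GSN values are illustrative only.

#include <cstdint>
#include <cstdio>
#include <queue>

struct Sig { int gsn; uint32_t nodeId; };
enum { GSN_CONNECT_REP = 1, GSN_DISCONNECT_REP = 2 };   // model values only

struct ClusterMgrModel {
  std::queue<Sig> toSelf;          // stands in for raw_sendSignal() to own node

  // Called from transporter callbacks: just queue work for later.
  void reportConnected(uint32_t nodeId)    { toSelf.push({GSN_CONNECT_REP, nodeId}); }
  void reportDisconnected(uint32_t nodeId) { toSelf.push({GSN_DISCONNECT_REP, nodeId}); }

  // Runs later, in the delivery/poll context, where for_each()/doDisconnect()
  // style work is allowed.
  void deliver() {
    while (!toSelf.empty()) {
      Sig s = toSelf.front();
      toSelf.pop();
      printf("handle gsn=%d for node %u in poll context\n", s.gsn, (unsigned)s.nodeId);
    }
  }
};

int main() {
  ClusterMgrModel m;
  m.reportConnected(3);
  m.reportDisconnected(3);
  m.deliver();
  return 0;
}
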
=== modified file 'storage/ndb/src/ndbapi/ClusterMgr.hpp'
--- a/storage/ndb/src/ndbapi/ClusterMgr.hpp	2011-01-10 13:30:14 +0000
+++ b/storage/ndb/src/ndbapi/ClusterMgr.hpp	2011-01-18 07:39:47 +0000
@@ -27,6 +27,7 @@
 #include <signaldata/NodeStateSignalData.hpp>
 #include "trp_client.hpp"
 #include "trp_node.hpp"
+#include <signaldata/DisconnectRep.hpp>
 
 extern "C" void* runClusterMgr_C(void * me);
 
@@ -56,9 +57,12 @@ public:
   void set_max_api_reg_req_interval(unsigned int millisec) {
     m_max_api_reg_req_interval = millisec;
   }
-  void force_update_connections();
+
+  void lock() { NdbMutex_Lock(clusterMgrThreadMutex); trp_client::lock(); }
+  void unlock() { trp_client::unlock();NdbMutex_Unlock(clusterMgrThreadMutex); }
 
 private:
+  void startup();
   void threadMain();
   
   int  theStop;
@@ -108,15 +112,15 @@ private:
    */
   NdbMutex*     clusterMgrThreadMutex;
 
-  void reportNodeFailed(NodeId nodeId, bool disconnect = false);
-  
   /**
    * Signals received
    */
   void execAPI_REGREQ    (const Uint32 * theData);
-  void execAPI_REGCONF   (const Uint32 * theData);
+  void execAPI_REGCONF   (const NdbApiSignal*, const LinearSectionPtr ptr[]);
   void execAPI_REGREF    (const Uint32 * theData);
-  void execNODE_FAILREP  (const Uint32 * theData);
+  void execCONNECT_REP   (const NdbApiSignal*, const LinearSectionPtr ptr[]);
+  void execDISCONNECT_REP(const NdbApiSignal*, const LinearSectionPtr ptr[]);
+  void execNODE_FAILREP  (const NdbApiSignal*, const LinearSectionPtr ptr[]);
   void execNF_COMPLETEREP(const NdbApiSignal*, const LinearSectionPtr ptr[]);
 
   void check_wait_for_hb(NodeId nodeId);
@@ -139,6 +143,8 @@ private:
     node.m_alive = alive;
   }
 
+  void set_node_dead(trp_node&);
+
   void print_nodes(const char* where, NdbOut& out = ndbout);
   void recalcMinDbVersion();
 

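The new ClusterMgr::lock()/unlock() pair fixes a single acquisition order for the two mutexes involved: clusterMgrThreadMutex first, then the trp_client mutex, released in reverse. Every path that needs both (the heartbeat thread, threadMainReceive, the ext_* entry points in TransporterFacade) now takes them in that same order. A minimal sketch of the convention using standard mutexes, with stand-in names:

#include <mutex>

struct TwoLockModel {
  std::mutex threadMutex;   // stands in for clusterMgrThreadMutex
  std::mutex clientMutex;   // stands in for trp_client's mutex

  void lock()   { threadMutex.lock(); clientMutex.lock(); }
  void unlock() { clientMutex.unlock(); threadMutex.unlock(); }
};
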
=== modified file 'storage/ndb/src/ndbapi/NdbDictionary.cpp'
--- a/storage/ndb/src/ndbapi/NdbDictionary.cpp	2010-09-30 14:27:18 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp	2011-01-27 13:23:07 +0000
@@ -532,6 +532,7 @@ NdbDictionary::Table::addColumn(const Co
   {
     return -1;
   }
+  col->m_column_no = m_impl.m_columns.size() - 1;
   return 0;
 }
 
@@ -976,6 +977,14 @@ NdbDictionary::Table::getPartitionId(Uin
   }
 }
 
+void
+NdbDictionary::Table::assignObjId(const NdbDictionary::ObjectId& _objId)
+{
+  const NdbDictObjectImpl& objId = NdbDictObjectImpl::getImpl(_objId);
+  m_impl.m_id = objId.m_id;
+  m_impl.m_version = objId.m_version;
+}
+
 /*****************************************************************
  * Index facade
  */
@@ -2064,12 +2073,23 @@ NdbDictionary::Dictionary::~Dictionary()
 int 
 NdbDictionary::Dictionary::createTable(const Table & t)
 {
+  return createTable(t, 0);
+}
+
+int
+NdbDictionary::Dictionary::createTable(const Table & t, ObjectId * objId)
+{
   int ret;
+  ObjectId tmp;
+  if (objId == 0)
+    objId = &tmp;
+
   if (likely(! is_ndb_blob_table(t.getName())))
   {
     DO_TRANS(
       ret,
-      m_impl.createTable(NdbTableImpl::getImpl(t))
+      m_impl.createTable(NdbTableImpl::getImpl(t),
+                         NdbDictObjectImpl::getImpl( *objId))
     );
   }
   else

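The new createTable(const Table&, ObjectId*) overload returns the id/version that DICT assigned, and Table::assignObjId() lets that identity be stamped onto another Table handle. A usage fragment follows, under the assumption of an already-initialized Ndb object (here called myNdb) and a fully defined table; column setup and error handling are omitted.

#include <NdbApi.hpp>

void create_with_objid(Ndb* myNdb, NdbDictionary::Table& tab)
{
  NdbDictionary::Dictionary* dict = myNdb->getDictionary();

  NdbDictionary::ObjectId objId;
  if (dict->createTable(tab, &objId) == 0)
  {
    // Give a second handle the same id/version as the table just created.
    NdbDictionary::Table copy(tab);
    copy.assignObjId(objId);
  }
}
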
=== modified file 'storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2011-01-12 08:04:39 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2011-01-27 13:23:07 +0000
@@ -2879,7 +2879,7 @@ NdbDictInterface::parseTableInfo(NdbTabl
  * Create table and alter table
  */
 int
-NdbDictionaryImpl::createTable(NdbTableImpl &t)
+NdbDictionaryImpl::createTable(NdbTableImpl &t, NdbDictObjectImpl & objid)
 { 
   DBUG_ENTER("NdbDictionaryImpl::createTable");
 
@@ -2910,6 +2910,8 @@ NdbDictionaryImpl::createTable(NdbTableI
   Uint32* data = (Uint32*)m_receiver.m_buffer.get_data();
   t.m_id = data[0];
   t.m_version = data[1];
+  objid.m_id = data[0];
+  objid.m_version = data[1];
 
   // update table def from DICT - by-pass cache
   NdbTableImpl* t2 =
@@ -3014,7 +3016,8 @@ NdbDictionaryImpl::createBlobTables(cons
       assert(bc != NULL);
       bc->setStorageType(d);
     }
-    if (createTable(bt) != 0) {
+    NdbDictionary::ObjectId objId; // ignore objid
+    if (createTable(bt, NdbDictObjectImpl::getImpl(objId)) != 0) {
       DBUG_RETURN(-1);
     }
   }

=== modified file 'storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp	2010-11-09 20:40:03 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp	2011-01-27 13:23:07 +0000
@@ -817,7 +817,7 @@ public:
   bool setTransporter(class Ndb * ndb, class TransporterFacade * tf);
   bool setTransporter(class TransporterFacade * tf);
 
-  int createTable(NdbTableImpl &t);
+  int createTable(NdbTableImpl &t, NdbDictObjectImpl &);
   int optimizeTable(const NdbTableImpl &t,
                     NdbOptimizeTableHandleImpl &h);
   int optimizeIndex(const NdbIndexImpl &index,

=== modified file 'storage/ndb/src/ndbapi/TransporterFacade.cpp'
--- a/storage/ndb/src/ndbapi/TransporterFacade.cpp	2011-01-10 13:30:14 +0000
+++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp	2011-01-26 08:34:31 +0000
@@ -485,9 +485,9 @@ void TransporterFacade::threadMainReceiv
 #endif
   while(!theStopReceive)
   {
-    NdbMutex_Lock(theMutexPtr);
+    theClusterMgr->lock();
     theTransporterRegistry->update_connections();
-    NdbMutex_Unlock(theMutexPtr);
+    theClusterMgr->unlock();
     NdbSleep_MilliSleep(100);
   }//while
   theTransporterRegistry->stopReceiving();
@@ -541,7 +541,7 @@ TransporterFacade::TransporterFacade(Glo
   m_globalDictCache(cache)
 {
   DBUG_ENTER("TransporterFacade::TransporterFacade");
-  theMutexPtr = NdbMutex_Create();
+  theMutexPtr = NdbMutex_CreateWithName("TTFM");
   sendPerformedLastInterval = 0;
 
   for (int i = 0; i < NO_API_FIXED_BLOCKS; i++)
@@ -599,14 +599,6 @@ TransporterFacade::do_connect_mgm(NodeId
     }
   }
 
-  /**
-   * Also setup Loopback Transporter
-   */
-  if (is_mgmd(nodeId, conf))
-  {
-    doConnect(nodeId);
-  }
-
   DBUG_RETURN(true);
 }
 
@@ -624,7 +616,7 @@ TransporterFacade::configure(NodeId node
   if (!IPCConfig::configureTransporters(nodeId,
                                         * conf,
                                         * theTransporterRegistry,
-                                        is_mgmd(nodeId, conf)))
+                                        true))
     DBUG_RETURN(false);
 
   // Configure cluster manager
@@ -663,7 +655,12 @@ TransporterFacade::configure(NodeId node
   // Open connection between MGM servers
   if (!do_connect_mgm(nodeId, conf))
     DBUG_RETURN(false);
-  
+
+  /**
+   * Also setup Loopback Transporter
+   */
+  doConnect(nodeId);
+
   DBUG_RETURN(true);
 }
 
@@ -874,7 +871,9 @@ TransporterFacade::sendSignal(const NdbA
     if (ss == SEND_OK)
     {
       assert(theClusterMgr->getNodeInfo(aNode).is_confirmed() ||
-             aSignal->readSignalNumber() == GSN_API_REGREQ);
+             aSignal->readSignalNumber() == GSN_API_REGREQ ||
+             (aSignal->readSignalNumber() == GSN_CONNECT_REP &&
+              aNode == ownId()));
     }
     return (ss == SEND_OK ? 0 : -1);
   }
@@ -1978,7 +1977,9 @@ TransporterFacade::ext_set_max_api_reg_r
 void
 TransporterFacade::ext_update_connections()
 {
-  theClusterMgr->force_update_connections();
+  theClusterMgr->lock();
+  theTransporterRegistry->update_connections();
+  theClusterMgr->unlock();
 }
 
 struct in_addr
@@ -1996,14 +1997,19 @@ TransporterFacade::ext_forceHB()
 bool
 TransporterFacade::ext_isConnected(NodeId aNodeId)
 {
-  return theTransporterRegistry->is_connected(aNodeId);
+  bool val;
+  theClusterMgr->lock();
+  val = theClusterMgr->theNodes[aNodeId].is_connected();
+  theClusterMgr->unlock();
+  return val;
 }
 
 void
 TransporterFacade::ext_doConnect(int aNodeId)
 {
-  lock_mutex();
+  theClusterMgr->lock();
+  assert(theClusterMgr->theNodes[aNodeId].is_connected() == false);
   doConnect(aNodeId);
-  unlock_mutex();
+  theClusterMgr->unlock();
 }
 

=== modified file 'storage/ndb/src/ndbapi/trp_client.cpp'
--- a/storage/ndb/src/ndbapi/trp_client.cpp	2011-01-12 08:04:39 +0000
+++ b/storage/ndb/src/ndbapi/trp_client.cpp	2011-01-21 18:31:51 +0000
@@ -112,6 +112,12 @@ trp_client::do_forceSend(int val)
   }
 }
 
+int
+trp_client::safe_sendSignal(const NdbApiSignal* signal, Uint32 nodeId)
+{
+  return m_facade->m_poll_owner->raw_sendSignal(signal, nodeId);
+}
+
 #include "NdbImpl.hpp"
 
 PollGuard::PollGuard(NdbImpl& impl)

=== modified file 'storage/ndb/src/ndbapi/trp_client.hpp'
--- a/storage/ndb/src/ndbapi/trp_client.hpp	2011-01-12 08:04:39 +0000
+++ b/storage/ndb/src/ndbapi/trp_client.hpp	2011-01-21 18:31:51 +0000
@@ -180,11 +180,4 @@ trp_client::raw_sendFragmentedSignal(con
   return m_facade->sendFragmentedSignal(signal, nodeId, ptr, secs);
 }
 
-inline
-int
-trp_client::safe_sendSignal(const NdbApiSignal* signal, Uint32 nodeId)
-{
-  return m_facade->m_poll_owner->raw_sendSignal(signal, nodeId);
-}
-
 #endif

=== modified file 'storage/ndb/src/ndbapi/trp_node.cpp'
--- a/storage/ndb/src/ndbapi/trp_node.cpp	2010-12-21 11:43:32 +0000
+++ b/storage/ndb/src/ndbapi/trp_node.cpp	2011-01-18 07:39:47 +0000
@@ -21,7 +21,7 @@
 trp_node::trp_node()
 {
   compatible = nfCompleteRep = true;
-  m_connected = defined = m_alive = m_api_reg_conf = false;
+  m_connected = defined = m_alive = m_api_reg_conf = m_node_fail_rep = false;
   bzero(&m_state, sizeof(m_state));
   m_state.init();
   m_state.startLevel = NodeState::SL_NOTHING;
@@ -37,6 +37,7 @@ trp_node::operator==(const trp_node& oth
           defined == other.defined &&
           m_alive == other.m_alive &&
           m_api_reg_conf == other.m_api_reg_conf &&
+          m_node_fail_rep == other.m_node_fail_rep &&
           minDbVersion == other.minDbVersion &&
           memcmp(&m_state, &other.m_state, sizeof(m_state)) == 0);
 }
@@ -50,6 +51,7 @@ operator<<(NdbOut& out, const trp_node&
       << ", connected: " << n.m_connected
       << ", api_reg_conf: " << n.m_api_reg_conf
       << ", alive: " << n.m_alive
+      << ", nodefailrep: " << n.m_node_fail_rep
       << ", nfCompleteRep: " << n.nfCompleteRep
       << ", minDbVersion: " << n.minDbVersion
       << ", state: " << n.m_state

=== modified file 'storage/ndb/src/ndbapi/trp_node.hpp'
--- a/storage/ndb/src/ndbapi/trp_node.hpp	2010-12-21 11:43:32 +0000
+++ b/storage/ndb/src/ndbapi/trp_node.hpp	2011-01-18 07:39:47 +0000
@@ -28,15 +28,21 @@ NdbOut& operator<<(NdbOut&, const struct
 struct trp_node
 {
   trp_node();
-  bool defined;
-  bool compatible;    // Version is compatible
-  bool nfCompleteRep; // NF Complete Rep has arrived
-  bool m_alive;       // Node is alive
-  Uint32 minDbVersion;
 
   NodeInfo  m_info;
   NodeState m_state;
 
+  Uint32 minDbVersion;
+  bool defined;
+  bool compatible;     // Version is compatible
+  bool nfCompleteRep;  // NF Complete Rep has arrived
+  bool m_alive;        // Node is alive
+  bool m_node_fail_rep;// NodeFailRep has arrived
+private:
+  bool m_connected;     // Transporter connected
+  bool m_api_reg_conf;// API_REGCONF has arrived
+public:
+
   void set_connected(bool connected) {
     assert(defined);
     m_connected = connected;
@@ -50,7 +56,8 @@ struct trp_node
   }
 
   void set_confirmed(bool confirmed) {
-    assert(is_connected()); // Must be connected to change confirmed
+    if (confirmed)
+      assert(is_connected());
     m_api_reg_conf = confirmed;
   }
 
@@ -64,8 +71,6 @@ struct trp_node
   bool operator==(const trp_node& other) const;
 
 private:
-  bool m_connected;     // Transporter connected
-  bool m_api_reg_conf;// API_REGCONF has arrived
 
   friend NdbOut& operator<<(NdbOut&, const trp_node&);
 };

=== modified file 'storage/ndb/test/ndbapi/testMgmd.cpp'
--- a/storage/ndb/test/ndbapi/testMgmd.cpp	2010-09-22 11:53:53 +0000
+++ b/storage/ndb/test/ndbapi/testMgmd.cpp	2011-01-18 07:22:33 +0000
@@ -971,16 +971,16 @@ TESTCASE("NoCfgCache",
   INITIALIZER(runTestNoConfigCache);
 }
 
-TESTCASE("Bug56844",
+TESTCASE("Bug45495",
          "Test that mgmd can be restarted in any order")
 {
-  INITIALIZER(runBug56844);
+  INITIALIZER(runTestBug45495);
 }
 
-TESTCASE("Bug45495",
-         "Test that mgmd can be restarted in any order")
+TESTCASE("Bug56844",
+         "Test that mgmd can be reloaded in parallel")
 {
-  INITIALIZER(runTestBug45495);
+  INITIALIZER(runBug56844);
 }
 
 NDBT_TESTSUITE_END(testMgmd);

=== modified file 'storage/ndb/test/run-test/atrt.hpp'
--- a/storage/ndb/test/run-test/atrt.hpp	2010-11-30 07:47:55 +0000
+++ b/storage/ndb/test/run-test/atrt.hpp	2011-01-17 14:59:31 +0000
@@ -132,7 +132,7 @@ bool configure(atrt_config&, int setup);
 bool setup_directories(atrt_config&, int setup);
 bool setup_files(atrt_config&, int setup, int sshx);
 
-bool deploy(atrt_config&);
+bool deploy(int, atrt_config&);
 bool sshx(atrt_config&, unsigned procmask);
 bool start(atrt_config&, unsigned procmask);
 

=== modified file 'storage/ndb/test/run-test/main.cpp'
--- a/storage/ndb/test/run-test/main.cpp	2010-11-29 15:42:25 +0000
+++ b/storage/ndb/test/run-test/main.cpp	2011-01-26 09:54:24 +0000
@@ -212,7 +212,7 @@ main(int argc, char ** argv)
   
   if (g_do_deploy)
   {
-    if (!deploy(g_config))
+    if (!deploy(g_do_deploy, g_config))
     {
       g_logger.critical("Failed to deploy");
       goto end;
@@ -613,8 +613,11 @@ parse_args(int argc, char** argv)
 	g_do_setup = 2;
 	break;
       case 'd':
-	g_do_deploy = 1;
+	g_do_deploy = 3;
 	break;
+      case 'D':
+        g_do_deploy = 2; // only binaries
+        break;
       case 'x':
 	g_do_sshx = atrt_process::AP_CLIENT | atrt_process::AP_NDB_API;
 	break;
@@ -1369,18 +1372,25 @@ do_rsync(const char *dir, const char *ds
 }
 
 bool
-deploy(atrt_config & config)
+deploy(int d, atrt_config & config)
 {
   for (size_t i = 0; i<config.m_hosts.size(); i++)
   {
-    if (!do_rsync(g_basedir, config.m_hosts[i]->m_hostname.c_str()))
-      return false;
+    if (d & 1)
+    {
+      if (!do_rsync(g_basedir, config.m_hosts[i]->m_hostname.c_str()))
+        return false;
+    }
 
-    if (!do_rsync(g_prefix, config.m_hosts[i]->m_hostname.c_str()))
-      return false;
+    if (d & 2)
+    {
+      if (!do_rsync(g_prefix, config.m_hosts[i]->m_hostname.c_str()))
+        return false;
     
-    if (g_prefix1 && !do_rsync(g_prefix1, config.m_hosts[i]->m_hostname.c_str()))
-      return false;
+      if (g_prefix1 && 
+          !do_rsync(g_prefix1, config.m_hosts[i]->m_hostname.c_str()))
+        return false;
+    }
   }
   
   return true;

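The deploy() change turns the single on/off flag into a small bitmask: -d still syncs everything (value 3), while the new -D (value 2) only rsyncs the installed binaries under g_prefix/g_prefix1 and skips the test files under g_basedir. A tiny stand-alone model of the flag semantics follows; the names are stand-ins for the real globals.

#include <cstdio>

// bit 0: test files (g_basedir), bit 1: installed binaries (g_prefix, g_prefix1)
static void deploy_model(int d)
{
  if (d & 1)
    printf("rsync test files\n");
  if (d & 2)
    printf("rsync binaries\n");
}

int main()
{
  deploy_model(3);   // -d : everything
  deploy_model(2);   // -D : only binaries
  return 0;
}
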
=== modified file 'storage/ndb/tools/CMakeLists.txt'
--- a/storage/ndb/tools/CMakeLists.txt	2010-11-15 14:18:10 +0000
+++ b/storage/ndb/tools/CMakeLists.txt	2011-01-26 08:54:07 +0000
@@ -18,30 +18,58 @@ INCLUDE(${CMAKE_SOURCE_DIR}/storage/ndb/
 INCLUDE_DIRECTORIES(
   ${CMAKE_SOURCE_DIR}/storage/ndb/src/ndbapi)
 
-ADD_EXECUTABLE(ndb_waiter waiter.cpp)
+MYSQL_ADD_EXECUTABLE(ndb_waiter
+  waiter.cpp
+  COMPONENT ClusterTools)
 TARGET_LINK_LIBRARIES(ndb_waiter ndbportlib ndbNDBT)
-ADD_EXECUTABLE(ndb_drop_table drop_tab.cpp)
+
+MYSQL_ADD_EXECUTABLE(ndb_drop_table
+  drop_tab.cpp
+  COMPONENT ClusterTools)
 TARGET_LINK_LIBRARIES(ndb_drop_table ndbNDBT ndbgeneral)
-ADD_EXECUTABLE(ndb_delete_all delete_all.cpp)
+
+MYSQL_ADD_EXECUTABLE(ndb_delete_all
+  delete_all.cpp
+  COMPONENT ClusterTools)
 TARGET_LINK_LIBRARIES(ndb_delete_all ndbNDBT)
-ADD_EXECUTABLE(ndb_desc desc.cpp)
+
+MYSQL_ADD_EXECUTABLE(ndb_desc
+  desc.cpp
+  COMPONENT ClusterTools)
 TARGET_LINK_LIBRARIES(ndb_desc ndbNDBT)
-ADD_EXECUTABLE(ndb_drop_index drop_index.cpp)
+
+MYSQL_ADD_EXECUTABLE(ndb_drop_index
+  drop_index.cpp
+  COMPONENT ClusterTools)
 TARGET_LINK_LIBRARIES(ndb_drop_index ndbNDBT)
-ADD_EXECUTABLE(ndb_show_tables listTables.cpp)
+
+MYSQL_ADD_EXECUTABLE(ndb_show_tables
+  listTables.cpp
+  COMPONENT ClusterTools)
 TARGET_LINK_LIBRARIES(ndb_show_tables ndbclient ndbNDBT)
-ADD_EXECUTABLE(ndb_select_all select_all.cpp)
+
+MYSQL_ADD_EXECUTABLE(ndb_select_all
+  select_all.cpp
+  COMPONENT ClusterTools)
 TARGET_LINK_LIBRARIES(ndb_select_all ndbNDBT)
-ADD_EXECUTABLE(ndb_select_count select_count.cpp)
+
+MYSQL_ADD_EXECUTABLE(ndb_select_count
+  select_count.cpp
+  COMPONENT ClusterTools)
 TARGET_LINK_LIBRARIES(ndb_select_count ndbNDBT)
-ADD_EXECUTABLE(ndb_restore
-               restore/restore_main.cpp
-               restore/consumer.cpp
-               restore/consumer_restore.cpp
-               restore/consumer_printer.cpp
-               restore/Restore.cpp)
+
+MYSQL_ADD_EXECUTABLE(ndb_restore
+  restore/restore_main.cpp
+  restore/consumer.cpp
+  restore/consumer_restore.cpp
+  restore/consumer_printer.cpp
+  restore/Restore.cpp
+  COMPONENT ClusterTools)
 TARGET_LINK_LIBRARIES(ndb_restore ndbNDBT ndbgeneral)
-ADD_EXECUTABLE(ndb_config ndb_config.cpp)
+
+MYSQL_ADD_EXECUTABLE(ndb_config
+  ndb_config.cpp
+  COMPONENT ClusterTools)
 TARGET_LINK_LIBRARIES(ndb_config ndbmgmclient ndbconf)
 
 SET(options "-I${CMAKE_SOURCE_DIR}/storage/ndb/src/mgmapi")

=== modified file 'storage/ndb/tools/restore/consumer_restore.cpp'
--- a/storage/ndb/tools/restore/consumer_restore.cpp	2011-01-14 15:25:27 +0000
+++ b/storage/ndb/tools/restore/consumer_restore.cpp	2011-01-26 09:54:24 +0000
@@ -21,6 +21,7 @@
 #include <kernel/ndb_limits.h>
 #include <my_sys.h>
 #include <NdbSleep.h>
+#include <NdbTick.h>
 
 #include <ndb_internal.hpp>
 #include <ndb_logevent.h>
@@ -362,6 +363,7 @@ BackupRestore::rebuild_indexes(const Tab
   for(size_t i = 0; i<indexes.size(); i++)
   {
     NdbDictionary::Index * idx = indexes[i];
+    Uint64 start = NdbTick_CurrentMillisecond();
     info << "Rebuilding index " << idx->getName() << " on table "
         << tab->getName() << " ..." << flush;
     if (dict->createIndex(* idx, 1) != 0)
@@ -373,7 +375,8 @@ BackupRestore::rebuild_indexes(const Tab
 
       return false;
     }
-    info << "OK" << endl;
+    Uint64 stop = NdbTick_CurrentMillisecond();
+    info << "OK (" << ((stop - start)/1000) << "s)" <<endl;
   }
 
   return true;

No bundle (reason: useless for push emails).