List: Commits
From: Jan Wedvik  Date: October 20 2011 12:53pm
Subject: bzr push into mysql-5.1-telco-7.0-spj-scan-vs-scan branch (jan.wedvik:3572 to 3574)
 3574 Jan Wedvik	2011-10-20
      Added regression test cases for the commit below:
      ---------------------------------------------------
      
      This commit concerns SPJ (i.e. pushed queries).
      
      The commit fixes two errors that may happen when there is a pruned child
      scan operation:
      1. The API did not set SIP_PRUNE_PARAMS as it should.
      2. There was an error in Dbspj::parseScanIndex(). When it called
      Dbspj::expand() to process prune keys, it started reading the query
      parameters from after whatever parseDA() had consumed. Now it starts
      from the beginning of the query parameters (for that operation). In
      other words, the parameter values are sent only once, and the same
      values are used for building both the scan bounds and the prune key.
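
      In condensed form (pattern only; full context is in the DbspjMain.cpp
      hunk below), the fix keeps a copy of the DABuffer pointing at the start
      of the operation's parameters before parseDA() consumes it, and hands
      that copy to expand() when building the prune key:

        DABuffer origParam = param;  // remember start of this op's parameters
        err = parseDA(ctx, requestPtr, treeNodePtr,
                      tree, treeBits, param, paramBits);  // advances 'param'
        ...
        // build the prune key from the unconsumed copy, not from 'param'
        err = expand(prunePtrI, tree, len, origParam, cnt, hasNull);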

    modified:
      mysql-test/suite/ndb/r/ndb_join_pushdown.result
      mysql-test/suite/ndb/t/ndb_join_pushdown.test
 3573 Jan Wedvik	2011-10-20 [merge]
      Merged from mysql-5.1-telco-7.0.

    added:
      mysql-test/include/not_ndb_is.inc
      mysql-test/suite/rpl_ndb/r/rpl_ndb_not_null.result
      mysql-test/suite/rpl_ndb/t/rpl_ndb_not_null.test
      storage/ndb/test/run-test/conf-daily-perf.cnf
      storage/ndb/test/run-test/daily-perf-tests.txt
    modified:
      mysql-test/r/information_schema.result
      mysql-test/r/information_schema_db.result
      mysql-test/suite/funcs_1/r/is_columns_is.result
      mysql-test/suite/funcs_1/r/is_tables_is.result
      mysql-test/suite/funcs_1/t/is_columns_is.test
      mysql-test/suite/funcs_1/t/is_tables_is.test
      mysql-test/t/information_schema.test
      mysql-test/t/information_schema_db.test
      mysql-test/t/mysqlshow.test
      sql/ha_ndb_index_stat.cc
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster.h
      sql/ha_ndbcluster_binlog.cc
      sql/ha_ndbcluster_connection.cc
      sql/ha_ndbinfo.cc
      sql/ha_ndbinfo.h
      sql/sql_parse.cc
      storage/ndb/include/ndbapi/Ndb.hpp
      storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
      storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
      storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
      storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
      storage/ndb/src/kernel/vm/NdbinfoTables.cpp
      storage/ndb/src/ndbapi/Ndb.cpp
      storage/ndb/src/ndbapi/NdbImpl.hpp
      storage/ndb/src/ndbapi/NdbQueryBuilder.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
      storage/ndb/src/ndbapi/Ndbinit.cpp
      storage/ndb/src/ndbapi/TransporterFacade.cpp
      storage/ndb/test/ndbapi/Makefile.am
      storage/ndb/test/ndbapi/testDict.cpp
      storage/ndb/test/ndbapi/testNdbApi.cpp
      storage/ndb/test/ndbapi/testNodeRestart.cpp
      storage/ndb/test/run-test/atrt.hpp
      storage/ndb/test/run-test/daily-basic-tests.txt
      storage/ndb/test/run-test/files.cpp
      storage/ndb/test/run-test/main.cpp
      support-files/compiler_warnings.supp
      tests/mysql_client_test.c
 3572 jonas oreland	2011-10-13 [merge]
      ndb - merge 70 to 70-spj-scan-scan

    added:
      mysql-test/suite/ndb_big/bug37983-master.opt
      mysql-test/suite/ndb_big/disabled.def
      storage/ndb/include/portlib/NdbGetRUsage.h
      storage/ndb/src/common/portlib/NdbGetRUsage.cpp
      storage/ndb/src/kernel/blocks/thrman.cpp
      storage/ndb/src/kernel/blocks/thrman.hpp
      storage/ndb/tools/ndbinfo_select_all.cpp
    modified:
      client/mysqlbinlog.cc
      mysql-test/mysql-test-run.pl
      mysql-test/suite/binlog/t/binlog_row_mysqlbinlog_db_filter.test
      mysql-test/suite/ndb/r/ndb_alter_table.result
      mysql-test/suite/ndb/r/ndb_basic.result
      mysql-test/suite/ndb/r/ndb_index_stat.result
      mysql-test/suite/ndb/r/ndbinfo.result
      mysql-test/suite/ndb/r/ndbinfo_dump.result
      mysql-test/suite/ndb/t/ndb_alter_table.test
      sql/ha_ndb_index_stat.cc
      sql/ha_ndb_index_stat.h
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster_binlog.cc
      storage/ndb/include/kernel/BlockNumbers.h
      storage/ndb/include/kernel/ndb_limits.h
      storage/ndb/include/kernel/signaldata/SchemaTrans.hpp
      storage/ndb/include/mgmapi/mgmapi_config_parameters.h
      storage/ndb/include/ndb_constants.h
      storage/ndb/src/common/debugger/BlockNames.cpp
      storage/ndb/src/common/portlib/CMakeLists.txt
      storage/ndb/src/common/portlib/Makefile.am
      storage/ndb/src/common/portlib/NdbThread.c
      storage/ndb/src/kernel/SimBlockList.cpp
      storage/ndb/src/kernel/blocks/CMakeLists.txt
      storage/ndb/src/kernel/blocks/LocalProxy.cpp
      storage/ndb/src/kernel/blocks/LocalProxy.hpp
      storage/ndb/src/kernel/blocks/Makefile.am
      storage/ndb/src/kernel/blocks/PgmanProxy.cpp
      storage/ndb/src/kernel/blocks/PgmanProxy.hpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
      storage/ndb/src/kernel/blocks/dbinfo/Dbinfo.cpp
      storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhProxy.cpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjProxy.hpp
      storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcProxy.hpp
      storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
      storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
      storage/ndb/src/kernel/blocks/record_types.hpp
      storage/ndb/src/kernel/blocks/tsman.cpp
      storage/ndb/src/kernel/ndbd.cpp
      storage/ndb/src/kernel/vm/DLFifoList.hpp
      storage/ndb/src/kernel/vm/DLHashTable.hpp
      storage/ndb/src/kernel/vm/DataBuffer2.hpp
      storage/ndb/src/kernel/vm/Ndbinfo.hpp
      storage/ndb/src/kernel/vm/NdbinfoTables.cpp
      storage/ndb/src/kernel/vm/Pool.hpp
      storage/ndb/src/kernel/vm/SimulatedBlock.hpp
      storage/ndb/src/kernel/vm/dummy_nonmt.cpp
      storage/ndb/src/kernel/vm/mt.cpp
      storage/ndb/src/kernel/vm/mt.hpp
      storage/ndb/src/kernel/vm/mt_thr_config.cpp
      storage/ndb/src/kernel/vm/mt_thr_config.hpp
      storage/ndb/src/kernel/vm/ndbd_malloc_impl.hpp
      storage/ndb/src/mgmsrv/ConfigInfo.cpp
      storage/ndb/src/ndbapi/ndberror.c
      storage/ndb/test/include/HugoCalculator.hpp
      storage/ndb/test/include/HugoOperations.hpp
      storage/ndb/test/include/HugoQueryBuilder.hpp
      storage/ndb/test/run-test/atrt.hpp
      storage/ndb/test/run-test/daily-basic-tests.txt
      storage/ndb/test/run-test/daily-devel-tests.txt
      storage/ndb/test/run-test/files.cpp
      storage/ndb/test/run-test/main.cpp
      storage/ndb/test/run-test/setup.cpp
      storage/ndb/test/src/HugoOperations.cpp
      storage/ndb/test/src/NDBT_Find.cpp
      storage/ndb/tools/CMakeLists.txt
      storage/ndb/tools/Makefile.am
      storage/ndb/tools/ndbinfo_sql.cpp
=== added file 'mysql-test/include/not_ndb_is.inc'
--- a/mysql-test/include/not_ndb_is.inc	1970-01-01 00:00:00 +0000
+++ b/mysql-test/include/not_ndb_is.inc	2011-10-17 14:16:56 +0000
@@ -0,0 +1,27 @@
+#
+# Check if the NDB information_schema plugin is available by selecting
+# from information_schema.plugins; if an error about no such table occurs, bail out
+#
+
+disable_result_log;
+disable_query_log;
+
+--error 0, 1109
+select @have_ndb_is:= count(*) from information_schema.plugins
+where plugin_name like '%ndb%'
+  and PLUGIN_TYPE = 'INFORMATION SCHEMA';
+
+
+if ($mysql_errno){
+  # TODO: for backward compatibility, implement the old fashioned check
+  # here, i.e. use SHOW VARIABLES LIKE "have_ndb"
+  die Can not determine if server supports ndb without is.plugins table;
+}
+
+
+if (`select @have_ndb_is`){
+  skip NDB information schema table installed;
+}
+
+enable_query_log;
+enable_result_log;

=== modified file 'mysql-test/r/information_schema.result'
--- a/mysql-test/r/information_schema.result	2011-03-29 14:09:05 +0000
+++ b/mysql-test/r/information_schema.result	2011-10-17 14:16:56 +0000
@@ -39,8 +39,7 @@ insert into t5 values (10);
 create view v1 (c) as
 SELECT table_name FROM information_schema.TABLES
 WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND
-table_name<>'ndb_binlog_index' AND
-table_name<>'ndb_apply_status';
+table_name not like 'ndb%';
 select * from v1;
 c
 CHARACTER_SETS
@@ -850,7 +849,7 @@ VIEWS	TABLE_NAME	select
 delete from mysql.user where user='mysqltest_4';
 delete from mysql.db where user='mysqltest_4';
 flush privileges;
-SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND table_name<>'ndb_binlog_index' AND table_name<>'ndb_apply_status' GROUP BY TABLE_SCHEMA;
+SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND table_name not like 'ndb%' GROUP BY TABLE_SCHEMA;
 table_schema	count(*)
 information_schema	28
 mysql	22
@@ -1230,7 +1229,8 @@ INNER JOIN
 information_schema.columns c1
 ON t.table_schema = c1.table_schema AND
 t.table_name = c1.table_name
-WHERE t.table_schema = 'information_schema' AND
+WHERE t.table_name not like 'ndb%' AND
+t.table_schema = 'information_schema' AND
 c1.ordinal_position =
 ( SELECT COALESCE(MIN(c2.ordinal_position),1)
 FROM information_schema.columns c2
@@ -1273,7 +1273,8 @@ INNER JOIN
 information_schema.columns c1
 ON t.table_schema = c1.table_schema AND
 t.table_name = c1.table_name
-WHERE t.table_schema = 'information_schema' AND
+WHERE t.table_name not like 'ndb%' AND
+t.table_schema = 'information_schema' AND
 c1.ordinal_position =
 ( SELECT COALESCE(MIN(c2.ordinal_position),1)
 FROM information_schema.columns c2
@@ -1365,7 +1366,8 @@ count(*) as num1
 from information_schema.tables t
 inner join information_schema.columns c1
 on t.table_schema = c1.table_schema AND t.table_name = c1.table_name
-where t.table_schema = 'information_schema' and
+where t.table_name not like 'ndb%' and
+t.table_schema = 'information_schema' and
 c1.ordinal_position =
 (select isnull(c2.column_type) -
 isnull(group_concat(c2.table_schema, '.', c2.table_name)) +

=== modified file 'mysql-test/r/information_schema_db.result'
--- a/mysql-test/r/information_schema_db.result	2010-11-30 17:51:25 +0000
+++ b/mysql-test/r/information_schema_db.result	2011-10-17 14:16:56 +0000
@@ -3,7 +3,7 @@ drop view if exists v1,v2;
 drop function if exists f1;
 drop function if exists f2;
 use INFORMATION_SCHEMA;
-show tables;
+show tables where Tables_in_information_schema NOT LIKE 'ndb%';
 Tables_in_information_schema
 CHARACTER_SETS
 COLLATIONS

=== modified file 'mysql-test/suite/funcs_1/r/is_columns_is.result'
--- a/mysql-test/suite/funcs_1/r/is_columns_is.result	2010-10-06 10:06:47 +0000
+++ b/mysql-test/suite/funcs_1/r/is_columns_is.result	2011-10-18 07:22:32 +0000
@@ -1,6 +1,6 @@
 SELECT * FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 ORDER BY table_schema, table_name, column_name;
 TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	COLUMN_NAME	ORDINAL_POSITION	COLUMN_DEFAULT	IS_NULLABLE	DATA_TYPE	CHARACTER_MAXIMUM_LENGTH	CHARACTER_OCTET_LENGTH	NUMERIC_PRECISION	NUMERIC_SCALE	CHARACTER_SET_NAME	COLLATION_NAME	COLUMN_TYPE	COLUMN_KEY	EXTRA	PRIVILEGES	COLUMN_COMMENT	STORAGE	FORMAT
 NULL	information_schema	CHARACTER_SETS	CHARACTER_SET_NAME	1		NO	varchar	32	96	NULL	NULL	utf8	utf8_general_ci	varchar(32)			select		Default	Default
@@ -312,7 +312,7 @@ CHARACTER_SET_NAME,
 COLLATION_NAME
 FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 AND CHARACTER_OCTET_LENGTH / CHARACTER_MAXIMUM_LENGTH = 1
 ORDER BY CHARACTER_SET_NAME, COLLATION_NAME, COL_CML;
 COL_CML	DATA_TYPE	CHARACTER_SET_NAME	COLLATION_NAME
@@ -324,7 +324,7 @@ CHARACTER_SET_NAME,
 COLLATION_NAME
 FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 AND CHARACTER_OCTET_LENGTH / CHARACTER_MAXIMUM_LENGTH <> 1
 ORDER BY CHARACTER_SET_NAME, COLLATION_NAME, COL_CML;
 COL_CML	DATA_TYPE	CHARACTER_SET_NAME	COLLATION_NAME
@@ -336,7 +336,7 @@ CHARACTER_SET_NAME,
 COLLATION_NAME
 FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 AND CHARACTER_OCTET_LENGTH / CHARACTER_MAXIMUM_LENGTH IS NULL
 ORDER BY CHARACTER_SET_NAME, COLLATION_NAME, COL_CML;
 COL_CML	DATA_TYPE	CHARACTER_SET_NAME	COLLATION_NAME
@@ -357,7 +357,7 @@ COLLATION_NAME,
 COLUMN_TYPE
 FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 ORDER BY TABLE_SCHEMA, TABLE_NAME, ORDINAL_POSITION;
 COL_CML	TABLE_SCHEMA	TABLE_NAME	COLUMN_NAME	DATA_TYPE	CHARACTER_MAXIMUM_LENGTH	CHARACTER_OCTET_LENGTH	CHARACTER_SET_NAME	COLLATION_NAME	COLUMN_TYPE
 3.0000	information_schema	CHARACTER_SETS	CHARACTER_SET_NAME	varchar	32	96	utf8	utf8_general_ci	varchar(32)

=== modified file 'mysql-test/suite/funcs_1/r/is_tables_is.result'
--- a/mysql-test/suite/funcs_1/r/is_tables_is.result	2008-06-18 17:23:55 +0000
+++ b/mysql-test/suite/funcs_1/r/is_tables_is.result	2011-10-18 07:22:32 +0000
@@ -11,7 +11,7 @@ AS "user_comment",
 '-----------------------------------------------------' AS "Separator"
 FROM information_schema.tables
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 ORDER BY table_schema,table_name;
 TABLE_CATALOG	NULL
 TABLE_SCHEMA	information_schema
@@ -649,7 +649,7 @@ AS "user_comment",
 '-----------------------------------------------------' AS "Separator"
 FROM information_schema.tables
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 ORDER BY table_schema,table_name;
 TABLE_CATALOG	NULL
 TABLE_SCHEMA	information_schema

=== modified file 'mysql-test/suite/funcs_1/t/is_columns_is.test'
--- a/mysql-test/suite/funcs_1/t/is_columns_is.test	2008-06-16 18:39:58 +0000
+++ b/mysql-test/suite/funcs_1/t/is_columns_is.test	2011-10-18 07:22:32 +0000
@@ -18,5 +18,5 @@
 --source include/not_embedded.inc
 
 let $my_where = WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling';
+AND table_name <> 'profiling' and table_name not like 'ndb%';
 --source suite/funcs_1/datadict/columns.inc

=== modified file 'mysql-test/suite/funcs_1/t/is_tables_is.test'
--- a/mysql-test/suite/funcs_1/t/is_tables_is.test	2008-03-07 16:33:07 +0000
+++ b/mysql-test/suite/funcs_1/t/is_tables_is.test	2011-10-18 07:22:32 +0000
@@ -13,6 +13,6 @@
 #
 
 let $my_where = WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling';
+AND table_name <> 'profiling' and table_name not like 'ndb%';
 --source suite/funcs_1/datadict/tables1.inc
 

=== modified file 'mysql-test/suite/ndb/r/ndb_join_pushdown.result'
--- a/mysql-test/suite/ndb/r/ndb_join_pushdown.result	2011-10-03 09:41:51 +0000
+++ b/mysql-test/suite/ndb/r/ndb_join_pushdown.result	2011-10-20 12:52:58 +0000
@@ -5474,6 +5474,38 @@ select count(*) from t1 as x1 join t1 as
 count(*)
 3
 drop table t1;
+create table t1 
+(a int not null,
+b int not null, 
+c int not null,
+d int not null,
+primary key(a,b,c,d)) engine=ndb partition by key (b,c);
+insert into t1 values (0x4f, 0x4f, 0x4f, 0x4f);
+explain select * from t1 as x1 
+join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+join t1 as x3 on x3.a=x2.d and x3.b=x1.d and x3.c=x2.c;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	x1	ALL	NULL	NULL	NULL	NULL	2	Using where with pushed condition
+1	SIMPLE	x2	ref	PRIMARY	PRIMARY	8	func,test.x1.b	1	Parent of 2 pushed join@1; Using where
+1	SIMPLE	x3	ref	PRIMARY	PRIMARY	12	test.x2.d,test.x1.d,test.x2.c	1	Child of 'x2' in pushed join@1
+select * from t1 as x1 
+join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+join t1 as x3 on x3.a=x2.c and x3.b=x1.d and x3.c=x2.c;
+a	b	c	d	a	b	c	d	a	b	c	d
+79	79	79	79	79	79	79	79	79	79	79	79
+explain select * from t1 as x1 
+join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+join t1 as x3 on x3.a=x2.d and x3.b=x1.d and x3.c=0x4f;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	x1	ALL	NULL	NULL	NULL	NULL	2	Using where with pushed condition
+1	SIMPLE	x2	ref	PRIMARY	PRIMARY	8	func,test.x1.b	1	Parent of 2 pushed join@1; Using where
+1	SIMPLE	x3	ref	PRIMARY	PRIMARY	12	test.x2.d,test.x1.d,const	1	Child of 'x2' in pushed join@1
+select * from t1 as x1 
+join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+join t1 as x3 on x3.a=x2.c and x3.b=x1.d and x3.c=0x4f;
+a	b	c	d	a	b	c	d	a	b	c	d
+79	79	79	79	79	79	79	79	79	79	79	79
+drop table t1;
 create temporary table spj_counts_at_end
 select counter_name, sum(val) as val 
 from ndbinfo.counters 
@@ -5490,10 +5522,10 @@ and spj_counts_at_end.counter_name <> 'R
        and spj_counts_at_end.counter_name <> 'SCAN_ROWS_RETURNED'
        and spj_counts_at_end.counter_name <> 'SCAN_BATCHES_RETURNED';
 counter_name	spj_counts_at_end.val - spj_counts_at_startup.val
-CONST_PRUNED_RANGE_SCANS_RECEIVED	6
+CONST_PRUNED_RANGE_SCANS_RECEIVED	8
 LOCAL_TABLE_SCANS_SENT	250
-PRUNED_RANGE_SCANS_RECEIVED	25
-RANGE_SCANS_RECEIVED	728
+PRUNED_RANGE_SCANS_RECEIVED	27
+RANGE_SCANS_RECEIVED	732
 READS_RECEIVED	58
 TABLE_SCANS_RECEIVED	250
 drop table spj_counts_at_startup;
@@ -5505,9 +5537,9 @@ pruned_scan_count
 sorted_scan_count
 10
 pushed_queries_defined
-401
+405
 pushed_queries_dropped
 11
 pushed_queries_executed
-548
+550
 set ndb_join_pushdown = @save_ndb_join_pushdown;

=== modified file 'mysql-test/suite/ndb/t/ndb_join_pushdown.test'
--- a/mysql-test/suite/ndb/t/ndb_join_pushdown.test	2011-10-03 09:41:51 +0000
+++ b/mysql-test/suite/ndb/t/ndb_join_pushdown.test	2011-10-20 12:52:58 +0000
@@ -3843,6 +3843,40 @@ select count(*) from t1 as x1 join t1 as
 connection ddl;
 drop table t1;
 
+####################
+# Test pruned child scans using parameter values (known regression).
+####################
+create table t1 
+       (a int not null,
+       b int not null, 
+       c int not null,
+       d int not null,
+       primary key(a,b,c,d)) engine=ndb partition by key (b,c);
+
+connection spj;
+insert into t1 values (0x4f, 0x4f, 0x4f, 0x4f);
+
+# Prune key depends on parent row.
+explain select * from t1 as x1 
+	join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+	join t1 as x3 on x3.a=x2.d and x3.b=x1.d and x3.c=x2.c;
+
+select * from t1 as x1 
+	join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+	join t1 as x3 on x3.a=x2.c and x3.b=x1.d and x3.c=x2.c;
+
+# Prune key is fixed.
+explain select * from t1 as x1 
+	join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+	join t1 as x3 on x3.a=x2.d and x3.b=x1.d and x3.c=0x4f;
+
+select * from t1 as x1 
+	join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+	join t1 as x3 on x3.a=x2.c and x3.b=x1.d and x3.c=0x4f;
+
+connection ddl;
+drop table t1;
+
 ########################################
 # Verify DBSPJ counters for entire test:
 # Note: These tables are 'temporary' within 'connection spj'

=== added file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_not_null.result'
--- a/mysql-test/suite/rpl_ndb/r/rpl_ndb_not_null.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb/r/rpl_ndb_not_null.result	2011-10-20 12:31:31 +0000
@@ -0,0 +1,196 @@
+include/master-slave.inc
+[connection master]
+SET SQL_LOG_BIN= 0;
+CREATE TABLE t1(`a` INT, `b` DATE DEFAULT NULL,
+`c` INT DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t2(`a` INT, `b` DATE DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t3(`a` INT, `b` DATE DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t4(`a` INT, `b` DATE DEFAULT NULL,
+`c` INT DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+SET SQL_LOG_BIN= 1;
+CREATE TABLE t1(`a` INT, `b` DATE DEFAULT NULL,
+`c` INT DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t2(`a` INT, `b` DATE DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t3(`a` INT, `b` DATE DEFAULT '0000-00-00',
+`c` INT DEFAULT 500, 
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t4(`a` INT, `b` DATE DEFAULT '0000-00-00',
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+************* EXECUTION WITH INSERTS *************
+INSERT INTO t1(a,b,c) VALUES (1, null, 1);
+INSERT INTO t1(a,b,c) VALUES (2,'1111-11-11', 2);
+INSERT INTO t1(a,b) VALUES (3, null);
+INSERT INTO t1(a,c) VALUES (4, 4);
+INSERT INTO t1(a) VALUES (5);
+INSERT INTO t2(a,b) VALUES (1, null);
+INSERT INTO t2(a,b) VALUES (2,'1111-11-11');
+INSERT INTO t2(a) VALUES (3);
+INSERT INTO t3(a,b) VALUES (1, null);
+INSERT INTO t3(a,b) VALUES (2,'1111-11-11');
+INSERT INTO t3(a) VALUES (3);
+INSERT INTO t4(a,b,c) VALUES (1, null, 1);
+INSERT INTO t4(a,b,c) VALUES (2,'1111-11-11', 2);
+INSERT INTO t4(a,b) VALUES (3, null);
+INSERT INTO t4(a,c) VALUES (4, 4);
+INSERT INTO t4(a) VALUES (5);
+************* SHOWING THE RESULT SETS WITH INSERTS *************
+TABLES t1 and t2 must be equal otherwise an error will be thrown. 
+include/diff_tables.inc [master:t1, slave:t1]
+include/diff_tables.inc [master:t2, slave:t2]
+TABLES t2 and t3 must be different.
+SELECT * FROM t3 ORDER BY a;
+a	b
+1	NULL
+2	1111-11-11
+3	NULL
+SELECT * FROM t3 ORDER BY a;
+a	b	c
+1	NULL	500
+2	1111-11-11	500
+3	NULL	500
+SELECT * FROM t4 ORDER BY a;
+a	b	c
+1	NULL	1
+2	1111-11-11	2
+3	NULL	NULL
+4	NULL	4
+5	NULL	NULL
+SELECT * FROM t4 ORDER BY a;
+a	b
+1	NULL
+2	1111-11-11
+3	NULL
+4	NULL
+5	NULL
+************* EXECUTION WITH UPDATES and REPLACES *************
+DELETE FROM t1;
+INSERT INTO t1(a,b,c) VALUES (1,'1111-11-11', 1);
+REPLACE INTO t1(a,b,c) VALUES (2,'1111-11-11', 2);
+UPDATE t1 set b= NULL, c= 300 where a= 1;
+REPLACE INTO t1(a,b,c) VALUES (2, NULL, 300);
+************* SHOWING THE RESULT SETS WITH UPDATES and REPLACES *************
+TABLES t1 and t2 must be equal otherwise an error will be thrown. 
+include/diff_tables.inc [master:t1, slave:t1]
+************* CLEANING *************
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+DROP TABLE t4;
+SET SQL_LOG_BIN= 0;
+CREATE TABLE t1 (`a` INT, `b` BIT DEFAULT NULL, `c` BIT DEFAULT NULL, 
+PRIMARY KEY (`a`)) ENGINE= 'NDB';
+SET SQL_LOG_BIN= 1;
+CREATE TABLE t1 (`a` INT, `b` BIT DEFAULT b'01', `c` BIT DEFAULT NULL,
+PRIMARY KEY (`a`)) ENGINE= 'NDB';
+************* EXECUTION WITH INSERTS *************
+INSERT INTO t1(a,b,c) VALUES (1, null, b'01');
+INSERT INTO t1(a,b,c) VALUES (2,b'00', b'01');
+INSERT INTO t1(a,b) VALUES (3, null);
+INSERT INTO t1(a,c) VALUES (4, b'01');
+INSERT INTO t1(a) VALUES (5);
+************* SHOWING THE RESULT SETS WITH INSERTS *************
+TABLES t1 and t2 must be different.
+SELECT a,b+0,c+0 FROM t1 ORDER BY a;
+a	b+0	c+0
+1	NULL	1
+2	0	1
+3	NULL	NULL
+4	NULL	1
+5	NULL	NULL
+SELECT a,b+0,c+0 FROM t1 ORDER BY a;
+a	b+0	c+0
+1	NULL	1
+2	0	1
+3	NULL	NULL
+4	NULL	1
+5	NULL	NULL
+************* EXECUTION WITH UPDATES and REPLACES *************
+DELETE FROM t1;
+INSERT INTO t1(a,b,c) VALUES (1,b'00', b'01');
+REPLACE INTO t1(a,b,c) VALUES (2,b'00',b'01');
+UPDATE t1 set b= NULL, c= b'00' where a= 1;
+REPLACE INTO t1(a,b,c) VALUES (2, NULL, b'00');
+************* SHOWING THE RESULT SETS WITH UPDATES and REPLACES *************
+TABLES t1 and t2 must be equal otherwise an error will be thrown. 
+include/diff_tables.inc [master:t1, slave:t1]
+DROP TABLE t1;
+################################################################################
+#                       NULL ---> NOT NULL (STRICT MODE)
+#                    UNCOMMENT THIS AFTER FIXING BUG#43992
+################################################################################
+################################################################################
+#                       NULL ---> NOT NULL (NON-STRICT MODE)
+################################################################################
+SET SQL_LOG_BIN= 0;
+CREATE TABLE t1(`a` INT NOT NULL, `b` INT,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t2(`a` INT NOT NULL, `b` INT,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t3(`a` INT NOT NULL, `b` INT,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+SET SQL_LOG_BIN= 1;
+CREATE TABLE t1(`a` INT NOT NULL, `b` INT NOT NULL, 
+`c` INT NOT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t2(`a` INT NOT NULL, `b` INT NOT NULL,
+`c` INT, 
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t3(`a` INT NOT NULL, `b` INT NOT NULL,
+`c` INT DEFAULT 500, 
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+************* EXECUTION WITH INSERTS *************
+INSERT INTO t1(a) VALUES (1);
+INSERT INTO t1(a, b) VALUES (2, NULL);
+INSERT INTO t1(a, b) VALUES (3, 1);
+INSERT INTO t2(a) VALUES (1);
+INSERT INTO t2(a, b) VALUES (2, NULL);
+INSERT INTO t2(a, b) VALUES (3, 1);
+INSERT INTO t3(a) VALUES (1);
+INSERT INTO t3(a, b) VALUES (2, NULL);
+INSERT INTO t3(a, b) VALUES (3, 1);
+INSERT INTO t3(a, b) VALUES (4, 1);
+REPLACE INTO t3(a, b) VALUES (5, null);
+REPLACE INTO t3(a, b) VALUES (3, null);
+UPDATE t3 SET b = NULL where a = 4;
+************* SHOWING THE RESULT SETS *************
+SELECT * FROM t1 ORDER BY a;
+a	b
+1	NULL
+2	NULL
+3	1
+SELECT * FROM t1 ORDER BY a;
+a	b	c
+SELECT * FROM t2 ORDER BY a;
+a	b
+1	NULL
+2	NULL
+3	1
+SELECT * FROM t2 ORDER BY a;
+a	b	c
+1	0	NULL
+2	0	NULL
+3	1	NULL
+SELECT * FROM t3 ORDER BY a;
+a	b
+1	NULL
+2	NULL
+3	NULL
+4	NULL
+5	NULL
+SELECT * FROM t3 ORDER BY a;
+a	b	c
+1	0	500
+2	0	500
+3	0	500
+4	0	500
+5	0	500
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+include/rpl_end.inc

=== added file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_not_null.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_not_null.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_not_null.test	2011-10-20 12:31:31 +0000
@@ -0,0 +1,8 @@
+-- source include/have_binlog_format_row.inc
+-- source include/have_ndb.inc
+-- source include/master-slave.inc
+
+let $engine = 'NDB';
+-- source extra/rpl_tests/rpl_not_null.test
+
+--source include/rpl_end.inc

=== modified file 'mysql-test/t/information_schema.test'
--- a/mysql-test/t/information_schema.test	2010-06-23 16:25:31 +0000
+++ b/mysql-test/t/information_schema.test	2011-10-17 14:16:56 +0000
@@ -44,8 +44,7 @@ insert into t5 values (10);
 create view v1 (c) as
  SELECT table_name FROM information_schema.TABLES
   WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND
-        table_name<>'ndb_binlog_index' AND
-        table_name<>'ndb_apply_status';
+        table_name not like 'ndb%';
 select * from v1;
 
 select c,table_name from v1
@@ -539,7 +538,7 @@ flush privileges;
 # Bug#9404 information_schema: Weird error messages
 # with SELECT SUM() ... GROUP BY queries
 #
-SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND table_name<>'ndb_binlog_index' AND table_name<>'ndb_apply_status' GROUP BY TABLE_SCHEMA;
+SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND table_name not like 'ndb%' GROUP BY TABLE_SCHEMA;
 
 
 #
@@ -921,7 +920,8 @@ SELECT t.table_name, c1.column_name
        information_schema.columns c1
        ON t.table_schema = c1.table_schema AND
           t.table_name = c1.table_name
-  WHERE t.table_schema = 'information_schema' AND
+  WHERE t.table_name not like 'ndb%' AND
+        t.table_schema = 'information_schema' AND
         c1.ordinal_position =
         ( SELECT COALESCE(MIN(c2.ordinal_position),1)
             FROM information_schema.columns c2
@@ -935,7 +935,8 @@ SELECT t.table_name, c1.column_name
        information_schema.columns c1
        ON t.table_schema = c1.table_schema AND
           t.table_name = c1.table_name
-  WHERE t.table_schema = 'information_schema' AND
+  WHERE t.table_name not like 'ndb%' AND
+        t.table_schema = 'information_schema' AND
         c1.ordinal_position =
         ( SELECT COALESCE(MIN(c2.ordinal_position),1)
             FROM information_schema.columns c2
@@ -1032,7 +1033,8 @@ select t.table_name, group_concat(t.tabl
 from information_schema.tables t
 inner join information_schema.columns c1
 on t.table_schema = c1.table_schema AND t.table_name = c1.table_name
-where t.table_schema = 'information_schema' and
+where t.table_name not like 'ndb%' and
+      t.table_schema = 'information_schema' and
         c1.ordinal_position =
         (select isnull(c2.column_type) -
          isnull(group_concat(c2.table_schema, '.', c2.table_name)) +

=== modified file 'mysql-test/t/information_schema_db.test'
--- a/mysql-test/t/information_schema_db.test	2009-09-28 11:25:47 +0000
+++ b/mysql-test/t/information_schema_db.test	2011-10-17 14:16:56 +0000
@@ -13,7 +13,7 @@ drop function if exists f2;
 
 use INFORMATION_SCHEMA;
 --replace_result Tables_in_INFORMATION_SCHEMA Tables_in_information_schema
-show tables;
+show tables where Tables_in_INFORMATION_SCHEMA NOT LIKE 'ndb%';
 --replace_result 'Tables_in_INFORMATION_SCHEMA (T%)' 'Tables_in_information_schema (T%)'
 show tables from INFORMATION_SCHEMA like 'T%';
 create database `inf%`;

=== modified file 'mysql-test/t/mysqlshow.test'
--- a/mysql-test/t/mysqlshow.test	2006-07-22 03:29:25 +0000
+++ b/mysql-test/t/mysqlshow.test	2011-10-17 14:16:56 +0000
@@ -1,6 +1,9 @@
 # Can't run test of external client with embedded server
 -- source include/not_embedded.inc
 
+# This test lists the tables in information_schema, and ndb adds some
+-- source include/not_ndb_is.inc
+
 --disable_warnings
 DROP TABLE IF EXISTS t1,t2,test1,test2;
 --enable_warnings

=== modified file 'sql/ha_ndb_index_stat.cc'
--- a/sql/ha_ndb_index_stat.cc	2011-10-08 16:56:43 +0000
+++ b/sql/ha_ndb_index_stat.cc	2011-10-17 12:43:31 +0000
@@ -1984,7 +1984,7 @@ ndb_index_stat_thread_func(void *arg __a
   }
 
   /* Get thd_ndb for this thread */
-  if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
+  if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb(thd)))
   {
     sql_print_error("Could not allocate Thd_ndb object");
     pthread_mutex_lock(&LOCK_ndb_index_stat_thread);

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2011-10-13 20:08:25 +0000
+++ b/sql/ha_ndbcluster.cc	2011-10-20 12:51:03 +0000
@@ -11935,7 +11935,8 @@ int ha_ndbcluster::close(void)
   wait on condition for a Ndb object to be released.
   - Alt.2 Seize/release from pool, wait until next release 
 */
-Thd_ndb* ha_ndbcluster::seize_thd_ndb()
+Thd_ndb*
+ha_ndbcluster::seize_thd_ndb(THD * thd)
 {
   Thd_ndb *thd_ndb;
   DBUG_ENTER("seize_thd_ndb");
@@ -11958,6 +11959,10 @@ Thd_ndb* ha_ndbcluster::seize_thd_ndb()
     delete thd_ndb;
     thd_ndb= NULL;
   }
+  else
+  {
+    thd_ndb->ndb->setCustomData64(thd_get_thread_id(thd));
+  }
   DBUG_RETURN(thd_ndb);
 }
 
@@ -11994,7 +11999,10 @@ bool Thd_ndb::recycle_ndb(THD* thd)
                          ndb->getNdbError().message));
     DBUG_RETURN(false);
   }
-
+  else
+  {
+    ndb->setCustomData64(thd_get_thread_id(thd));
+  }
   DBUG_RETURN(true);
 }
 
@@ -12032,7 +12040,7 @@ Ndb* check_ndb_in_thd(THD* thd, bool val
   Thd_ndb *thd_ndb= get_thd_ndb(thd);
   if (!thd_ndb)
   {
-    if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
+    if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb(thd)))
       return NULL;
     set_thd_ndb(thd, thd_ndb);
   }
@@ -15827,7 +15835,7 @@ pthread_handler_t ndb_util_thread_func(v
   pthread_mutex_unlock(&LOCK_ndb_util_thread);
 
   /* Get thd_ndb for this thread */
-  if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
+  if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb(thd)))
   {
     sql_print_error("Could not allocate Thd_ndb object");
     pthread_mutex_lock(&LOCK_ndb_util_thread);
@@ -18288,12 +18296,8 @@ struct st_mysql_storage_engine ndbcluste
 { MYSQL_HANDLERTON_INTERFACE_VERSION };
 
 
-#include "ha_ndbinfo.h"
-
-extern struct st_mysql_sys_var* ndbinfo_system_variables[];
-
-struct st_mysql_storage_engine ndbinfo_storage_engine=
-{ MYSQL_HANDLERTON_INTERFACE_VERSION };
+extern struct st_mysql_plugin i_s_ndb_transid_mysql_connection_map_plugin;
+extern struct st_mysql_plugin ndbinfo_plugin;
 
 mysql_declare_plugin(ndbcluster)
 {
@@ -18310,20 +18314,9 @@ mysql_declare_plugin(ndbcluster)
   system_variables,           /* system variables */
   NULL                        /* config options                  */
 },
-{
-  MYSQL_STORAGE_ENGINE_PLUGIN,
-  &ndbinfo_storage_engine,
-  "ndbinfo",
-  "Sun Microsystems Inc.",
-  "MySQL Cluster system information storage engine",
-  PLUGIN_LICENSE_GPL,
-  ndbinfo_init,               /* plugin init */
-  ndbinfo_deinit,             /* plugin deinit */
-  0x0001,                     /* plugin version */
-  NULL,                       /* status variables */
-  ndbinfo_system_variables,   /* system variables */
-  NULL                        /* config options */
-}
+ndbinfo_plugin, /* ndbinfo plugin */
+/* IS plugin table which maps between mysql connection id and ndb trans-id */
+i_s_ndb_transid_mysql_connection_map_plugin
 mysql_declare_plugin_end;
 
 #endif

=== modified file 'sql/ha_ndbcluster.h'
--- a/sql/ha_ndbcluster.h	2011-09-09 13:13:52 +0000
+++ b/sql/ha_ndbcluster.h	2011-10-20 12:51:03 +0000
@@ -728,7 +728,7 @@ class ha_ndbcluster: public handler
   int ndb_update_row(const uchar *old_data, uchar *new_data,
                      int is_bulk_update);
 
-  static Thd_ndb* seize_thd_ndb();
+  static Thd_ndb* seize_thd_ndb(THD*);
   static void release_thd_ndb(Thd_ndb* thd_ndb);
  
 static void set_dbname(const char *pathname, char *dbname);

=== modified file 'sql/ha_ndbcluster_binlog.cc'
--- a/sql/ha_ndbcluster_binlog.cc	2011-09-29 13:32:44 +0000
+++ b/sql/ha_ndbcluster_binlog.cc	2011-10-17 12:43:31 +0000
@@ -2098,7 +2098,7 @@ int ndbcluster_log_schema_op(THD *thd,
   Thd_ndb *thd_ndb= get_thd_ndb(thd);
   if (!thd_ndb)
   {
-    if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
+    if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb(thd)))
     {
       sql_print_error("Could not allocate Thd_ndb object");
       DBUG_RETURN(1);
@@ -6765,7 +6765,7 @@ restart_cluster_failure:
   int have_injector_mutex_lock= 0;
   do_ndbcluster_binlog_close_connection= BCCC_exit;
 
-  if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
+  if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb(thd)))
   {
     sql_print_error("Could not allocate Thd_ndb object");
     ndb_binlog_thread_running= -1;

=== modified file 'sql/ha_ndbcluster_connection.cc'
--- a/sql/ha_ndbcluster_connection.cc	2011-06-30 15:59:25 +0000
+++ b/sql/ha_ndbcluster_connection.cc	2011-10-20 12:23:31 +0000
@@ -306,4 +306,117 @@ void ndb_get_connection_stats(Uint64* st
   }
 }
 
+static ST_FIELD_INFO ndb_transid_mysql_connection_map_fields_info[] =
+{
+  {
+    "mysql_connection_id",
+    MY_INT64_NUM_DECIMAL_DIGITS,
+    MYSQL_TYPE_LONGLONG,
+    0,
+    MY_I_S_UNSIGNED,
+    "",
+    SKIP_OPEN_TABLE
+  },
+
+  {
+    "node_id",
+    MY_INT64_NUM_DECIMAL_DIGITS,
+    MYSQL_TYPE_LONG,
+    0,
+    MY_I_S_UNSIGNED,
+    "",
+    SKIP_OPEN_TABLE
+  },
+  {
+    "ndb_transid",
+    MY_INT64_NUM_DECIMAL_DIGITS,
+    MYSQL_TYPE_LONGLONG,
+    0,
+    MY_I_S_UNSIGNED,
+    "",
+    SKIP_OPEN_TABLE
+  },
+
+  { 0, 0, MYSQL_TYPE_NULL, 0, 0, "", SKIP_OPEN_TABLE }
+};
+
+static
+int
+ndb_transid_mysql_connection_map_fill_table(THD* thd, TABLE_LIST* tables, COND* cond)
+{
+  DBUG_ENTER("ndb_transid_mysql_connection_map_fill_table");
+
+  const bool all = check_global_access(thd, PROCESS_ACL);
+  const ulonglong self = thd_get_thread_id(thd);
+
+  TABLE* table= tables->table;
+  for (uint i = 0; i < g_pool_alloc; i++)
+  {
+    if (g_pool[i])
+    {
+      g_pool[i]->lock_ndb_objects();
+      const Ndb * p = g_pool[i]->get_next_ndb_object(0);
+      while (p)
+      {
+        Uint64 connection_id = p->getCustomData64();
+        if ((connection_id == self) || all)
+        {
+          table->field[0]->set_notnull();
+          table->field[0]->store(p->getCustomData64(), true);
+          table->field[1]->set_notnull();
+          table->field[1]->store(g_pool[i]->node_id());
+          table->field[2]->set_notnull();
+          table->field[2]->store(p->getNextTransactionId(), true);
+          schema_table_store_record(thd, table);
+        }
+        p = g_pool[i]->get_next_ndb_object(p);
+      }
+      g_pool[i]->unlock_ndb_objects();
+    }
+  }
+
+  DBUG_RETURN(0);
+}
+
+static
+int
+ndb_transid_mysql_connection_map_init(void *p)
+{
+  DBUG_ENTER("ndb_transid_mysql_connection_map_init");
+  ST_SCHEMA_TABLE* schema = reinterpret_cast<ST_SCHEMA_TABLE*>(p);
+  schema->fields_info = ndb_transid_mysql_connection_map_fields_info;
+  schema->fill_table = ndb_transid_mysql_connection_map_fill_table;
+  DBUG_RETURN(0);
+}
+
+static
+int
+ndb_transid_mysql_connection_map_deinit(void *p)
+{
+  DBUG_ENTER("ndb_transid_mysql_connection_map_deinit");
+  DBUG_RETURN(0);
+}
+
+#include <mysql/plugin.h>
+static struct st_mysql_information_schema i_s_info =
+{
+  MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION
+};
+
+struct st_mysql_plugin i_s_ndb_transid_mysql_connection_map_plugin =
+{
+  MYSQL_INFORMATION_SCHEMA_PLUGIN,
+  &i_s_info,
+  "ndb_transid_mysql_connection_map",
+  "Oracle Corporation",
+  "Map between mysql connection id and ndb transaction id",
+  PLUGIN_LICENSE_GPL,
+  ndb_transid_mysql_connection_map_init,
+  ndb_transid_mysql_connection_map_deinit,
+  0x0001,
+  NULL,
+  NULL,
+  NULL
+};
+
 #endif /* WITH_NDBCLUSTER_STORAGE_ENGINE */
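
As an illustration of the new INFORMATION_SCHEMA table (a hypothetical query,
not part of this commit): the fill function above exposes every Ndb object's
connection id, node id and next transaction id, and a user without the
PROCESS privilege only sees rows for its own connection, so a session can
inspect its own mapping with:

  SELECT mysql_connection_id, node_id, ndb_transid
    FROM information_schema.ndb_transid_mysql_connection_map;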

=== modified file 'sql/ha_ndbinfo.cc'
--- a/sql/ha_ndbinfo.cc	2011-08-27 09:54:26 +0000
+++ b/sql/ha_ndbinfo.cc	2011-10-17 12:43:31 +0000
@@ -736,7 +736,9 @@ ndbinfo_find_files(handlerton *hton, THD
 
 handlerton* ndbinfo_hton;
 
-int ndbinfo_init(void *plugin)
+static
+int
+ndbinfo_init(void *plugin)
 {
   DBUG_ENTER("ndbinfo_init");
 
@@ -779,7 +781,9 @@ int ndbinfo_init(void *plugin)
   DBUG_RETURN(0);
 }
 
-int ndbinfo_deinit(void *plugin)
+static
+int
+ndbinfo_deinit(void *plugin)
 {
   DBUG_ENTER("ndbinfo_deinit");
 
@@ -804,6 +808,27 @@ struct st_mysql_sys_var* ndbinfo_system_
   NULL
 };
 
+struct st_mysql_storage_engine ndbinfo_storage_engine=
+{
+  MYSQL_HANDLERTON_INTERFACE_VERSION
+};
+
+struct st_mysql_plugin ndbinfo_plugin =
+{
+  MYSQL_STORAGE_ENGINE_PLUGIN,
+  &ndbinfo_storage_engine,
+  "ndbinfo",
+  "Sun Microsystems Inc.",
+  "MySQL Cluster system information storage engine",
+  PLUGIN_LICENSE_GPL,
+  ndbinfo_init,               /* plugin init */
+  ndbinfo_deinit,             /* plugin deinit */
+  0x0001,                     /* plugin version */
+  NULL,                       /* status variables */
+  ndbinfo_system_variables,   /* system variables */
+  NULL                        /* config options */
+};
+
 template class Vector<const NdbInfoRecAttr*>;
 
 #endif

=== modified file 'sql/ha_ndbinfo.h'
--- a/sql/ha_ndbinfo.h	2011-06-30 15:59:25 +0000
+++ b/sql/ha_ndbinfo.h	2011-10-17 12:43:31 +0000
@@ -20,9 +20,6 @@
 
 #include <mysql/plugin.h>
 
-int ndbinfo_init(void *plugin);
-int ndbinfo_deinit(void *plugin);
-
 class ha_ndbinfo: public handler
 {
 public:

=== modified file 'sql/sql_parse.cc'
--- a/sql/sql_parse.cc	2011-06-30 15:59:25 +0000
+++ b/sql/sql_parse.cc	2011-10-17 09:17:54 +0000
@@ -1476,6 +1476,14 @@ bool dispatch_command(enum enum_server_c
   case COM_REFRESH:
   {
     int not_used;
+#ifndef MCP_BUG13001491
+    /*
+      Initialize thd->lex since it's used in many base functions, such as
+      open_tables(). Otherwise, it remains uninitialized and may cause a crash
+      during execution of COM_REFRESH.
+    */
+    lex_start(thd);
+#endif
     status_var_increment(thd->status_var.com_stat[SQLCOM_FLUSH]);
     ulong options= (ulong) (uchar) packet[0];
     if (check_global_access(thd,RELOAD_ACL))
@@ -6978,7 +6986,18 @@ bool reload_acl_and_cache(THD *thd, ulon
     if (ha_flush_logs(NULL))
       result=1;
     if (flush_error_log())
+#ifndef MCP_BUG13001491
+    {
+      /*
+        If flush_error_log() failed, my_error() has not been called,
+        so we have to call it here to keep the protocol.
+      */
+      my_error(ER_UNKNOWN_ERROR, MYF(0));
+      result= 1;
+    }
+#else
       result=1;
+#endif
   }
 #ifdef HAVE_QUERY_CACHE
   if (options & REFRESH_QUERY_CACHE_FREE)

=== modified file 'storage/ndb/include/ndbapi/Ndb.hpp'
--- a/storage/ndb/include/ndbapi/Ndb.hpp	2011-09-09 13:13:52 +0000
+++ b/storage/ndb/include/ndbapi/Ndb.hpp	2011-10-20 12:51:03 +0000
@@ -1762,7 +1762,19 @@ public:
   /* Get/Set per-Ndb custom data pointer */
   void setCustomData(void*);
   void* getCustomData() const;
-  
+
+  /* Get/Set per-Ndb custom 64-bit data value.
+   * NOTE: shares storage with the void* above,
+   * i.e. cannot be used together with setCustomData()
+   */
+  void setCustomData64(Uint64);
+  Uint64 getCustomData64() const;
+
+  /**
+   * The transid that the next startTransaction() on this ndb-object will get
+   */
+  Uint64 getNextTransactionId() const;
+
   /* Some client behaviour counters to assist
    * optimisation
    */
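
A minimal sketch of how these accessors fit together (condensed from the
seize_thd_ndb() and fill_table hunks elsewhere in this mail; not a complete
program). Because the 64-bit value shares storage with the void* custom data,
a client must use either setCustomData() or setCustomData64() on a given Ndb
object, never both:

  ndb->setCustomData64(thd_get_thread_id(thd)); // tag Ndb with connection id
  ...
  Uint64 connection_id = ndb->getCustomData64();      // read the tag back
  Uint64 next_transid  = ndb->getNextTransactionId(); // transid of next startTransaction()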

=== modified file 'storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp'
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp	2011-09-09 13:13:52 +0000
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp	2011-10-20 12:51:03 +0000
@@ -51,6 +51,7 @@
 
 #include <NdbSleep.h>
 #include <SafeCounter.hpp>
+#include <SectionReader.hpp>
 
 #define ZREPORT_MEMORY_USAGE 1000
 
@@ -2678,6 +2679,31 @@ Cmvmi::execTESTSIG(Signal* signal){
     return;
   }
 
+  /**
+   * Testing Api fragmented signal send/receive
+   */
+  if (testType == 40)
+  {
+    /* Fragmented signal sent from Api, we'll check it and return it */
+    Uint32 expectedVal = 0;
+    for (Uint32 s = 0; s < handle.m_cnt; s++)
+    {
+      SectionReader sr(handle.m_ptr[s].i, getSectionSegmentPool());
+      Uint32 received;
+      while (sr.getWord(&received))
+      {
+        ndbrequire(received == expectedVal ++);
+      }
+    }
+
+    /* Now return it back to the Api, no callback, so framework
+     * can time-slice the send
+     */
+    sendFragmentedSignal(ref, GSN_TESTSIG, signal, signal->length(), JBB, &handle);
+
+    return;
+  }
+
   if(signal->getSendersBlockRef() == ref){
     /**
      * Signal from API (not via NodeReceiverGroup)

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-10-13 20:08:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-10-20 12:51:03 +0000
@@ -20680,31 +20680,6 @@ Dbdict::createFile_parse(Signal* signal,
     return;
   }
 
-  /**
-   * auto-connect
-   */
-  if (f.FilegroupId == RNIL && f.FilegroupVersion == RNIL)
-  {
-    jam();
-    Filegroup_hash::Iterator it;
-    c_filegroup_hash.first(it);
-    while (!it.isNull())
-    {
-      jam();
-      if ((f.FileType == DictTabInfo::Undofile &&
-           it.curr.p->m_type == DictTabInfo::LogfileGroup) ||
-          (f.FileType == DictTabInfo::Datafile &&
-           it.curr.p->m_type == DictTabInfo::Tablespace))
-      {
-        jam();
-        f.FilegroupId = it.curr.p->key;
-        f.FilegroupVersion = it.curr.p->m_version;
-        break;
-      }
-      c_filegroup_hash.next(it);
-    }
-  }
-
   // Get Filegroup
   FilegroupPtr fg_ptr;
   if(!c_filegroup_hash.find(fg_ptr, f.FilegroupId))
@@ -21433,21 +21408,6 @@ Dbdict::createFilegroup_parse(Signal* si
       setError(error, CreateFilegroupRef::InvalidExtentSize, __LINE__);
       return;
     }
-
-    /**
-     * auto-connect
-     */
-    if (fg.TS_LogfileGroupId == RNIL && fg.TS_LogfileGroupVersion == RNIL)
-    {
-      jam();
-      Filegroup_hash::Iterator it;
-      if (c_filegroup_hash.first(it))
-      {
-        jam();
-        fg.TS_LogfileGroupId = it.curr.p->key;
-        fg.TS_LogfileGroupVersion = it.curr.p->m_version;
-      }
-    }
   }
   else if(fg.FilegroupType == DictTabInfo::LogfileGroup)
   {

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-10-13 20:08:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-10-20 12:51:03 +0000
@@ -23447,12 +23447,8 @@ Dblqh::ndbinfo_write_op(Ndbinfo::Row & r
   row.write_uint32(tcPtr.p->tcBlockref); // tcref
   row.write_uint32(tcPtr.p->applRef);    // apiref
 
-  char transid[64];
-  BaseString::snprintf(transid, sizeof(transid),
-                       "%.8x.%.8x",
-                       tcPtr.p->transid[0],
-                       tcPtr.p->transid[1]);
-  row.write_string(transid);
+  row.write_uint32(tcPtr.p->transid[0]);
+  row.write_uint32(tcPtr.p->transid[1]);
   row.write_uint32(tcPtr.p->tableref);
   row.write_uint32(tcPtr.p->fragmentid);
 

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-09-29 13:07:26 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-10-20 12:51:03 +0000
@@ -3601,6 +3601,7 @@ Dbspj::computeHash(Signal* signal,
     (MAX_KEY_SIZE_IN_WORDS + 1) / 2;
   Uint64 tmp64[MAX_KEY_SIZE_IN_LONG_WORDS];
   Uint32 *tmp32 = (Uint32*)tmp64;
+  ndbassert(ptr.sz <= MAX_KEY_SIZE_IN_WORDS);
   copy(tmp32, ptr);
 
   const KeyDescriptor* desc = g_key_descriptor_pool.getPtr(tableId);
@@ -3639,6 +3640,7 @@ Dbspj::computePartitionHash(Signal* sign
   Uint64 *tmp64 = _space;
   Uint32 *tmp32 = (Uint32*)tmp64;
   Uint32 sz = ptr.sz;
+  ndbassert(ptr.sz <= MAX_KEY_SIZE_IN_WORDS);
   copy(tmp32, ptr);
 
   const KeyDescriptor* desc = g_key_descriptor_pool.getPtr(tableId);
@@ -4456,6 +4458,12 @@ Dbspj::parseScanIndex(Build_context& ctx
     data.m_firstExecution = true;
     data.m_batch_chunks = 0;
 
+    /**
+     * We will need to look at the parameters again if the scan is pruned and the prune
+     * key uses parameter values. Therefore, we keep a reference to the start of the
+     * parameter buffer.
+     */
+    DABuffer origParam = param;
     err = parseDA(ctx, requestPtr, treeNodePtr,
                   tree, treeBits, param, paramBits);
     if (unlikely(err != 0))
@@ -4482,7 +4490,7 @@ Dbspj::parseScanIndex(Build_context& ctx
         /**
          * Expand pattern into a new pattern (with linked values)
          */
-        err = expand(pattern, treeNodePtr, tree, len, param, cnt);
+        err = expand(pattern, treeNodePtr, tree, len, origParam, cnt);
         if (unlikely(err != 0))
           break;
 
@@ -4501,7 +4509,7 @@ Dbspj::parseScanIndex(Build_context& ctx
          */
         Uint32 prunePtrI = RNIL;
         bool hasNull;
-        err = expand(prunePtrI, tree, len, param, cnt, hasNull);
+        err = expand(prunePtrI, tree, len, origParam, cnt, hasNull);
         if (unlikely(err != 0))
           break;
 
@@ -6189,6 +6197,7 @@ Uint32
 Dbspj::appendToPattern(Local_pattern_store & pattern,
                        DABuffer & tree, Uint32 len)
 {
+  jam();
   if (unlikely(tree.ptr + len > tree.end))
     return DbspjErr::InvalidTreeNodeSpecification;
 
@@ -6203,6 +6212,7 @@ Uint32
 Dbspj::appendParamToPattern(Local_pattern_store& dst,
                             const RowPtr::Linear & row, Uint32 col)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6218,6 +6228,7 @@ Uint32
 Dbspj::appendParamHeadToPattern(Local_pattern_store& dst,
                                 const RowPtr::Linear & row, Uint32 col)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6235,6 +6246,7 @@ Dbspj::appendTreeToSection(Uint32 & ptrI
   /**
    * TODO handle errors
    */
+  jam();
   Uint32 SZ = 16;
   Uint32 tmp[16];
   while (len > SZ)
@@ -6293,6 +6305,7 @@ Uint32
 Dbspj::appendColToSection(Uint32 & dst, const RowPtr::Section & row,
                           Uint32 col, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6316,6 +6329,7 @@ Uint32
 Dbspj::appendColToSection(Uint32 & dst, const RowPtr::Linear & row,
                           Uint32 col, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6335,6 +6349,7 @@ Uint32
 Dbspj::appendAttrinfoToSection(Uint32 & dst, const RowPtr::Linear & row,
                                Uint32 col, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6353,6 +6368,7 @@ Uint32
 Dbspj::appendAttrinfoToSection(Uint32 & dst, const RowPtr::Section & row,
                                Uint32 col, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6378,6 +6394,7 @@ Dbspj::appendAttrinfoToSection(Uint32 &
 Uint32
 Dbspj::appendPkColToSection(Uint32 & dst, const RowPtr::Section & row, Uint32 col)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6400,6 +6417,7 @@ Dbspj::appendPkColToSection(Uint32 & dst
 Uint32
 Dbspj::appendPkColToSection(Uint32 & dst, const RowPtr::Linear & row, Uint32 col)
 {
+  jam();
   Uint32 offset = row.m_header->m_offset[col];
   Uint32 tmp = row.m_data[offset];
   Uint32 len = AttributeHeader::getDataSize(tmp);
@@ -6413,6 +6431,7 @@ Dbspj::appendFromParent(Uint32 & dst, Lo
                         Uint32 levels, const RowPtr & rowptr,
                         bool& hasNull)
 {
+  jam();
   Ptr<TreeNode> treeNodePtr;
   m_treenode_pool.getPtr(treeNodePtr, rowptr.m_src_node_ptrI);
   Uint32 corrVal = rowptr.m_src_correlation;
@@ -6527,6 +6546,7 @@ Dbspj::appendDataToSection(Uint32 & ptrI
                            Local_pattern_store::ConstDataBufferIterator& it,
                            Uint32 len, bool& hasNull)
 {
+  jam();
   if (unlikely(len==0))
   {
     jam();
@@ -6732,6 +6752,7 @@ Uint32
 Dbspj::expand(Uint32 & ptrI, DABuffer& pattern, Uint32 len,
               DABuffer& param, Uint32 paramCnt, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle error
    */
@@ -6816,6 +6837,7 @@ Dbspj::expand(Local_pattern_store& dst,
               DABuffer& pattern, Uint32 len,
               DABuffer& param, Uint32 paramCnt)
 {
+  jam();
   /**
    * TODO handle error
    */

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2011-10-13 20:08:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2011-10-20 12:51:03 +0000
@@ -13343,17 +13343,12 @@ Dbtc::ndbinfo_write_trans(Ndbinfo::Row &
     return false;
   }
 
-  char transid[64];
-  BaseString::snprintf(transid, sizeof(transid),
-                       "%.8x.%.8x",
-                       transPtr.p->transid[0],
-                       transPtr.p->transid[1]);
-
   row.write_uint32(getOwnNodeId());
   row.write_uint32(instance());   // block instance
   row.write_uint32(transPtr.i);
   row.write_uint32(transPtr.p->ndbapiBlockref);
-  row.write_string(transid);
+  row.write_uint32(transPtr.p->transid[0]);
+  row.write_uint32(transPtr.p->transid[1]);
   row.write_uint32(conState);
   row.write_uint32(transPtr.p->m_flags);
   row.write_uint32(transPtr.p->lqhkeyreqrec);

=== modified file 'storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp'
--- a/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp	2011-10-17 13:37:47 +0000
@@ -149,8 +149,9 @@ public:
   // schema trans
   Uint32 c_schemaTransId;
   Uint32 c_schemaTransKey;
-  Uint32 c_hashMapId;
-  Uint32 c_hashMapVersion;
+  // inter-signal transient store of: hash_map, logfile group, tablespace
+  Uint32 c_objectId;
+  Uint32 c_objectVersion;
 
 public:
   Ndbcntr(Block_context&);

=== modified file 'storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp	2011-07-09 11:16:31 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp	2011-10-20 12:51:03 +0000
@@ -2204,8 +2204,8 @@ Ndbcntr::execCREATE_HASH_MAP_CONF(Signal
   if (conf->senderData == 0)
   {
     jam();
-    c_hashMapId = conf->objectId;
-    c_hashMapVersion = conf->objectVersion;
+    c_objectId = conf->objectId;
+    c_objectVersion = conf->objectVersion;
   }
 
   createSystableLab(signal, 0);
@@ -2274,8 +2274,8 @@ Ndbcntr::createDDObjects(Signal * signal
     {
       jam();
       fg.TS_ExtentSize = Uint32(entry->size);
-      fg.TS_LogfileGroupId = RNIL;
-      fg.TS_LogfileGroupVersion = RNIL;
+      fg.TS_LogfileGroupId = c_objectId;
+      fg.TS_LogfileGroupVersion = c_objectVersion;
     }
 
     SimpleProperties::UnpackStatus s;
@@ -2310,8 +2310,8 @@ Ndbcntr::createDDObjects(Signal * signal
     DictFilegroupInfo::File f; f.init();
     BaseString::snprintf(f.FileName, sizeof(f.FileName), "%s", entry->name);
     f.FileType = entry->type;
-    f.FilegroupId = RNIL;
-    f.FilegroupVersion = RNIL;
+    f.FilegroupId = c_objectId;
+    f.FilegroupVersion = c_objectVersion;
     f.FileSizeHi = Uint32(entry->size >> 32);
     f.FileSizeLo = Uint32(entry->size);
 
@@ -2371,6 +2371,8 @@ Ndbcntr::execCREATE_FILEGROUP_CONF(Signa
 {
   jamEntry();
   CreateFilegroupConf* conf = (CreateFilegroupConf*)signal->getDataPtr();
+  c_objectId = conf->filegroupId;
+  c_objectVersion = conf->filegroupVersion;
   createDDObjects(signal, conf->senderData + 1);
 }
 
@@ -2433,8 +2435,8 @@ void Ndbcntr::createSystableLab(Signal*
   //w.add(DictTabInfo::KeyLength, 1);
   w.add(DictTabInfo::TableTypeVal, (Uint32)table.tableType);
   w.add(DictTabInfo::SingleUserMode, (Uint32)NDB_SUM_READ_WRITE);
-  w.add(DictTabInfo::HashMapObjectId, c_hashMapId);
-  w.add(DictTabInfo::HashMapVersion, c_hashMapVersion);
+  w.add(DictTabInfo::HashMapObjectId, c_objectId);
+  w.add(DictTabInfo::HashMapVersion, c_objectVersion);
 
   for (unsigned i = 0; i < table.columnCount; i++) {
     const SysColumn& column = table.columnList[i];

=== modified file 'storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-09-14 12:19:37 +0000
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-10-20 12:51:03 +0000
@@ -470,31 +470,34 @@ void Qmgr::setCCDelay(UintR aCCDelay)
 void Qmgr::execCONNECT_REP(Signal* signal)
 {
   jamEntry();
-  const Uint32 nodeId = signal->theData[0];
+  const Uint32 connectedNodeId = signal->theData[0];
 
   if (ERROR_INSERTED(931))
   {
     jam();
-    ndbout_c("Discarding CONNECT_REP(%d)", nodeId);
-    infoEvent("Discarding CONNECT_REP(%d)", nodeId);
+    ndbout_c("Discarding CONNECT_REP(%d)", connectedNodeId);
+    infoEvent("Discarding CONNECT_REP(%d)", connectedNodeId);
     return;
   }
 
-  c_connectedNodes.set(nodeId);
+  c_connectedNodes.set(connectedNodeId);
 
-  NodeRecPtr nodePtr;
-  nodePtr.i = nodeId;
-  ptrCheckGuard(nodePtr, MAX_NODES, nodeRec);
-  nodePtr.p->m_secret = 0;
+  {
+    NodeRecPtr connectedNodePtr;
+    connectedNodePtr.i = connectedNodeId;
+    ptrCheckGuard(connectedNodePtr, MAX_NODES, nodeRec);
+    connectedNodePtr.p->m_secret = 0;
+  }
 
-  nodePtr.i = getOwnNodeId();
-  ptrCheckGuard(nodePtr, MAX_NODES, nodeRec);
-  NodeInfo nodeInfo = getNodeInfo(nodeId);
-  switch(nodePtr.p->phase){
+  NodeRecPtr myNodePtr;
+  myNodePtr.i = getOwnNodeId();
+  ptrCheckGuard(myNodePtr, MAX_NODES, nodeRec);
+  NodeInfo connectedNodeInfo = getNodeInfo(connectedNodeId);
+  switch(myNodePtr.p->phase){
   case ZRUNNING:
-    if (nodeInfo.getType() == NodeInfo::DB)
+    if (connectedNodeInfo.getType() == NodeInfo::DB)
     {
-      ndbrequire(!c_clusterNodes.get(nodeId));
+      ndbrequire(!c_clusterNodes.get(connectedNodeId));
     }
   case ZSTARTING:
     jam();
@@ -504,16 +507,17 @@ void Qmgr::execCONNECT_REP(Signal* signa
     jam();
     return;
   case ZAPI_ACTIVE:
+    ndbrequire(false);
   case ZAPI_INACTIVE:
-    return;
+    ndbrequire(false);
   case ZINIT:
-    ndbrequire(getNodeInfo(nodeId).m_type == NodeInfo::MGM);
+    ndbrequire(getNodeInfo(connectedNodeId).m_type == NodeInfo::MGM);
     break;
   default:
     ndbrequire(false);
   }
 
-  if (nodeInfo.getType() != NodeInfo::DB)
+  if (connectedNodeInfo.getType() != NodeInfo::DB)
   {
     jam();
     return;
@@ -522,24 +526,24 @@ void Qmgr::execCONNECT_REP(Signal* signa
   switch(c_start.m_gsn){
   case GSN_CM_REGREQ:
     jam();
-    sendCmRegReq(signal, nodeId);
+    sendCmRegReq(signal, connectedNodeId);
 
     /**
      * We're waiting for CM_REGCONF; c_start.m_nodes contains all configured
      *   nodes
      */
-    ndbrequire(nodePtr.p->phase == ZSTARTING);
-    ndbrequire(c_start.m_nodes.isWaitingFor(nodeId));
+    ndbrequire(myNodePtr.p->phase == ZSTARTING);
+    ndbrequire(c_start.m_nodes.isWaitingFor(connectedNodeId));
     return;
   case GSN_CM_NODEINFOREQ:
     jam();
     
-    if (c_start.m_nodes.isWaitingFor(nodeId))
+    if (c_start.m_nodes.isWaitingFor(connectedNodeId))
     {
       jam();
       ndbrequire(getOwnNodeId() != cpresident);
-      ndbrequire(nodePtr.p->phase == ZSTARTING);
-      sendCmNodeInfoReq(signal, nodeId, nodePtr.p);
+      ndbrequire(myNodePtr.p->phase == ZSTARTING);
+      sendCmNodeInfoReq(signal, connectedNodeId, myNodePtr.p);
       return;
     }
     return;
@@ -547,17 +551,17 @@ void Qmgr::execCONNECT_REP(Signal* signa
     jam();
     
     ndbrequire(getOwnNodeId() != cpresident);
-    ndbrequire(nodePtr.p->phase == ZRUNNING);
-    if (c_start.m_nodes.isWaitingFor(nodeId))
+    ndbrequire(myNodePtr.p->phase == ZRUNNING);
+    if (c_start.m_nodes.isWaitingFor(connectedNodeId))
     {
       jam();
-      c_start.m_nodes.clearWaitingFor(nodeId);
+      c_start.m_nodes.clearWaitingFor(connectedNodeId);
       c_start.m_gsn = RNIL;
       
       NodeRecPtr addNodePtr;
-      addNodePtr.i = nodeId;
+      addNodePtr.i = connectedNodeId;
       ptrCheckGuard(addNodePtr, MAX_NDB_NODES, nodeRec);
-      cmAddPrepare(signal, addNodePtr, nodePtr.p);
+      cmAddPrepare(signal, addNodePtr, myNodePtr.p);
       return;
     }
   }
@@ -565,11 +569,11 @@ void Qmgr::execCONNECT_REP(Signal* signa
     (void)1;
   }
   
-  ndbrequire(!c_start.m_nodes.isWaitingFor(nodeId));
-  ndbrequire(!c_readnodes_nodes.get(nodeId));
-  c_readnodes_nodes.set(nodeId);
+  ndbrequire(!c_start.m_nodes.isWaitingFor(connectedNodeId));
+  ndbrequire(!c_readnodes_nodes.get(connectedNodeId));
+  c_readnodes_nodes.set(connectedNodeId);
   signal->theData[0] = reference();
-  sendSignal(calcQmgrBlockRef(nodeId), GSN_READ_NODESREQ, signal, 1, JBA);
+  sendSignal(calcQmgrBlockRef(connectedNodeId), GSN_READ_NODESREQ, signal, 1, JBA);
   return;
 }//Qmgr::execCONNECT_REP()
 
@@ -4788,7 +4792,9 @@ void Qmgr::failReport(Signal* signal,
     if (ERROR_INSERTED(938))
     {
       nodeFailCount++;
-      ndbout_c("QMGR : execFAIL_REP : %u nodes have failed", nodeFailCount);
+      ndbout_c("QMGR : execFAIL_REP(Failed : %u Source : %u  Cause : %u) : "
+               "%u nodes have failed", 
+               aFailedNode, sourceNode, aFailCause, nodeFailCount);
       /* Count DB nodes */
       Uint32 nodeCount = 0;
       for (Uint32 i = 1; i < MAX_NDB_NODES; i++)
@@ -6877,6 +6883,12 @@ Qmgr::execNODE_PINGCONF(Signal* signal)
     return;
   }
 
+  if (ERROR_INSERTED(938))
+  {
+    ndbout_c("QMGR : execNODE_PING_CONF() from %u in tick %u",
+             sendersNodeId, m_connectivity_check.m_tick);
+  }
+
   /* Node must have been pinged, we must be waiting for the response,
    * or the node must have already failed
    */

=== modified file 'storage/ndb/src/kernel/vm/NdbinfoTables.cpp'
--- a/storage/ndb/src/kernel/vm/NdbinfoTables.cpp	2011-10-13 09:13:33 +0000
+++ b/storage/ndb/src/kernel/vm/NdbinfoTables.cpp	2011-10-17 13:32:49 +0000
@@ -204,14 +204,15 @@ DECLARE_NDBINFO_TABLE(THREADSTAT, 18) =
   }
 };
 
-DECLARE_NDBINFO_TABLE(TRANSACTIONS, 10) =
-{ { "transactions", 10, 0, "transactions" },
+DECLARE_NDBINFO_TABLE(TRANSACTIONS, 11) =
+{ { "transactions", 11, 0, "transactions" },
   {
     {"node_id",             Ndbinfo::Number, "node id"},
     {"block_instance",      Ndbinfo::Number, "TC instance no"},
     {"objid",               Ndbinfo::Number, "Object id of transaction object"},
     {"apiref",              Ndbinfo::Number, "API reference"},
-    {"transid",             Ndbinfo::String, "Transaction id"},
+    {"transid0",            Ndbinfo::Number, "Transaction id"},
+    {"transid1",            Ndbinfo::Number, "Transaction id"},
     {"state",               Ndbinfo::Number, "Transaction state"},
     {"flags",               Ndbinfo::Number, "Transaction flags"},
     {"c_ops",               Ndbinfo::Number, "No of operations in transaction" },
@@ -220,15 +221,16 @@ DECLARE_NDBINFO_TABLE(TRANSACTIONS, 10)
   }
 };
 
-DECLARE_NDBINFO_TABLE(OPERATIONS, 11) =
-{ { "operations", 11, 0, "operations" },
+DECLARE_NDBINFO_TABLE(OPERATIONS, 12) =
+{ { "operations", 12, 0, "operations" },
   {
     {"node_id",             Ndbinfo::Number, "node id"},
     {"block_instance",      Ndbinfo::Number, "LQH instance no"},
     {"objid",               Ndbinfo::Number, "Object id of operation object"},
     {"tcref",               Ndbinfo::Number, "TC reference"},
     {"apiref",              Ndbinfo::Number, "API reference"},
-    {"transid",             Ndbinfo::String, "Transaction id"},
+    {"transid0",            Ndbinfo::Number, "Transaction id"},
+    {"transid1",            Ndbinfo::Number, "Transaction id"},
     {"tableid",             Ndbinfo::Number, "Table id"},
     {"fragmentid",          Ndbinfo::Number, "Fragment id"},
     {"op",                  Ndbinfo::Number, "Operation type"},

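A note on the transactions/operations schema change above: ndbinfo columns of
type Number carry Uint32 values, so the 64-bit transaction id is now exposed as
two words instead of a formatted string. A consumer can rebuild the full id; a
minimal sketch, assuming transid0 holds the low word and transid1 the high word
(check the producing block if word order matters):

    #include <stdint.h>

    // Hypothetical helper, not part of the patch: recombine the two
    // 32-bit ndbinfo columns into the original 64-bit transaction id.
    // Word order (transid0 low, transid1 high) is an assumption here.
    static inline uint64_t rebuildTransId(uint32_t transid0, uint32_t transid1)
    {
      return (uint64_t(transid1) << 32) | uint64_t(transid0);
    }
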
=== modified file 'storage/ndb/src/ndbapi/Ndb.cpp'
--- a/storage/ndb/src/ndbapi/Ndb.cpp	2011-09-07 17:12:12 +0000
+++ b/storage/ndb/src/ndbapi/Ndb.cpp	2011-10-17 12:43:31 +0000
@@ -2254,13 +2254,31 @@ Ndb::getNdbErrorDetail(const NdbError& e
 void
 Ndb::setCustomData(void* _customDataPtr)
 {
-  theImpl->customDataPtr = _customDataPtr;
+  theImpl->customData = Uint64(_customDataPtr);
 }
 
 void*
 Ndb::getCustomData() const
 {
-  return theImpl->customDataPtr;
+  return (void*)theImpl->customData;
+}
+
+void
+Ndb::setCustomData64(Uint64 _customData)
+{
+  theImpl->customData = _customData;
+}
+
+Uint64
+Ndb::getCustomData64() const
+{
+  return theImpl->customData;
+}
+
+Uint64
+Ndb::getNextTransactionId() const
+{
+  return theFirstTransId;
 }
 
 Uint32

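The widened customData slot serves both accessor pairs, so setCustomData()/
getCustomData() and the new 64-bit variants read and write the same Uint64
field. A usage sketch (assuming a 64-bit build, where a pointer round-trips
through Uint64 without loss; tagNdbObject() is a hypothetical caller, not API):

    #include <NdbApi.hpp>

    // Sketch: both accessor pairs alias one Uint64 slot, so the last
    // setter wins.
    void tagNdbObject(Ndb* ndb, void* userCtx, Uint64 userTag)
    {
      ndb->setCustomData(userCtx);       // stored internally as Uint64(userCtx)
      void* p = ndb->getCustomData();    // returns userCtx again

      ndb->setCustomData64(userTag);     // overwrites the same slot
      Uint64 t = ndb->getCustomData64();

      (void)p; (void)t;
    }
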
=== modified file 'storage/ndb/src/ndbapi/NdbImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbImpl.hpp	2011-09-09 13:13:52 +0000
+++ b/storage/ndb/src/ndbapi/NdbImpl.hpp	2011-10-20 12:51:03 +0000
@@ -129,7 +129,7 @@ public:
 
   BaseString m_systemPrefix; // Buffer for preformatted for <sys>/<def>/
   
-  void* customDataPtr;
+  Uint64 customData;
 
   Uint64 clientStats[ Ndb::NumClientStatistics ];
   

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilder.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-09-29 13:07:26 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-10-20 12:51:03 +0000
@@ -1661,7 +1661,8 @@ NdbQueryIndexScanOperationDefImpl::NdbQu
                            int& error)
   : NdbQueryScanOperationDefImpl(table,options,ident,ix,id,error),
   m_interface(*this), 
-  m_index(index)
+  m_index(index),
+  m_paramInPruneKey(false)
 {
   memset(&m_bound, 0, sizeof m_bound);
   if (bound!=NULL) {
@@ -2316,7 +2317,7 @@ NdbQueryLookupOperationDefImpl::appendKe
 
 
 Uint32
-NdbQueryIndexScanOperationDefImpl::appendPrunePattern(Uint32Buffer& serializedDef) const
+NdbQueryIndexScanOperationDefImpl::appendPrunePattern(Uint32Buffer& serializedDef)
 {
   Uint32 appendedPattern = 0;
 
@@ -2408,6 +2409,7 @@ NdbQueryIndexScanOperationDefImpl::appen
           }
           case NdbQueryOperandImpl::Param:
             appendedPattern |= QN_ScanIndexNode::SI_PRUNE_PARAMS;
+            m_paramInPruneKey = true;
             serializedDef.append(QueryPattern::param(paramCnt++));
             break;
           default:

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-09-29 13:07:26 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-10-20 12:51:03 +0000
@@ -382,6 +382,15 @@ public:
   virtual const IndexBound* getBounds() const
   { return NULL; } 
 
+  /** 
+   * True if this is a prunable scan and there are NdbQueryParamOperands in the
+   * distribution key.
+   */
+  virtual bool hasParamInPruneKey() const
+  {
+    return false;
+  }
+
  // Return 'true' if query type is a multi-row scan
   virtual bool isScanOperation() const = 0;
 
@@ -523,7 +532,7 @@ protected:
   virtual Uint32 appendBoundPattern(Uint32Buffer& serializedDef) const
   { return 0; }
 
-  virtual Uint32 appendPrunePattern(Uint32Buffer& serializedDef) const
+  virtual Uint32 appendPrunePattern(Uint32Buffer& serializedDef)
   { return 0; }
 
 }; // class NdbQueryScanOperationDefImpl
@@ -553,11 +562,16 @@ public:
   virtual const IndexBound* getBounds() const
   { return &m_bound; } 
 
+  bool hasParamInPruneKey() const
+  {
+    return m_paramInPruneKey;
+  }
+
 protected:
   // Append pattern for creating complete range bounds to serialized code 
   virtual Uint32 appendBoundPattern(Uint32Buffer& serializedDef) const;
 
-  virtual Uint32 appendPrunePattern(Uint32Buffer& serializedDef) const;
+  virtual Uint32 appendPrunePattern(Uint32Buffer& serializedDef);
 
 private:
 
@@ -583,6 +597,12 @@ private:
 
   /** True if there is a set of bounds.*/
   IndexBound m_bound;
+
+  /** 
+   * True if scan is prunable and there are NdbQueryParamOperands in the 
+   * distribution key.
+   */
+  bool m_paramInPruneKey;
 }; // class NdbQueryIndexScanOperationDefImpl
 
 

=== modified file 'storage/ndb/src/ndbapi/NdbQueryOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-09-20 10:49:34 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-10-20 12:51:03 +0000
@@ -4562,6 +4562,10 @@ NdbQueryOperationImpl::prepareAttrInfo(U
     {
       requestInfo |= QN_ScanIndexParameters::SIP_PARALLEL;
     }
+    if (def.hasParamInPruneKey())
+    {
+      requestInfo |= QN_ScanIndexParameters::SIP_PRUNE_PARAMS;
+    }
     param->requestInfo = requestInfo;
     // Check that both values fit in param->batchSize.
     assert(getMaxBatchRows() < (1<<QN_ScanIndexParameters::BatchRowBits));

=== modified file 'storage/ndb/src/ndbapi/Ndbinit.cpp'
--- a/storage/ndb/src/ndbapi/Ndbinit.cpp	2011-09-09 10:48:14 +0000
+++ b/storage/ndb/src/ndbapi/Ndbinit.cpp	2011-10-17 12:43:31 +0000
@@ -207,7 +207,7 @@ NdbImpl::NdbImpl(Ndb_cluster_connection
     wakeHandler(0),
     wakeContext(~Uint32(0)),
     m_ev_op(0),
-    customDataPtr(0)
+    customData(0)
 {
   int i;
   for (i = 0; i < MAX_NDB_NODES; i++) {

=== modified file 'storage/ndb/src/ndbapi/TransporterFacade.cpp'
--- a/storage/ndb/src/ndbapi/TransporterFacade.cpp	2011-09-09 13:13:52 +0000
+++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp	2011-10-20 12:51:03 +0000
@@ -1191,9 +1191,11 @@ TransporterFacade::sendFragmentedSignal(
       /* This section fits whole, move onto next */
       this_chunk_sz+= remaining_sec_sz;
       i++;
+      continue;
     }
     else
     {
+      assert(this_chunk_sz <= CHUNK_SZ);
       /* This section doesn't fit, truncate it */
       unsigned send_sz= CHUNK_SZ - this_chunk_sz;
       if (i != start_i)
@@ -1205,19 +1207,34 @@ TransporterFacade::sendFragmentedSignal(
          * The final piece does not need to be a multiple of
          * NDB_SECTION_SEGMENT_SZ
          * 
-         * Note that this can push this_chunk_sz above CHUNK_SZ
-         * Should probably round-down, but need to be careful of
-         * 'can't fit any' cases.  Instead, CHUNK_SZ is defined
-         * with some slack below MAX_SENT_MESSAGE_BYTESIZE
+         * We round down the available send space to the nearest whole 
+         * number of segments.
+         * If there's not enough space for one segment, then we round up
+         * to one segment.  This can make us send more than CHUNK_SZ, which
+         * is ok as it's defined as less than the maximum message length.
          */
-	send_sz=
-	  NDB_SECTION_SEGMENT_SZ
-	  *((send_sz+NDB_SECTION_SEGMENT_SZ-1)
-            /NDB_SECTION_SEGMENT_SZ);
-        if (send_sz > remaining_sec_sz)
-	  send_sz= remaining_sec_sz;
+        send_sz = (send_sz / NDB_SECTION_SEGMENT_SZ) * 
+          NDB_SECTION_SEGMENT_SZ;                        /* Round down */
+        send_sz = MAX(send_sz, NDB_SECTION_SEGMENT_SZ);  /* At least one */
+        send_sz = MIN(send_sz, remaining_sec_sz);        /* Only actual data */
+        
+        /* If we've squeezed the last bit of data in, jump out of 
+         * here to send the last fragment.
+         * Otherwise, send what we've collected so far.
+         */
+        if ((send_sz == remaining_sec_sz) &&      /* All sent */
+            (i == secs - 1))                      /* No more sections */
+        {
+          this_chunk_sz+=  remaining_sec_sz;
+          i++;
+          continue;
+        }
       }
 
+      /* At this point, there must be data to send in a further signal */
+      assert((send_sz < remaining_sec_sz) ||
+             (i < secs - 1));
+
       /* Modify tmp generic section ptr to describe truncated
        * section
        */
@@ -1256,9 +1273,6 @@ TransporterFacade::sendFragmentedSignal(
                  tmp_signal.readSignalNumber() == GSN_API_REGREQ);
         }
       }
-      // setup variables for next signal
-      start_i= i;
-      this_chunk_sz= 0;
       assert(remaining_sec_sz >= send_sz);
       Uint32 remaining= remaining_sec_sz - send_sz;
       tmp_ptr[i].sz= remaining;
@@ -1271,6 +1285,10 @@ TransporterFacade::sendFragmentedSignal(
       if (remaining == 0)
         /* This section's done, move onto the next */
 	i++;
+      
+      // setup variables for next signal
+      start_i= i;
+      this_chunk_sz= 0;
     }
   }
 

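The new rounding logic in sendFragmentedSignal() is worth stating on its own:
round the available space down to whole segments, send at least one segment
even if that overshoots the chunk budget, and never send more than the data
that remains. A minimal sketch of the same arithmetic (the 60-word segment size
is an assumption taken from the test constants further below, not from this
file):

    #include <algorithm>

    // Sketch of the send-size rounding above; values are illustrative.
    unsigned roundSendSize(unsigned spaceLeft, unsigned remainingData,
                           unsigned segmentWords = 60)
    {
      unsigned sendSz = (spaceLeft / segmentWords) * segmentWords; // round down
      sendSz = std::max(sendSz, segmentWords);   // at least one segment
      sendSz = std::min(sendSz, remainingData);  // only actual data
      return sendSz;
    }

When spaceLeft is smaller than one segment the result exceeds spaceLeft, which
is exactly the "can send more than CHUNK_SZ" case the comment allows for,
CHUNK_SZ being defined with slack below the maximum message size.
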
=== modified file 'storage/ndb/test/ndbapi/Makefile.am'
--- a/storage/ndb/test/ndbapi/Makefile.am	2011-09-13 13:47:43 +0000
+++ b/storage/ndb/test/ndbapi/Makefile.am	2011-10-20 12:51:03 +0000
@@ -112,6 +112,8 @@ testMgmd_CXXFLAGS = -I$(top_srcdir)/stor
 testSingleUserMode_SOURCES = testSingleUserMode.cpp
 testNativeDefault_SOURCES = testNativeDefault.cpp
 testNdbApi_SOURCES = testNdbApi.cpp
+testNdbApi_CXXFLAGS = -I$(top_srcdir)/storage/ndb/src/ndbapi \
+	-I$(top_srcdir)/storage/ndb/include/transporter
 testNodeRestart_SOURCES = testNodeRestart.cpp
 testUpgrade_SOURCES = testUpgrade.cpp
 testUpgrade_LDADD = $(LDADD) $(top_srcdir)/libmysql_r/libmysqlclient_r.la

=== modified file 'storage/ndb/test/ndbapi/testDict.cpp'
--- a/storage/ndb/test/ndbapi/testDict.cpp	2011-04-23 08:21:36 +0000
+++ b/storage/ndb/test/ndbapi/testDict.cpp	2011-10-20 12:21:10 +0000
@@ -8167,6 +8167,8 @@ runBug58277loadtable(NDBT_Context* ctx,
     int cnt = 0;
     for (int i = 0; i < rows; i++)
     {
+      int retries = 10;
+  retry:
       NdbTransaction* pTx = 0;
       CHK2((pTx = pNdb->startTransaction()) != 0, pNdb->getNdbError());
 
@@ -8183,7 +8185,19 @@ runBug58277loadtable(NDBT_Context* ctx,
         int x[] = {
          -630
         };
-        CHK3(pTx->execute(Commit) == 0, pTx->getNdbError(), x);
+        int res = pTx->execute(Commit);
+        if (res != 0 &&
+            pTx->getNdbError().status == NdbError::TemporaryError)
+        {
+          retries--;
+          if (retries >= 0)
+          {
+            pTx->close();
+            NdbSleep_MilliSleep(10);
+            goto retry;
+          }
+        }
+        CHK3(res == 0, pTx->getNdbError(), x);
         cnt++;
       }
       while (0);

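The retry added above uses a label and goto to restart the transaction; the
same bounded retry on NdbError::TemporaryError can be phrased as a loop. A
sketch, assuming the operation definitions are hoisted into the loop body
(defineOps() is a hypothetical stand-in for the insert definitions in
runBug58277loadtable()):

    #include <NdbApi.hpp>
    #include <NdbSleep.h>

    // Sketch: bounded retry of a transaction on temporary errors.
    int executeWithRetry(Ndb* pNdb, int maxRetries = 10)
    {
      for (int attempt = 0; attempt <= maxRetries; attempt++)
      {
        NdbTransaction* pTx = pNdb->startTransaction();
        if (pTx == 0)
          return -1;
        // ... defineOps(pTx) would define the operations here ...
        int res = pTx->execute(NdbTransaction::Commit);
        bool temporary =
          (res != 0 &&
           pTx->getNdbError().status == NdbError::TemporaryError);
        pTx->close();
        if (!temporary)
          return res;
        NdbSleep_MilliSleep(10);
      }
      return -1; // temporary errors persisted past the retry budget
    }
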
=== modified file 'storage/ndb/test/ndbapi/testNdbApi.cpp'
--- a/storage/ndb/test/ndbapi/testNdbApi.cpp	2011-09-29 06:48:39 +0000
+++ b/storage/ndb/test/ndbapi/testNdbApi.cpp	2011-10-14 13:24:26 +0000
@@ -25,6 +25,8 @@
 #include <random.h>
 #include <NdbTick.h>
 #include <my_sys.h>
+#include <SignalSender.hpp>
+#include <GlobalSignalNumbers.h>
 
 #define MAX_NDB_OBJECTS 32678
 
@@ -4972,6 +4974,635 @@ int runNdbClusterConnectionConnect(NDBT_
   return NDBT_OK;
 }
 
+/* Testing fragmented signal send/receive */
+
+/*
+  SectionStore
+
+  Abstraction of long section storage api.
+  Used by FragmentAssembler to assemble received long sections
+*/
+class SectionStore
+{
+public:
+  virtual ~SectionStore() {};
+  virtual int appendToSection(Uint32 secId, LinearSectionPtr ptr) = 0;
+};
+
+/*
+  Basic Section Store
+
+  Naive implementation using malloc.  Real usage might use something better.
+*/
+class BasicSectionStore : public SectionStore
+{
+public:
+  BasicSectionStore()
+  {
+    init();
+  };
+
+  ~BasicSectionStore()
+  {
+    freeStorage();
+  };
+
+  void init()
+  {
+    ptrs[0].p = NULL;
+    ptrs[0].sz = 0;
+
+    ptrs[2] = ptrs[1] = ptrs[0];
+  }
+
+  void freeStorage()
+  {
+    free(ptrs[0].p);
+    free(ptrs[1].p);
+    free(ptrs[2].p);
+  }
+
+  virtual int appendToSection(Uint32 secId, LinearSectionPtr ptr)
+  {
+    /* Potentially expensive re-alloc + copy */
+    assert(secId < 3);
+    
+    Uint32 existingSz = ptrs[secId].sz;
+    Uint32* existingBuff = ptrs[secId].p;
+
+    Uint32 newSize = existingSz + ptr.sz;
+    Uint32* newBuff = (Uint32*) realloc(existingBuff, newSize * 4);
+
+    if (!newBuff)
+      return -1;
+    
+    memcpy(newBuff + existingSz, ptr.p, ptr.sz * 4);
+    
+    ptrs[secId].p = newBuff;
+    ptrs[secId].sz = existingSz + ptr.sz;
+
+    return 0;
+  }
+    
+  LinearSectionPtr ptrs[3];
+};
+
+
+
+/*
+  FragmentAssembler
+
+  Used to assemble sections from multiple fragment signals, and 
+  produce a 'normal' signal.
+  
+  Requires a SectionStore implementation to accumulate the section
+  fragments
+
+  Might be a useful generic utility, or not.
+
+  Usage : 
+    FragmentAssembler fa(ss);
+    while (!fa.isComplete())
+    {
+      sig = waitSignal();
+      fa.handleSignal(sig, sections);
+    }
+
+    fa.getSignalHeader();
+    fa.getSignalBody();
+    fa.getSectionStore(); ..
+
+*/
+class FragmentAssembler
+{
+public:
+  enum AssemblyError
+  {
+    NoError = 0,
+    FragmentSequence = 1,
+    FragmentSource = 2,
+    FragmentIdentity = 3,
+    SectionAppend = 4
+  };
+
+  FragmentAssembler(SectionStore* _secStore):
+    secsReceived(0),
+    secStore(_secStore),
+    complete(false),
+    fragId(0),
+    sourceNode(0),
+    error(NoError)
+  {}
+
+  int handleSignal(const SignalHeader* sigHead,
+                   const Uint32* sigBody,
+                   LinearSectionPtr* sections)
+  {
+    Uint32 sigLen = sigHead->theLength;
+    
+    if (fragId == 0)
+    {
+      switch (sigHead->m_fragmentInfo)
+      {
+      case 0:
+      {
+        /* Not fragmented, pass through */
+        sh = *sigHead;
+        memcpy(signalBody, sigBody, sigLen * 4);
+        Uint32 numSecs = sigHead->m_noOfSections;
+        for (Uint32 i=0; i<numSecs; i++)
+        {
+          if (secStore->appendToSection(i, sections[i]) != 0)
+          {
+            error = SectionAppend;
+            return -1;
+          }
+        }
+        complete = true;
+        break;
+      }
+      case 1:
+      {
+        /* Start of fragmented signal */
+        Uint32 incomingFragId;
+        Uint32 incomingSourceNode;
+        Uint32 numSecsInFragment;
+        
+        if (handleFragmentSections(sigHead, sigBody, sections,
+                                   &incomingFragId, &incomingSourceNode,
+                                   &numSecsInFragment) != 0)
+          return -1;
+        
+        assert(incomingFragId != 0);
+        fragId = incomingFragId;
+        sourceNode = incomingSourceNode;
+        assert(numSecsInFragment > 0);
+        
+        break;
+      }
+      default:
+      {
+        /* Error, out of sequence fragment */
+        error = FragmentSequence;
+        return -1;
+        break;
+      }
+      }
+    }
+    else
+    {
+      /* FragId != 0 */
+      switch (sigHead->m_fragmentInfo)
+      {
+      case 0:
+      case 1:
+      {
+        /* Error, out of sequence fragment */
+        error = FragmentSequence;
+        return -1;
+      }
+      case 2:
+        /* Fall through */
+      case 3:
+      {
+        /* Body fragment */
+        Uint32 incomingFragId;
+        Uint32 incomingSourceNode;
+        Uint32 numSecsInFragment;
+        
+        if (handleFragmentSections(sigHead, sigBody, sections,
+                                   &incomingFragId, &incomingSourceNode,
+                                   &numSecsInFragment) != 0)
+          return -1;
+
+        if (incomingSourceNode != sourceNode)
+        {
+          /* Error in source node */
+          error = FragmentSource;
+          return -1;
+        }
+        if (incomingFragId != fragId)
+        {
+          error = FragmentIdentity;
+          return -1;
+        }
+        
+        if (sigHead->m_fragmentInfo == 3)
+        {
+          /* Final fragment, contains actual signal body */
+          memcpy(signalBody,
+                 sigBody,
+                 sigLen * 4);
+          sh = *sigHead;
+          sh.theLength = sigLen - (numSecsInFragment + 1);
+          sh.m_noOfSections = 
+            ((secsReceived & 4)? 1 : 0) +
+            ((secsReceived & 2)? 1 : 0) +
+            ((secsReceived & 1)? 1 : 0);
+          sh.m_fragmentInfo = 0;
+          
+          complete=true;
+        }
+        break;
+      }
+      default:
+      {
+        /* Bad fragmentinfo field */
+        error = FragmentSequence;
+        return -1;
+      }
+      }
+    }
+
+    return 0;
+  }
+
+  int handleSignal(NdbApiSignal* signal,
+                   LinearSectionPtr* sections)
+  {
+    return handleSignal(signal, signal->getDataPtr(), sections);
+  }
+
+  bool isComplete()
+  {
+    return complete;
+  }
+
+  /* Valid if isComplete() */
+  SignalHeader getSignalHeader()
+  {
+    return sh;
+  }
+  
+  /* Valid if isComplete() */
+  Uint32* getSignalBody()
+  {
+    return signalBody;
+  }
+
+  /* Valid if isComplete() */
+  Uint32 getSourceNode()
+  {
+    return sourceNode;
+  }
+
+  SectionStore* getSectionStore()
+  {
+    return secStore;
+  }
+
+  AssemblyError getError() const
+  {
+    return error;
+  }
+  
+private:
+  int handleFragmentSections(const SignalHeader* sigHead,
+                             const Uint32* sigBody,
+                             LinearSectionPtr* sections,
+                             Uint32* incomingFragId,
+                             Uint32* incomingSourceNode,
+                             Uint32* numSecsInFragment)
+  {
+    Uint32 sigLen = sigHead->theLength;
+    
+    *numSecsInFragment = sigHead->m_noOfSections;
+    assert(sigLen >= (1 + *numSecsInFragment));
+           
+    *incomingFragId = sigBody[sigLen - 1];
+    *incomingSourceNode = refToNode(sigHead->theSendersBlockRef);
+    const Uint32* secIds = &sigBody[sigLen - (*numSecsInFragment) - 1];
+    
+    for (Uint32 i=0; i < *numSecsInFragment; i++)
+    {
+      secsReceived |= (1 << secIds[i]);
+      
+      if (secStore->appendToSection(secIds[i], sections[i]) != 0)
+      {
+        error = SectionAppend;
+        return -1;
+      }
+    }
+    
+    return 0;
+  }
+
+  Uint32 secsReceived;
+  SectionStore* secStore;
+  bool complete;
+  Uint32 fragId;
+  Uint32 sourceNode;
+  SignalHeader sh;
+  Uint32 signalBody[NdbApiSignal::MaxSignalWords];
+  AssemblyError error;
+};                 
+
+static const Uint32 MAX_SEND_BYTES=32768; /* Align with TransporterDefinitions.hpp */
+static const Uint32 MAX_SEND_WORDS=MAX_SEND_BYTES/4;
+static const Uint32 SEGMENT_WORDS= 60; /* Align with SSPool etc */
+static const Uint32 SEGMENT_BYTES = SEGMENT_WORDS * 4;
+//static const Uint32 MAX_SEGS_PER_SEND=64; /* 6.3 */
+static const Uint32 MAX_SEGS_PER_SEND = (MAX_SEND_BYTES / SEGMENT_BYTES) - 2; /* Align with TransporterFacade.cpp */
+static const Uint32 MAX_WORDS_PER_SEND = MAX_SEGS_PER_SEND * SEGMENT_WORDS;
+static const Uint32 HALF_MAX_WORDS_PER_SEND = MAX_WORDS_PER_SEND / 2;
+static const Uint32 THIRD_MAX_WORDS_PER_SEND = MAX_WORDS_PER_SEND / 3;
+static const Uint32 MEDIUM_SIZE = 5000;
+
+/* Most problems occurred with section lengths around the boundary
+ * of the max amount sent - MAX_WORDS_PER_SEND - so we define interesting
+ * sizes to test behavior around these boundaries
+ */
+static Uint32 interestingSizes[] = 
+{
+  0,
+  1, 
+  MEDIUM_SIZE,
+  THIRD_MAX_WORDS_PER_SEND -1,
+  THIRD_MAX_WORDS_PER_SEND,
+  THIRD_MAX_WORDS_PER_SEND +1,
+  HALF_MAX_WORDS_PER_SEND -1,
+  HALF_MAX_WORDS_PER_SEND,
+  HALF_MAX_WORDS_PER_SEND + 1,
+  MAX_WORDS_PER_SEND -1, 
+  MAX_WORDS_PER_SEND, 
+  MAX_WORDS_PER_SEND + 1,
+  (2* MAX_SEND_WORDS) + 1,
+  1234 /* Random */
+};
+
+
+/* 
+   FragSignalChecker
+
+   Class for testing fragmented signal send + receive
+*/
+class FragSignalChecker
+{
+public:
+
+  Uint32* buffer;
+
+  FragSignalChecker()
+  {
+    buffer= NULL;
+    init();
+  }
+
+  ~FragSignalChecker()
+  {
+    free(buffer);
+  }
+
+  void init()
+  {
+    buffer = (Uint32*) malloc(getBufferSize());
+
+    if (buffer)
+    {
+      /* Init to a known pattern */
+      for (Uint32 i = 0; i < (getBufferSize()/4); i++)
+      {
+        buffer[i] = i;
+      }
+    }
+  }
+
+  static Uint32 getNumInterestingSizes()
+  {
+    return sizeof(interestingSizes) / sizeof(Uint32);
+  }
+
+  static Uint32 getNumIterationsRequired()
+  {
+    /* To get combinatorial coverage, need each of 3
+     * sections with each of the interesting sizes
+     */
+    Uint32 numSizes = getNumInterestingSizes();
+    return numSizes * numSizes * numSizes;
+  }
+
+  static Uint32 getSecSz(Uint32 secNum, Uint32 iter)
+  {
+    assert(secNum < 3);
+    Uint32 numSizes = getNumInterestingSizes();
+    Uint32 divisor = (secNum == 0 ? 1 : 
+                      secNum == 1 ? numSizes :
+                      numSizes * numSizes);
+    /* offset ensures only end sections are 0 length */
+    Uint32 index = (iter / divisor) % numSizes;
+    if ((index == 0) && (iter >= (divisor * numSizes)))
+      index = 1; /* Avoid lower numbered section being empty */
+    Uint32 value = interestingSizes[index];
+    if(value == 1234)
+    {
+      value = 1 + (rand() % (2* MAX_WORDS_PER_SEND));
+    }
+    return value;
+  }
+
+  static Uint32 getBufferSize()
+  {
+    const Uint32 MaxSectionWords = (2 * MAX_SEND_WORDS) + 1;
+    const Uint32 MaxTotalSectionsWords = MaxSectionWords * 3;
+    return MaxTotalSectionsWords * 4;
+  }
+
+  int sendRequest(SignalSender* ss, 
+                  Uint32* sizes)
+  {
+    /* 
+     * We want to try out various interactions between the
+     * 3 sections and the length of the data sent
+     * - All fit in one 'chunk'
+     * - None fit in one 'chunk'
+     * - Each ends on a chunk boundary
+     *
+     * Max send size is ~ 32kB
+     * Segment size is 60 words / 240 bytes
+     *  -> 136 segments / chunk
+     *  -> 134 segments / chunk 'normally' sent
+     *  -> 32160 bytes
+     */
+    g_err << "Sending "
+          << sizes[0]
+          << " " << sizes[1]
+          << " " << sizes[2]
+          << endl;
+    
+    const Uint32 numSections = 
+      (sizes[0] ? 1 : 0) + 
+      (sizes[1] ? 1 : 0) + 
+      (sizes[2] ? 1 : 0);
+    const Uint32 testType = 40;
+    const Uint32 fragmentLength = 1;
+    const Uint32 print = 1;
+    const Uint32 len = 5 + numSections;
+    SimpleSignal request(false);
+    
+    Uint32* signalBody = request.getDataPtrSend();
+    signalBody[0] = ss->getOwnRef();
+    signalBody[1] = testType;
+    signalBody[2] = fragmentLength;
+    signalBody[3] = print;
+    signalBody[4] = 0; /* Return count */
+    signalBody[5] = sizes[0];
+    signalBody[6] = sizes[1];
+    signalBody[7] = sizes[2];
+    
+    
+    request.ptr[0].sz = sizes[0];
+    request.ptr[0].p = &buffer[0];
+    request.ptr[1].sz = sizes[1];
+    request.ptr[1].p = &buffer[sizes[0]];
+    request.ptr[2].sz = sizes[2];
+    request.ptr[2].p = &buffer[sizes[0] + sizes[1]];
+    
+    request.header.m_noOfSections= numSections;
+    
+    int rc = 0;
+    ss->lock();
+    rc = ss->sendFragmentedSignal(ss->get_an_alive_node(),
+                                  request,
+                                  CMVMI,
+                                  GSN_TESTSIG,
+                                  len);
+    ss->unlock();
+    
+    if (rc != 0)
+    {
+      g_err << "Error sending signal" << endl;
+      return rc;
+    }
+    
+    return 0;
+  }
+
+  int waitResponse(SignalSender* ss,
+                   Uint32* expectedSz)
+  {
+    /* Here we need to wait for all of the signals which
+     * comprise a fragmented send, and check that
+     * the data is as expected
+     */
+    BasicSectionStore bss;
+    FragmentAssembler fa(&bss);
+    
+    while(true)
+    {
+      ss->lock();
+      SimpleSignal* response = ss->waitFor(10000);
+      ss->unlock();
+      
+      if (!response)
+      {
+        g_err << "Timed out waiting for response" << endl;
+        return -1;
+      }
+      
+      //response->print();
+      
+      if (response->header.theVerId_signalNumber == GSN_TESTSIG)
+      {
+        if (fa.handleSignal(&response->header,
+                            response->getDataPtr(),
+                            response->ptr) != 0)
+        {
+          g_err << "Error assembling fragmented signal."
+                << "  Error is "
+                << (Uint32) fa.getError()
+                << endl;
+          return -1;
+        }
+        
+        if (fa.isComplete())
+        {
+          Uint32 expectedWord = 0;
+          for (Uint32 i=0; i < 3; i++)
+          {
+            if (bss.ptrs[i].sz != expectedSz[i])
+            {
+              g_err << "Wrong size for section : "
+                    << i
+                    << " expected " << expectedSz[i]
+                    << " but received " << bss.ptrs[i].sz
+                    << endl;
+              return -1;
+            }
+            
+            for (Uint32 d=0; d < expectedSz[i]; d++)
+            {
+              if (bss.ptrs[i].p[d] != expectedWord)
+              {
+                g_err << "Bad data in section "
+                      << i
+                      << " at word number "
+                      << d
+                      << ".  Expected "
+                      << expectedWord
+                      << " but found "
+                      << bss.ptrs[i].p[d]
+                      << endl;
+                return -1;
+              }
+              expectedWord++;
+            }
+          }
+          
+          break;
+        }
+        
+      }
+    }
+    
+    return 0;
+  }
+  
+  int runTest(SignalSender* ss)
+  {
+    for (Uint32 iter=0; 
+         iter < getNumIterationsRequired(); 
+         iter++)
+    {
+      int rc;
+      Uint32 sizes[3];
+      sizes[0] = getSecSz(0, iter);
+      sizes[1] = getSecSz(1, iter);
+      sizes[2] = getSecSz(2, iter);
+      
+      /* Build request, including sections */
+      rc = sendRequest(ss, sizes);
+      if (rc != 0)
+      {
+        g_err << "Failed sending request on iteration " << iter 
+              << " with rc " << rc << endl;
+        return NDBT_FAILED;
+      }
+      
+      /* Wait for response */
+      rc = waitResponse(ss, sizes);
+      if (rc != 0)
+      {
+        g_err << "Failed waiting for response on iteration " << iter
+              << " with rc " << rc << endl;
+        return NDBT_FAILED;
+      }
+    }
+    
+    return NDBT_OK;
+  }
+};
+
+
+int testFragmentedSend(NDBT_Context* ctx, NDBT_Step* step){
+  Ndb* pNdb= GETNDB(step);
+  Ndb_cluster_connection* conn = &pNdb->get_ndb_cluster_connection();
+  SignalSender ss(conn);
+  FragSignalChecker fsc;
+  
+  return fsc.runTest(&ss);
+}
+
+
 
 NDBT_TESTSUITE(testNdbApi);
 TESTCASE("MaxNdb", 
@@ -5245,6 +5876,10 @@ TESTCASE("NdbClusterConnectSR",
   STEPS(runNdbClusterConnect, MAX_NODES);
   STEP(runRestarts); // Note after runNdbClusterConnect or else counting wrong
 }
+TESTCASE("TestFragmentedSend",
+         "Test fragmented send behaviour"){
+  INITIALIZER(testFragmentedSend);
+}
 NDBT_TESTSUITE_END(testNdbApi);
 
 int main(int argc, const char** argv){

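A note on the size enumeration in the new test: getSecSz() treats the iteration
counter as a three-digit number in base getNumInterestingSizes(), so a single
counter walks every (section0, section1, section2) size combination, which is
why getNumIterationsRequired() returns the cube. A standalone sketch of that
mapping, leaving out the test's special cases for zero-length and random sizes:

    // Sketch: mixed-radix decoding of one counter into three size indices.
    // With N interesting sizes, i in [0, N*N*N) covers all triples.
    void decodeIteration(unsigned i, unsigned numSizes, unsigned idx[3])
    {
      idx[0] = i % numSizes;                           // divisor 1
      idx[1] = (i / numSizes) % numSizes;              // divisor N
      idx[2] = (i / (numSizes * numSizes)) % numSizes; // divisor N*N
    }
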
=== modified file 'storage/ndb/test/ndbapi/testNodeRestart.cpp'
--- a/storage/ndb/test/ndbapi/testNodeRestart.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp	2011-10-17 13:54:09 +0000
@@ -4726,17 +4726,23 @@ int runSplitLatency25PctFail(NDBT_Contex
   /**
    * Now wait for half of cluster to die...
    */
-  ndbout_c("Waiting for half of cluster to die");
-  int not_started = 0;
   const int node_count = restarter.getNumDbNodes();
+  ndbout_c("Waiting for half of cluster (%u/%u) to die", node_count/2, node_count);
+  int not_started = 0;
   do
   {
     not_started = 0;
     for (int i = 0; i < node_count; i++)
     {
-      if (restarter.getNodeStatus(restarter.getDbNodeId(i)) == NDB_MGM_NODE_STATUS_NOT_STARTED)
+      int nodeId = restarter.getDbNodeId(i);
+      int status = restarter.getNodeStatus(nodeId);
+      ndbout_c("Node %u status %u", nodeId, status);
+      if (status == NDB_MGM_NODE_STATUS_NOT_STARTED)
         not_started++;
     }
+    NdbSleep_MilliSleep(2000);
+    ndbout_c("%u / %u in state NDB_MGM_NODE_STATUS_NOT_STARTED(%u)",
+             not_started, node_count, NDB_MGM_NODE_STATUS_NOT_STARTED);
   } while (2 * not_started != node_count);
 
   ndbout_c("Restarting cluster");

=== modified file 'storage/ndb/test/run-test/atrt.hpp'
--- a/storage/ndb/test/run-test/atrt.hpp	2011-10-03 11:06:06 +0000
+++ b/storage/ndb/test/run-test/atrt.hpp	2011-10-20 11:43:11 +0000
@@ -201,6 +201,7 @@ extern const char * g_ndbd_bin_path;
 extern const char * g_ndbmtd_bin_path;
 extern const char * g_mysqld_bin_path;
 extern const char * g_mysql_install_db_bin_path;
+extern const char * g_libmysqlclient_so_path;
 
 extern const char * g_search_path[];
 

=== added file 'storage/ndb/test/run-test/conf-daily-perf.cnf'
--- a/storage/ndb/test/run-test/conf-daily-perf.cnf	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/test/run-test/conf-daily-perf.cnf	2011-10-16 18:36:16 +0000
@@ -0,0 +1,64 @@
+[atrt]
+basedir = CHOOSE_dir
+baseport = 14000
+clusters = .4node
+fix-nodeid=1
+mt = 2
+
+[ndb_mgmd]
+
+[mysqld]
+skip-innodb
+loose-skip-bdb
+skip-grant-tables
+socket=mysql.sock
+
+ndbcluster=1
+ndb-force-send=1
+ndb-use-exact-count=0
+ndb-extra-logging=1
+ndb-autoincrement-prefetch-sz=256
+engine-condition-pushdown=1
+ndb-cluster-connection-pool=4
+
+key_buffer = 256M
+max_allowed_packet = 16M
+sort_buffer_size = 512K
+read_buffer_size = 256K
+read_rnd_buffer_size = 512K
+myisam_sort_buffer_size = 8M
+max-connections=200
+thread-cache-size=128
+
+query_cache_type = 0
+query_cache_size = 0
+table_open_cache=1024
+table_definition_cache=256
+
+[client]
+protocol=tcp
+
+[cluster_config.4node]
+ndb_mgmd = CHOOSE_host1
+ndbd = CHOOSE_host5,CHOOSE_host6,CHOOSE_host7,CHOOSE_host8
+ndbapi= CHOOSE_host2,CHOOSE_host3,CHOOSE_host4
+mysqld = CHOOSE_host1
+
+NoOfReplicas = 2
+IndexMemory = 250M
+DataMemory = 1500M
+BackupMemory = 64M
+MaxNoOfConcurrentScans = 100
+MaxNoOfSavedMessages= 5
+NoOfFragmentLogFiles = 8
+FragmentLogFileSize = 64M
+ODirect=1
+MaxNoOfExecutionThreads=8
+
+SharedGlobalMemory=256M
+DiskPageBufferMemory=256M
+FileSystemPath=/data0/autotest
+FileSystemPathDataFiles=/data1/autotest
+FileSystemPathUndoFiles=/data2/autotest
+InitialLogfileGroup=undo_buffer_size=64M;undofile01.dat:256M;undofile02.dat:256M
+InitialTablespace=datafile01.dat:256M;datafile02.dat:256M

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2011-10-13 20:08:25 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2011-10-20 12:51:03 +0000
@@ -1830,3 +1830,8 @@ max-time: 500
 cmd: testNdbApi
 args: -n NdbClusterConnectSR T1
 
+# Fragmented signal send
+max-time: 1800
+cmd: testNdbApi
+args: -n TestFragmentedSend T1
+

=== added file 'storage/ndb/test/run-test/daily-perf-tests.txt'
--- a/storage/ndb/test/run-test/daily-perf-tests.txt	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/test/run-test/daily-perf-tests.txt	2011-10-16 18:36:16 +0000
@@ -0,0 +1,140 @@
+# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+max-time: 300
+cmd: DbCreate
+args:
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 1
+type: bench
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 25
+type: bench
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 100
+type: bench
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 200
+type: bench
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 1 -proc 25
+type: bench
+
+# baseline
+max-time: 600
+cmd: flexAsynch
+args: -temp -con 2 -t 8 -r 2 -p 64 -ndbrecord
+type: bench
+
+# minimal record
+max-time: 600
+cmd: flexAsynch
+args: -temp -con 2 -t 8 -r 2 -p 64 -ndbrecord -a 2
+type: bench
+
+# 4k record
+max-time: 600
+cmd: flexAsynch
+args: -temp -con 2 -t 8 -r 2 -p 64 -ndbrecord -a 25 -s 40
+type: bench
+
+# baseline DD
+max-time: 600
+cmd: flexAsynch
+args: -dd -temp -con 2 -t 8 -r 2 -p 64 -ndbrecord
+type: bench
+
+# minimal record DD
+max-time: 600
+cmd: flexAsynch
+args: -dd -temp -con 2 -t 8 -r 2 -p 64 -ndbrecord -a 2
+type: bench
+
+# 4k record DD
+max-time: 600
+cmd: flexAsynch
+args: -dd -temp -con 2 -t 8 -r 2 -p 64 -ndbrecord -a 25 -s 40
+type: bench
+
+# sql
+max-time: 600
+client: ndb-sql-perf-create-table.sh
+args: t1
+
+max-time: 600
+client: ndb-sql-perf-select.sh
+args: t1 1 64
+mysqld: --ndb-cluster-connection-pool=1
+type: bench
+
+max-time: 600
+client: ndb-sql-perf-select.sh
+args: t1 1 64
+mysqld: --ndb-cluster-connection-pool=4
+type: bench
+
+max-time: 600
+client: ndb-sql-perf-update.sh
+args: t1 1 64
+mysqld: --ndb-cluster-connection-pool=1
+type: bench
+
+max-time: 600
+client: ndb-sql-perf-update.sh
+args: t1 1 64
+mysqld: --ndb-cluster-connection-pool=4
+type: bench
+
+max-time: 600
+client: ndb-sql-perf-drop-table.sh
+args: t1
+mysqld:
+
+# sql join
+max-time: 600
+client: ndb-sql-perf-load-tpcw.sh
+args:
+
+max-time: 600
+client: ndb-sql-perf-tpcw-getBestSeller.sh
+args:
+type: bench
+
+max-time: 600
+client: ndb-sql-perf-drop-tpcw.sh
+args:
+
+max-time: 600
+client: ndb-sql-perf-load-music-store.sh
+args:
+
+max-time: 600
+client: ndb-sql-perf-select-music-store.sh
+args:
+type: bench
+
+max-time: 600
+client: ndb-sql-perf-drop-music-store.sh
+args:
+

=== modified file 'storage/ndb/test/run-test/files.cpp'
--- a/storage/ndb/test/run-test/files.cpp	2011-10-13 20:08:25 +0000
+++ b/storage/ndb/test/run-test/files.cpp	2011-10-20 12:51:03 +0000
@@ -122,6 +122,24 @@ printfile(FILE* out, Properties& props,
   fflush(out);
 }
 
+static
+char *
+dirname(const char * path)
+{
+  char * s = strdup(path);
+  size_t len = strlen(s);
+  for (size_t i = 1; i<len; i++)
+  {
+    if (s[len - i] == '/')
+    {
+      s[len - i] = 0;
+      return s;
+    }
+  }
+  free(s);
+  return 0;
+}
+
 bool
 setup_files(atrt_config& config, int setup, int sshx)
 {
@@ -313,8 +331,23 @@ setup_files(atrt_config& config, int set
         }
         fprintf(fenv, "$PATH\n");
 	keys.push_back("PATH");
+
+        {
+          /**
+           * In 5.5...binaries aren't compiled with rpath
+           * So we need an explicit LD_LIBRARY_PATH
+           *
+           * Use path from libmysqlclient.so
+           */
+          char * dir = dirname(g_libmysqlclient_so_path);
+          fprintf(fenv, "LD_LIBRARY_PATH=%s:$LD_LIBRARY_PATH\n", dir);
+          free(dir);
+          keys.push_back("LD_LIBRARY_PATH");
+        }
+
 	for (size_t k = 0; k<keys.size(); k++)
 	  fprintf(fenv, "export %s\n", keys[k].c_str());
+
 	fflush(fenv);
 	fclose(fenv);
       }

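Note that the local dirname() above returns a strdup'd copy truncated at the
last '/', and 0 when no '/' is found, so the caller owns the returned buffer.
The call in setup_files() relies on g_libmysqlclient_so_path containing a
directory component; a defensive usage sketch:

    // Sketch: guard against a path with no directory part before use.
    char * dir = dirname(g_libmysqlclient_so_path);
    if (dir != 0)
    {
      fprintf(fenv, "LD_LIBRARY_PATH=%s:$LD_LIBRARY_PATH\n", dir);
      free(dir);
    }
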
=== modified file 'storage/ndb/test/run-test/main.cpp'
--- a/storage/ndb/test/run-test/main.cpp	2011-10-03 14:59:24 +0000
+++ b/storage/ndb/test/run-test/main.cpp	2011-10-20 11:43:11 +0000
@@ -86,6 +86,7 @@ const char * g_ndbd_bin_path = 0;
 const char * g_ndbmtd_bin_path = 0;
 const char * g_mysqld_bin_path = 0;
 const char * g_mysql_install_db_bin_path = 0;
+const char * g_libmysqlclient_so_path = 0;
 
 static struct
 {
@@ -93,11 +94,12 @@ static struct
   const char * exe;
   const char ** var;
 } g_binaries[] = {
-  { true,  "ndb_mgmd",         &g_ndb_mgmd_bin_path},
-  { true,  "ndbd",             &g_ndbd_bin_path },
-  { false, "ndbmtd",           &g_ndbmtd_bin_path },
-  { true,  "mysqld",           &g_mysqld_bin_path },
-  { true,  "mysql_install_db", &g_mysql_install_db_bin_path },
+  { true,  "ndb_mgmd",          &g_ndb_mgmd_bin_path},
+  { true,  "ndbd",              &g_ndbd_bin_path },
+  { false, "ndbmtd",            &g_ndbmtd_bin_path },
+  { true,  "mysqld",            &g_mysqld_bin_path },
+  { true,  "mysql_install_db",  &g_mysql_install_db_bin_path },
+  { true,  "libmysqlclient.so", &g_libmysqlclient_so_path },
   { true, 0, 0 }
 };
 
@@ -108,6 +110,8 @@ g_search_path[] =
   "libexec",
   "sbin",
   "scripts",
+  "lib",
+  "lib/mysql",
   0
 };
 static bool find_binaries();

=== modified file 'support-files/compiler_warnings.supp'
--- a/support-files/compiler_warnings.supp	2011-06-30 15:59:25 +0000
+++ b/support-files/compiler_warnings.supp	2011-10-20 11:45:13 +0000
@@ -59,23 +59,23 @@ db_vrfy.c : .*comparison is always false
 # Ignore all conversion warnings on windows 64
 # (Is safe as we are not yet supporting strings >= 2G)
 #
-.* : conversion from '__int64' to .*int'.*
-.* : conversion from '__int64' to 'uint8'.*
-.* : conversion from '__int64' to 'uint32'.*
-.* : conversion from '__int64' to 'u.*long'.*
-.* : conversion from '__int64' to 'long'.*
-.* : conversion from '__int64' to 'off_t'.*
-.* : conversion from '.*size_t' to .*int'.*
-.* : conversion from '.*size_t' to 'TaoCrypt::word32'.*
-.* : conversion from '.*size_t' to 'u.*long'.*
-.* : conversion from '.*size_t' to 'uint32'.*
-.* : conversion from '.*size_t' to 'off_t'.*
-.* : conversion from '.*size_t' to 'size_s'.*
-.* : conversion from '.*size_t' to 'DWORD'.*
-.* : conversion from '.*size_t' to 'uLongf'.*
-.* : conversion from '.*size_t' to 'UINT'.*
-.* : conversion from '.*size_t' to 'uInt'.*
-.* : conversion from '.*size_t' to 'uint16'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to .*int'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'uint8'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'uint32'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'u.*long'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'long'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'off_t'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to .*int'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'TaoCrypt::word32'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'u.*long'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'uint32'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'off_t'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'size_s'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'DWORD'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'uLongf'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'UINT'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'uInt'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'uint16'.*
 
 #
 # Ignore a few specific warnings in ndb

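The rewritten suppression patterns above use a negative lookahead:
^(?:(?!ndb).)*$ matches a file name only when it contains no occurrence of
"ndb", so the conversion warnings stay enabled for ndb code while remaining
suppressed elsewhere. A quick self-check of the pattern, as a sketch using
C++11 <regex> (assuming its default ECMAScript grammar, which supports (?!...)):

    #include <cassert>
    #include <regex>

    int main()
    {
      // Matches any string that does not contain "ndb" anywhere.
      std::regex notNdb("^(?:(?!ndb).)*$");

      assert(std::regex_match("sql/sql_parse.cc", notNdb));
      assert(!std::regex_match("storage/ndb/src/ndbapi/Ndb.cpp", notNdb));
      return 0;
    }
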
=== modified file 'tests/mysql_client_test.c'
--- a/tests/mysql_client_test.c	2011-06-30 15:55:35 +0000
+++ b/tests/mysql_client_test.c	2011-10-17 11:35:32 +0000
@@ -18399,6 +18399,87 @@ static void test_bug47485()
 }
 
 
+#ifndef MCP_BUG13001491
+/*
+  Bug#13001491: MYSQL_REFRESH CRASHES WHEN STORED ROUTINES ARE RUN CONCURRENTLY.
+*/
+static void test_bug13001491()
+{
+  int rc;
+  char query[MAX_TEST_QUERY_LENGTH];
+  MYSQL *c;
+
+  myheader("test_bug13001491");
+
+  my_snprintf(query, MAX_TEST_QUERY_LENGTH,
+           "GRANT ALL PRIVILEGES ON *.* TO mysqltest_u1@%s",
+           opt_host ? opt_host : "'localhost'");
+           
+  rc= mysql_query(mysql, query);
+  myquery(rc);
+
+  my_snprintf(query, MAX_TEST_QUERY_LENGTH,
+           "GRANT RELOAD ON *.* TO mysqltest_u1@%s",
+           opt_host ? opt_host : "'localhost'");
+           
+  rc= mysql_query(mysql, query);
+  myquery(rc);
+
+  c= mysql_client_init(NULL);
+
+  DIE_UNLESS(mysql_real_connect(c, opt_host, "mysqltest_u1", NULL,
+                                current_db, opt_port, opt_unix_socket,
+                                CLIENT_MULTI_STATEMENTS |
+                                CLIENT_MULTI_RESULTS));
+
+  rc= mysql_query(c, "DROP PROCEDURE IF EXISTS p1");
+  myquery(rc);
+
+  rc= mysql_query(c,
+    "CREATE PROCEDURE p1() "
+    "BEGIN "
+    " DECLARE CONTINUE HANDLER FOR SQLEXCEPTION BEGIN END; "
+    " SELECT COUNT(*) "
+    " FROM INFORMATION_SCHEMA.PROCESSLIST "
+    " GROUP BY user "
+    " ORDER BY NULL "
+    " INTO @a; "
+    "END");
+  myquery(rc);
+
+  rc= mysql_query(c, "CALL p1()");
+  myquery(rc);
+
+  mysql_free_result(mysql_store_result(c));
+
+  /* Check that mysql_refresh() succeeds without REFRESH_LOG. */
+  rc= mysql_refresh(c, REFRESH_GRANT |
+                       REFRESH_TABLES | REFRESH_HOSTS |
+                       REFRESH_STATUS | REFRESH_THREADS);
+  myquery(rc);
+
+  /*
+    Check that mysql_refresh(REFRESH_LOG) does not crash the server even if it
+    fails. mysql_refresh(REFRESH_LOG) fails when error log points to unavailable
+    location.
+  */
+  mysql_refresh(c, REFRESH_LOG);
+
+  rc= mysql_query(c, "DROP PROCEDURE p1");
+  myquery(rc);
+
+  mysql_close(c);
+  c= NULL;
+
+  my_snprintf(query, MAX_TEST_QUERY_LENGTH,
+           "DROP USER mysqltest_u1@%s",
+           opt_host ? opt_host : "'localhost'");
+           
+  rc= mysql_query(mysql, query);
+  myquery(rc);
+}
+
+#endif
 /*
   Read and parse arguments and MySQL options from my.cnf
 */
@@ -18725,6 +18806,9 @@ static struct my_tests_st my_tests[]= {
   { "test_bug42373", test_bug42373 },
   { "test_bug54041", test_bug54041 },
   { "test_bug47485", test_bug47485 },
+#ifndef MCP_BUG13001491
+  { "test_bug13001491", test_bug13001491 },
+#endif
   { 0, 0 }
 };
 
