List:Commits« Previous MessageNext Message »
From:Pekka Nousiainen Date:October 28 2011 9:11pm
Subject:bzr push into mysql-5.1-telco-7.0 branch (pekka.nousiainen:4590 to 4629)
View as plain text  
 4629 Frazer Clement	2011-10-28 [merge]
      Merge 6.3->7.0

    modified:
      storage/ndb/src/kernel/blocks/ERROR_codes.txt
      storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/test/ndbapi/testNodeRestart.cpp
      storage/ndb/test/run-test/daily-basic-tests.txt
 4628 Ole John Aske	2011-10-28
      
      Fix for handling errors during execute and retrieve of SPJ results
      from a pushed lookup query:
      
      If we failed to catch an error returned by
      NdbTransaction::execute(), or available through
      NdbQuery::getErrorCode(), a later ::fetchNext()
      would succeed without returning an error code. It could also return an
      incorrect result consisting of a partial result set from those NdbOperations
      which did return something before the failure was received.
      
      This fix will reuse the same error handling mechanism for a lookup
      which is already used by a scan query.
      
      This will put the query into a persistent 'failed' state where any
      further operations (except ::close()) on that query will return
      the error :
      
      "ERROR: 4816 A previous query operation failed, which you missed to catch." 

    modified:
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
 4627 jonas oreland	2011-10-28
      ndb - forgot to add file

    added:
      storage/ndb/include/kernel/statedesc.hpp
 4626 jonas oreland	2011-10-28 [merge]
      ndb - merge 70-tip

    modified:
      storage/ndb/test/src/HugoQueries.cpp
 4625 jonas oreland	2011-10-28
      ndb - new view for ndbinfo.{cluster|server}_transaction ndbinfo.{cluster|server}operations
        add quoting in views
        add support for prefix on lookup table (i.e add replace_tags in a few places)
      
        and yes, the state extractor is ugly...but it has been deemed good-enough for now
          (as all other solutions we could think of...also were ugly)

    added:
      storage/ndb/src/kernel/blocks/dblqh/DblqhStateDesc.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcStateDesc.cpp
    modified:
      mysql-test/suite/ndb/r/ndbinfo.result
      mysql-test/suite/ndb/t/ndbinfo.test
      storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
      storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
      storage/ndb/tools/CMakeLists.txt
      storage/ndb/tools/Makefile.am
      storage/ndb/tools/ndbinfo_sql.cpp
 4624 jonas oreland	2011-10-28
      ndb - fix inverted PROCESS_ACL check :-(

    modified:
      sql/ha_ndbcluster_connection.cc
 4623 Jonas Oreland	2011-10-24
      ndb - introduce OutputStream::write to avoid the warnings about incorrect format-specifier

    modified:
      storage/ndb/include/util/OutputStream.hpp
      storage/ndb/src/common/util/OutputStream.cpp
      storage/ndb/src/mgmapi/mgmapi.cpp
      storage/ndb/src/mgmsrv/Services.cpp
      storage/ndb/test/include/NdbMgmd.hpp
      storage/ndb/test/ndbapi/testMgm.cpp
 4622 jonas oreland	2011-10-23
      ndb - fix unfortunate warnings fix

    modified:
      storage/ndb/include/kernel/signaldata/DiGetNodes.hpp
      storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
 4621 jonas oreland	2011-10-23
      ndb - fix a strict alias warning (by using a union)

    modified:
      storage/ndb/include/kernel/signaldata/DiGetNodes.hpp
 4620 jonas oreland	2011-10-22
      ndb - yet another warning

    modified:
      storage/ndb/test/include/NDBT_Table.hpp
 4619 jonas oreland	2011-10-22
      ndb - kill a few warnings

    modified:
      sql/ha_ndbcluster.cc
      storage/ndb/src/ndbapi/NdbOperationExec.cpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
      storage/ndb/src/ndbapi/NdbTransaction.cpp
 4618 jonas oreland	2011-10-22
      ndb - fix bug in patch that always allows one to see own transactions in new is-view

    modified:
      sql/ha_ndbcluster_connection.cc
 4617 jonas oreland	2011-10-21
      ndb - warnings (back port from 5.5)

    modified:
      storage/ndb/src/common/debugger/EventLogger.cpp
      storage/ndb/src/common/debugger/SignalLoggerManager.cpp
      storage/ndb/src/common/logger/LogHandler.cpp
      storage/ndb/src/common/logger/Logger.cpp
      storage/ndb/src/common/portlib/NdbConfig.c
      storage/ndb/src/common/portlib/NdbDir.cpp
      storage/ndb/src/common/portlib/NdbThread.c
      storage/ndb/src/common/transporter/TransporterRegistry.cpp
      storage/ndb/src/common/util/BaseString.cpp
      storage/ndb/src/common/util/ConfigValues.cpp
      storage/ndb/src/common/util/File.cpp
      storage/ndb/src/common/util/InputStream.cpp
      storage/ndb/src/common/util/NdbSqlUtil.cpp
      storage/ndb/src/common/util/Parser.cpp
      storage/ndb/src/common/util/Properties.cpp
      storage/ndb/src/common/util/socket_io.cpp
      storage/ndb/src/cw/cpcd/APIService.cpp
      storage/ndb/src/cw/cpcd/CPCD.cpp
      storage/ndb/src/cw/cpcd/Monitor.cpp
      storage/ndb/src/cw/cpcd/Process.cpp
      storage/ndb/src/kernel/blocks/backup/read.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/kernel/blocks/dbdih/printSysfile.cpp
      storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp
      storage/ndb/src/kernel/error/ErrorReporter.cpp
      storage/ndb/src/mgmapi/mgmapi.cpp
      storage/ndb/src/mgmclient/CommandInterpreter.cpp
      storage/ndb/src/mgmsrv/InitConfigFileParser.cpp
      storage/ndb/src/mgmsrv/MgmtSrvr.cpp
      storage/ndb/src/mgmsrv/Services.cpp
      storage/ndb/src/ndbapi/NdbBlob.cpp
      storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
      storage/ndb/test/ndbapi/ScanFunctions.hpp
      storage/ndb/test/src/DbUtil.cpp
      storage/ndb/test/src/HugoQueries.cpp
      storage/ndb/test/src/HugoQueryBuilder.cpp
      storage/ndb/test/src/NDBT_Test.cpp
      storage/ndb/test/src/NdbBackup.cpp
      storage/ndb/test/src/NdbRestarter.cpp
      storage/ndb/test/src/getarg.c
      storage/ndb/test/tools/cpcc.cpp
      storage/ndb/tools/restore/consumer_restore.cpp
      storage/ndb/tools/waiter.cpp
 4616 jonas oreland	2011-10-21 [merge]
      ndb - merge 63 to 70

    modified:
      config/ac-macros/misc.m4
 4615 jonas oreland	2011-10-20
      ndb - warnings stab 3

    modified:
      sql/ha_ndbcluster.cc
      storage/ndb/src/common/util/ndbzio.c
      storage/ndb/src/kernel/blocks/ndbfs/Win32AsyncFile.cpp
      storage/ndb/tools/ndb_dump_frm_data.cpp
 4614 jonas oreland	2011-10-20
      ndb stab 2 at new warnings

    modified:
      sql/ha_ndb_index_stat.cc
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster_binlog.cc
      sql/ha_ndbcluster_binlog.h
      sql/ha_ndbinfo.cc
      storage/ndb/src/common/portlib/ndb_daemon.cc
      storage/ndb/src/common/util/ndb_init.cpp
      storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
      storage/ndb/src/kernel/vm/SimulatedBlock.cpp
 4613 Jonas Oreland	2011-10-20
      ndb - first stab at windows warning

    modified:
      sql/ha_ndbcluster.cc
      storage/ndb/src/common/util/ndb_init.cpp
      storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
      storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp
      storage/ndb/src/kernel/error/ndbd_exit_codes.c
      storage/ndb/src/kernel/vm/SimulatedBlock.cpp
      storage/ndb/src/mgmapi/ndb_logevent.cpp
      storage/ndb/src/mgmsrv/Defragger.hpp
      storage/ndb/src/ndbapi/ndberror.c
 4612 Jonas Oreland	2011-10-20
      ndb - fix compiler warning in Dbspj

    modified:
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
 4611 Jan Wedvik	2011-10-20
      This commit concerns SPJ (i.e. pushed queries).
      
      The commit fixes two errors that may happen when there is a pruned child
      scan operation:
      1. The api did not set SIP_PRUNE_PARAMS as it should.
      2. There was an error in Dbspj::parseScanIndex().  When it called 
      Dbspj::expand() to process prune keys, it used to start reading
      query parameters from after whatever parseDA() had consumed. Now it starts 
      from the beginning of the query parameters (for that operation). In other 
      words, the parameter values are only sent once, and these values are used for 
      building both the scan bounds and the prune key. 

    modified:
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilder.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
 4610 Frazer Clement	2011-10-20
      Bug#13117187 NDB : BAD CONNECT_REP STATE ASSERTION
      
      Initial patch to clean up existing code.
      Reuse of nodePtr var for running and connecting node removed.
      Running node in ZAPI_ states case branches turned into errors.
      
      Further work required to handle case of connectedNode in ZPREPARE_FAIL
      (Waiting for president to commit failure handling), and potentially other
      connectedNode states.

    modified:
      storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
 4609 Martin Skold	2011-10-20
      Bug #11755904  47742: EXTRA-COLUMNS ON THE SLAVE DO NOT WORK WITH NDB AS EXPECTED: Added test rpl_ndb_not_null

    added:
      mysql-test/suite/rpl_ndb/r/rpl_ndb_not_null.result
      mysql-test/suite/rpl_ndb/t/rpl_ndb_not_null.test
 4608 Jonas Oreland	2011-10-20
      ndb - always allow thread to see own transids

    modified:
      sql/ha_ndbcluster_connection.cc
 4607 Jonas Oreland	2011-10-20
      ndb - handle temporary error in testDict -n Bug57057. Seen once in autotest...why oh why can't we use the hugo-things...which have retries built in

    modified:
      storage/ndb/test/ndbapi/testDict.cpp
 4606 Jonas Oreland	2011-10-20
      ndb - don't suppress integral conversion warnings for ndb (and expect 50-100 new warnings on windows :)

    modified:
      support-files/compiler_warnings.supp
 4605 Jonas Oreland	2011-10-20
      ndb - add handling of LD_LIBRARY_PATH to atrt (needed for 5.5)

    modified:
      storage/ndb/test/run-test/atrt.hpp
      storage/ndb/test/run-test/files.cpp
      storage/ndb/test/run-test/main.cpp
 4604 jonas oreland	2011-10-18
      ndb - unbreak funcs_1 too...

    modified:
      mysql-test/suite/funcs_1/r/is_columns_is.result
      mysql-test/suite/funcs_1/r/is_tables_is.result
      mysql-test/suite/funcs_1/t/is_columns_is.test
      mysql-test/suite/funcs_1/t/is_tables_is.test
 4603 Mauritz Sundell	2011-10-17 [merge]
      ndb - merge others pulls

    added:
      mysql-test/include/not_ndb_is.inc
    modified:
      mysql-test/r/information_schema.result
      mysql-test/r/information_schema_db.result
      mysql-test/t/information_schema.test
      mysql-test/t/information_schema_db.test
      mysql-test/t/mysqlshow.test
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
      storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
      storage/ndb/src/kernel/vm/NdbinfoTables.cpp
      storage/ndb/test/ndbapi/testNodeRestart.cpp
 4602 Mauritz Sundell	2011-10-17 [merge]
      merge due to forgetting pull before commit...

    modified:
      sql/ha_ndb_index_stat.cc
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster.h
      sql/ha_ndbcluster_binlog.cc
      sql/ha_ndbcluster_connection.cc
      sql/ha_ndbinfo.cc
      sql/ha_ndbinfo.h
      storage/ndb/include/ndbapi/Ndb.hpp
      storage/ndb/src/ndbapi/Ndb.cpp
      storage/ndb/src/ndbapi/NdbImpl.hpp
      storage/ndb/src/ndbapi/Ndbinit.cpp
      tests/mysql_client_test.c
 4601 Mauritz Sundell	2011-10-17
      ndb - make ndbcntr pass filegroup id and version instead of looking it up in dbdict
      
      Ndbcntr did not set filegroup id and version while creating some DDO,
      and let dbdict look them up from newly created filegroups. Now ndbcntr
      passes filegroup id and version, and dbdict does not try to look them up
      if missing.

    modified:
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
      storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
 4600 magnus.blaudd@stripped	2011-10-17 [merge]
      Merge

    modified:
      sql/sql_parse.cc
      tests/mysql_client_test.c
 4599 jonas oreland	2011-10-16
      ndb - test/conf for daily-perf...all wishful thinking for now

    added:
      storage/ndb/test/run-test/conf-daily-perf.cnf
      storage/ndb/test/run-test/daily-perf-tests.txt
 4598 Frazer Clement	2011-10-14
      Bug #13087016 - NDBAPI : SENDFRAGMENTED BOUNDARY CASES 
      
      Fix boundary cases in NdbApi fragmented signal send code.
      
      Add test coverage.

    modified:
      storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
      storage/ndb/src/ndbapi/TransporterFacade.cpp
      storage/ndb/test/ndbapi/Makefile.am
      storage/ndb/test/ndbapi/testNdbApi.cpp
      storage/ndb/test/run-test/daily-basic-tests.txt
 4597 Jonas Oreland	2011-10-13
      ndb - forgot to update result file for ndbinfo after improving table comments

    modified:
      mysql-test/suite/ndb/r/ndbinfo.result
 4596 Jonas Oreland	2011-10-13
      ndb - ndb - review comment move ndbinfo_send_row out of ndbinfo_write_X

    modified:
      storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
 4595 Mauritz Sundell	2011-10-13
      ndb - move helper methods from entry class to super class for DLHashTable
      
        To enable multiple hash tables on same object the helper methods
        equal() and hashValue() need to be different not only the linked
        list members (nextHash, prevHash).
      
        Moving the methods from entry class, T, to superclass, U. All used
        instances of DLHashTable used U=T so no change in function is
        expected.

    modified:
      storage/ndb/src/kernel/vm/DLHashTable.hpp
 4594 Jonas Oreland	2011-10-13
      ndb - improve column comments on new ndb$info tables

    modified:
      storage/ndb/src/kernel/vm/NdbinfoTables.cpp
 4593 Pekka Nousiainen	2011-10-13 [merge]
      merge bug32040 to 7.0

    modified:
      storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
 4592 Jonas Oreland	2011-10-12
      ndb - fix bug in arena-ification of dbdict

    modified:
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
 4591 Jonas Oreland	2011-10-12
      ndb - add some new utility methods to Hugo

    modified:
      storage/ndb/test/include/HugoCalculator.hpp
      storage/ndb/test/include/HugoOperations.hpp
      storage/ndb/test/src/HugoOperations.cpp
 4590 Pekka Nousiainen	2011-10-11 [merge]
      merge 7.0 to wl4124

    added:
      mysql-test/suite/ndb_big/bug37983-master.opt
      mysql-test/suite/ndb_big/disabled.def
    modified:
      mysql-test/mysql-test-run.pl
      mysql-test/suite/ndb/r/ndbinfo.result
      storage/ndb/include/ndb_constants.h
      storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/vm/Ndbinfo.hpp
      storage/ndb/src/kernel/vm/NdbinfoTables.cpp
=== modified file 'config/ac-macros/misc.m4'
--- a/config/ac-macros/misc.m4	2009-11-04 23:08:21 +0000
+++ b/config/ac-macros/misc.m4	2011-10-21 08:30:40 +0000
@@ -460,22 +460,23 @@ AC_DEFUN([MYSQL_STACK_DIRECTION],
 #if defined(__HP_cc) || defined (__HP_aCC) || defined (__hpux)
 #pragma noinline
 #endif
- int find_stack_direction ()
- {
-   static char *addr = 0;
-   auto char dummy;
-   if (addr == 0)
-     {
-       addr = &dummy;
-       return find_stack_direction ();
-     }
-   else
-     return (&dummy > addr) ? 1 : -1;
- }
- int main ()
- {
-   exit (find_stack_direction() < 0);
- }], ac_cv_c_stack_direction=1, ac_cv_c_stack_direction=-1,
+  /* Check stack direction (0-down, 1-up) */
+  int f(int *a)
+  {
+    int b;
+    return(&b > a)?1:-1;
+  }
+  /*
+   Prevent compiler optimizations by calling function 
+   through pointer.
+  */
+  volatile int (*ptr_f)(int *) = f;
+  int main()
+  {
+    int a;
+    exit(ptr_f(&a) < 0);
+  }
+  ], ac_cv_c_stack_direction=1, ac_cv_c_stack_direction=-1,
    ac_cv_c_stack_direction=)])
  AC_DEFINE_UNQUOTED(STACK_DIRECTION, $ac_cv_c_stack_direction)
 ])dnl

=== added file 'mysql-test/include/not_ndb_is.inc'
--- a/mysql-test/include/not_ndb_is.inc	1970-01-01 00:00:00 +0000
+++ b/mysql-test/include/not_ndb_is.inc	2011-10-17 14:16:56 +0000
@@ -0,0 +1,27 @@
+#
+# Check if cluster is available by selecting from is.engines
+# if an error about no such table occurs bail out
+#
+
+disable_result_log;
+disable_query_log;
+
+--error 0, 1109
+select @have_ndb_is:= count(*) from information_schema.plugins
+where plugin_name like '%ndb%'
+  and PLUGIN_TYPE = 'INFORMATION SCHEMA';
+
+
+if ($mysql_errno){
+  # For backward compatibility, implement old fashioned way
+  # to check here ie. use SHOW VARIABLES LIKE "have_ndb"
+  die Can not determine if server supports ndb without is.engines table;
+}
+
+
+if (`select @have_ndb_is`){
+  skip NDB information schema table installed;
+}
+
+enable_query_log;
+enable_result_log;

=== modified file 'mysql-test/r/information_schema.result'
--- a/mysql-test/r/information_schema.result	2011-03-29 14:09:05 +0000
+++ b/mysql-test/r/information_schema.result	2011-10-17 14:16:56 +0000
@@ -39,8 +39,7 @@ insert into t5 values (10);
 create view v1 (c) as
 SELECT table_name FROM information_schema.TABLES
 WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND
-table_name<>'ndb_binlog_index' AND
-table_name<>'ndb_apply_status';
+table_name not like 'ndb%';
 select * from v1;
 c
 CHARACTER_SETS
@@ -850,7 +849,7 @@ VIEWS	TABLE_NAME	select
 delete from mysql.user where user='mysqltest_4';
 delete from mysql.db where user='mysqltest_4';
 flush privileges;
-SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND table_name<>'ndb_binlog_index' AND table_name<>'ndb_apply_status' GROUP BY TABLE_SCHEMA;
+SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND table_name not like 'ndb%' GROUP BY TABLE_SCHEMA;
 table_schema	count(*)
 information_schema	28
 mysql	22
@@ -1230,7 +1229,8 @@ INNER JOIN
 information_schema.columns c1
 ON t.table_schema = c1.table_schema AND
 t.table_name = c1.table_name
-WHERE t.table_schema = 'information_schema' AND
+WHERE t.table_name not like 'ndb%' AND
+t.table_schema = 'information_schema' AND
 c1.ordinal_position =
 ( SELECT COALESCE(MIN(c2.ordinal_position),1)
 FROM information_schema.columns c2
@@ -1273,7 +1273,8 @@ INNER JOIN
 information_schema.columns c1
 ON t.table_schema = c1.table_schema AND
 t.table_name = c1.table_name
-WHERE t.table_schema = 'information_schema' AND
+WHERE t.table_name not like 'ndb%' AND
+t.table_schema = 'information_schema' AND
 c1.ordinal_position =
 ( SELECT COALESCE(MIN(c2.ordinal_position),1)
 FROM information_schema.columns c2
@@ -1365,7 +1366,8 @@ count(*) as num1
 from information_schema.tables t
 inner join information_schema.columns c1
 on t.table_schema = c1.table_schema AND t.table_name = c1.table_name
-where t.table_schema = 'information_schema' and
+where t.table_name not like 'ndb%' and
+t.table_schema = 'information_schema' and
 c1.ordinal_position =
 (select isnull(c2.column_type) -
 isnull(group_concat(c2.table_schema, '.', c2.table_name)) +

=== modified file 'mysql-test/r/information_schema_db.result'
--- a/mysql-test/r/information_schema_db.result	2010-11-30 17:51:25 +0000
+++ b/mysql-test/r/information_schema_db.result	2011-10-17 14:16:56 +0000
@@ -3,7 +3,7 @@ drop view if exists v1,v2;
 drop function if exists f1;
 drop function if exists f2;
 use INFORMATION_SCHEMA;
-show tables;
+show tables where Tables_in_information_schema NOT LIKE 'ndb%';
 Tables_in_information_schema
 CHARACTER_SETS
 COLLATIONS

=== modified file 'mysql-test/suite/funcs_1/r/is_columns_is.result'
--- a/mysql-test/suite/funcs_1/r/is_columns_is.result	2010-10-06 10:06:47 +0000
+++ b/mysql-test/suite/funcs_1/r/is_columns_is.result	2011-10-18 07:22:32 +0000
@@ -1,6 +1,6 @@
 SELECT * FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 ORDER BY table_schema, table_name, column_name;
 TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	COLUMN_NAME	ORDINAL_POSITION	COLUMN_DEFAULT	IS_NULLABLE	DATA_TYPE	CHARACTER_MAXIMUM_LENGTH	CHARACTER_OCTET_LENGTH	NUMERIC_PRECISION	NUMERIC_SCALE	CHARACTER_SET_NAME	COLLATION_NAME	COLUMN_TYPE	COLUMN_KEY	EXTRA	PRIVILEGES	COLUMN_COMMENT	STORAGE	FORMAT
 NULL	information_schema	CHARACTER_SETS	CHARACTER_SET_NAME	1		NO	varchar	32	96	NULL	NULL	utf8	utf8_general_ci	varchar(32)			select		Default	Default
@@ -312,7 +312,7 @@ CHARACTER_SET_NAME,
 COLLATION_NAME
 FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 AND CHARACTER_OCTET_LENGTH / CHARACTER_MAXIMUM_LENGTH = 1
 ORDER BY CHARACTER_SET_NAME, COLLATION_NAME, COL_CML;
 COL_CML	DATA_TYPE	CHARACTER_SET_NAME	COLLATION_NAME
@@ -324,7 +324,7 @@ CHARACTER_SET_NAME,
 COLLATION_NAME
 FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 AND CHARACTER_OCTET_LENGTH / CHARACTER_MAXIMUM_LENGTH <> 1
 ORDER BY CHARACTER_SET_NAME, COLLATION_NAME, COL_CML;
 COL_CML	DATA_TYPE	CHARACTER_SET_NAME	COLLATION_NAME
@@ -336,7 +336,7 @@ CHARACTER_SET_NAME,
 COLLATION_NAME
 FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 AND CHARACTER_OCTET_LENGTH / CHARACTER_MAXIMUM_LENGTH IS NULL
 ORDER BY CHARACTER_SET_NAME, COLLATION_NAME, COL_CML;
 COL_CML	DATA_TYPE	CHARACTER_SET_NAME	COLLATION_NAME
@@ -357,7 +357,7 @@ COLLATION_NAME,
 COLUMN_TYPE
 FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 ORDER BY TABLE_SCHEMA, TABLE_NAME, ORDINAL_POSITION;
 COL_CML	TABLE_SCHEMA	TABLE_NAME	COLUMN_NAME	DATA_TYPE	CHARACTER_MAXIMUM_LENGTH	CHARACTER_OCTET_LENGTH	CHARACTER_SET_NAME	COLLATION_NAME	COLUMN_TYPE
 3.0000	information_schema	CHARACTER_SETS	CHARACTER_SET_NAME	varchar	32	96	utf8	utf8_general_ci	varchar(32)

=== modified file 'mysql-test/suite/funcs_1/r/is_tables_is.result'
--- a/mysql-test/suite/funcs_1/r/is_tables_is.result	2008-06-18 17:23:55 +0000
+++ b/mysql-test/suite/funcs_1/r/is_tables_is.result	2011-10-18 07:22:32 +0000
@@ -11,7 +11,7 @@ AS "user_comment",
 '-----------------------------------------------------' AS "Separator"
 FROM information_schema.tables
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 ORDER BY table_schema,table_name;
 TABLE_CATALOG	NULL
 TABLE_SCHEMA	information_schema
@@ -649,7 +649,7 @@ AS "user_comment",
 '-----------------------------------------------------' AS "Separator"
 FROM information_schema.tables
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 ORDER BY table_schema,table_name;
 TABLE_CATALOG	NULL
 TABLE_SCHEMA	information_schema

=== modified file 'mysql-test/suite/funcs_1/t/is_columns_is.test'
--- a/mysql-test/suite/funcs_1/t/is_columns_is.test	2008-06-16 18:39:58 +0000
+++ b/mysql-test/suite/funcs_1/t/is_columns_is.test	2011-10-18 07:22:32 +0000
@@ -18,5 +18,5 @@
 --source include/not_embedded.inc
 
 let $my_where = WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling';
+AND table_name <> 'profiling' and table_name not like 'ndb%';
 --source suite/funcs_1/datadict/columns.inc

=== modified file 'mysql-test/suite/funcs_1/t/is_tables_is.test'
--- a/mysql-test/suite/funcs_1/t/is_tables_is.test	2008-03-07 16:33:07 +0000
+++ b/mysql-test/suite/funcs_1/t/is_tables_is.test	2011-10-18 07:22:32 +0000
@@ -13,6 +13,6 @@
 #
 
 let $my_where = WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling';
+AND table_name <> 'profiling' and table_name not like 'ndb%';
 --source suite/funcs_1/datadict/tables1.inc
 

=== modified file 'mysql-test/suite/ndb/r/ndbinfo.result'
--- a/mysql-test/suite/ndb/r/ndbinfo.result	2011-10-11 08:11:15 +0000
+++ b/mysql-test/suite/ndb/r/ndbinfo.result	2011-10-28 09:56:57 +0000
@@ -38,7 +38,7 @@ table_id	table_name	comment
 9	nodes	node status
 10	diskpagebuffer	disk page buffer info
 11	threadblocks	which blocks are run in which threads
-12	threadstat	threadstat
+12	threadstat	Statistics on execution threads
 13	transactions	transactions
 14	operations	operations
 SELECT COUNT(*) FROM ndb$tables;
@@ -55,7 +55,7 @@ table_id	table_name	comment
 9	nodes	node status
 10	diskpagebuffer	disk page buffer info
 11	threadblocks	which blocks are run in which threads
-12	threadstat	threadstat
+12	threadstat	Statistics on execution threads
 13	transactions	transactions
 14	operations	operations
 SELECT * FROM ndb$tables WHERE table_name = 'LOGDESTINATION';
@@ -318,6 +318,147 @@ node_id
 1
 2
 
+desc threadblocks;
+Field	Type	Null	Key	Default	Extra
+node_id	int(10) unsigned	YES		NULL	
+thr_no	int(10) unsigned	YES		NULL	
+block_name	varchar(512)	YES		NULL	
+block_instance	int(10) unsigned	YES		NULL	
+select distinct block_name from threadblocks order by 1;
+block_name
+BACKUP
+CMVMI
+DBACC
+DBDICT
+DBDIH
+DBINFO
+DBLQH
+DBSPJ
+DBTC
+DBTUP
+DBTUX
+DBUTIL
+LGMAN
+NDBCNTR
+NDBFS
+PGMAN
+QMGR
+RESTORE
+SUMA
+THRMAN
+TRIX
+TSMAN
+desc threadstat;
+Field	Type	Null	Key	Default	Extra
+node_id	int(10) unsigned	YES		NULL	
+thr_no	int(10) unsigned	YES		NULL	
+thr_nm	varchar(512)	YES		NULL	
+c_loop	bigint(20) unsigned	YES		NULL	
+c_exec	bigint(20) unsigned	YES		NULL	
+c_wait	bigint(20) unsigned	YES		NULL	
+c_l_sent_prioa	bigint(20) unsigned	YES		NULL	
+c_l_sent_priob	bigint(20) unsigned	YES		NULL	
+c_r_sent_prioa	bigint(20) unsigned	YES		NULL	
+c_r_sent_priob	bigint(20) unsigned	YES		NULL	
+os_tid	bigint(20) unsigned	YES		NULL	
+os_now	bigint(20) unsigned	YES		NULL	
+os_ru_utime	bigint(20) unsigned	YES		NULL	
+os_ru_stime	bigint(20) unsigned	YES		NULL	
+os_ru_minflt	bigint(20) unsigned	YES		NULL	
+os_ru_majflt	bigint(20) unsigned	YES		NULL	
+os_ru_nvcsw	bigint(20) unsigned	YES		NULL	
+os_ru_nivcsw	bigint(20) unsigned	YES		NULL	
+select count(*) > 0 block_name from threadstat;
+block_name
+1
+
+desc cluster_transactions;
+Field	Type	Null	Key	Default	Extra
+node_id	int(10) unsigned	YES		NULL	
+block_instance	int(10) unsigned	YES		NULL	
+transid	bigint(22) unsigned	YES		NULL	
+state	varchar(256)	YES		NULL	
+count_operations	int(10) unsigned	YES		NULL	
+outstanding_operations	int(10) unsigned	YES		NULL	
+inactive_seconds	int(10) unsigned	YES		NULL	
+client_node_id	bigint(21) unsigned	YES		NULL	
+client_block_ref	bigint(21) unsigned	YES		NULL	
+desc server_transactions;
+Field	Type	Null	Key	Default	Extra
+mysql_connection_id	bigint(21) unsigned	NO		0	
+node_id	int(10) unsigned	YES		NULL	
+block_instance	int(10) unsigned	YES		NULL	
+transid	bigint(22) unsigned	YES		NULL	
+state	varchar(256)	YES		NULL	
+count_operations	int(10) unsigned	YES		NULL	
+outstanding_operations	int(10) unsigned	YES		NULL	
+inactive_seconds	int(10) unsigned	YES		NULL	
+client_node_id	bigint(21) unsigned	YES		NULL	
+client_block_ref	bigint(21) unsigned	YES		NULL	
+desc cluster_operations;
+Field	Type	Null	Key	Default	Extra
+node_id	int(10) unsigned	YES		NULL	
+block_instance	int(10) unsigned	YES		NULL	
+transid	bigint(22) unsigned	YES		NULL	
+operation_type	varchar(9)	YES		NULL	
+state	varchar(256)	YES		NULL	
+tableid	int(10) unsigned	YES		NULL	
+fragmentid	int(10) unsigned	YES		NULL	
+client_node_id	bigint(21) unsigned	YES		NULL	
+client_block_ref	bigint(21) unsigned	YES		NULL	
+tc_node_id	bigint(21) unsigned	YES		NULL	
+tc_block_no	bigint(21) unsigned	YES		NULL	
+tc_block_instance	bigint(21) unsigned	YES		NULL	
+desc server_operations;
+Field	Type	Null	Key	Default	Extra
+mysql_connection_id	bigint(21) unsigned	NO		0	
+node_id	int(10) unsigned	YES		NULL	
+block_instance	int(10) unsigned	YES		NULL	
+transid	bigint(22) unsigned	YES		NULL	
+operation_type	varchar(9)	YES		NULL	
+state	varchar(256)	YES		NULL	
+tableid	int(10) unsigned	YES		NULL	
+fragmentid	int(10) unsigned	YES		NULL	
+client_node_id	bigint(21) unsigned	YES		NULL	
+client_block_ref	bigint(21) unsigned	YES		NULL	
+tc_node_id	bigint(21) unsigned	YES		NULL	
+tc_block_no	bigint(21) unsigned	YES		NULL	
+tc_block_instance	bigint(21) unsigned	YES		NULL	
+
+create table t1 (a int primary key) engine = ndb;
+begin;
+insert into t1 values (1);
+select state, count_operations, outstanding_operations,
+IF(client_node_id <= 255, "<client_node_id>", "<incorrect node id>") 
+  client_node_id
+from server_transactions;
+state	count_operations	outstanding_operations	client_node_id
+Started	1	0	<client_node_id>
+select node_id, operation_type, state,
+IF(tc_node_id <= 48, "<tc_node_id>", "<incorrect nodeid>") tc_node_id,
+IF(client_node_id <= 255, "<client_node_id>", "<incorrect node id>") 
+  client_node_id
+from server_operations
+order by 1;
+node_id	operation_type	state	tc_node_id	client_node_id
+1	INSERT	Prepared	<tc_node_id>	<client_node_id>
+2	INSERT	Prepared	<tc_node_id>	<client_node_id>
+
+select st.state, st.count_operations, st.outstanding_operations,
+       so.node_id, so.state, so.operation_type
+from server_transactions st,
+     server_operations so
+where st.transid = so.transid
+  and so.tc_node_id = st.node_id
+  and so.tc_block_instance = st.block_instance
+  and so.client_node_id = st.client_node_id
+  and so.client_block_ref = st.client_block_ref;
+state	count_operations	outstanding_operations	node_id	state	operation_type
+Started	1	0	1	Prepared	INSERT
+Started	1	0	2	Prepared	INSERT
+rollback;
+drop table t1;
+
 set @@global.ndbinfo_offline=TRUE;
 select @@ndbinfo_offline;
 @@ndbinfo_offline

=== modified file 'mysql-test/suite/ndb/t/ndbinfo.test'
--- a/mysql-test/suite/ndb/t/ndbinfo.test	2011-05-23 13:45:57 +0000
+++ b/mysql-test/suite/ndb/t/ndbinfo.test	2011-10-28 09:56:57 +0000
@@ -201,6 +201,44 @@ set @@ndbinfo_offline=1;
 let $q1 = SELECT DISTINCT(node_id) FROM ndbinfo.counters ORDER BY node_id;
 eval $q1;
 
+# new views
+desc threadblocks;
+select distinct block_name from threadblocks order by 1;
+desc threadstat;
+select count(*) > 0 block_name from threadstat;
+
+desc cluster_transactions;
+desc server_transactions;
+desc cluster_operations;
+desc server_operations;
+
+create table t1 (a int primary key) engine = ndb;
+begin;
+insert into t1 values (1);
+select state, count_operations, outstanding_operations,
+IF(client_node_id <= 255, "<client_node_id>", "<incorrect node id>") 
+  client_node_id
+from server_transactions;
+select node_id, operation_type, state,
+IF(tc_node_id <= 48, "<tc_node_id>", "<incorrect nodeid>") tc_node_id,
+IF(client_node_id <= 255, "<client_node_id>", "<incorrect node id>") 
+  client_node_id
+from server_operations
+order by 1;
+
+--sorted_result
+select st.state, st.count_operations, st.outstanding_operations,
+       so.node_id, so.state, so.operation_type
+from server_transactions st,
+     server_operations so
+where st.transid = so.transid
+  and so.tc_node_id = st.node_id
+  and so.tc_block_instance = st.block_instance
+  and so.client_node_id = st.client_node_id
+  and so.client_block_ref = st.client_block_ref; 
+rollback;
+drop table t1;
+
 # Turn on ndbinfo_offline
 set @@global.ndbinfo_offline=TRUE;
 select @@ndbinfo_offline;

=== added file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_not_null.result'
--- a/mysql-test/suite/rpl_ndb/r/rpl_ndb_not_null.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb/r/rpl_ndb_not_null.result	2011-10-20 12:31:31 +0000
@@ -0,0 +1,196 @@
+include/master-slave.inc
+[connection master]
+SET SQL_LOG_BIN= 0;
+CREATE TABLE t1(`a` INT, `b` DATE DEFAULT NULL,
+`c` INT DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t2(`a` INT, `b` DATE DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t3(`a` INT, `b` DATE DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t4(`a` INT, `b` DATE DEFAULT NULL,
+`c` INT DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+SET SQL_LOG_BIN= 1;
+CREATE TABLE t1(`a` INT, `b` DATE DEFAULT NULL,
+`c` INT DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t2(`a` INT, `b` DATE DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t3(`a` INT, `b` DATE DEFAULT '0000-00-00',
+`c` INT DEFAULT 500, 
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t4(`a` INT, `b` DATE DEFAULT '0000-00-00',
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+************* EXECUTION WITH INSERTS *************
+INSERT INTO t1(a,b,c) VALUES (1, null, 1);
+INSERT INTO t1(a,b,c) VALUES (2,'1111-11-11', 2);
+INSERT INTO t1(a,b) VALUES (3, null);
+INSERT INTO t1(a,c) VALUES (4, 4);
+INSERT INTO t1(a) VALUES (5);
+INSERT INTO t2(a,b) VALUES (1, null);
+INSERT INTO t2(a,b) VALUES (2,'1111-11-11');
+INSERT INTO t2(a) VALUES (3);
+INSERT INTO t3(a,b) VALUES (1, null);
+INSERT INTO t3(a,b) VALUES (2,'1111-11-11');
+INSERT INTO t3(a) VALUES (3);
+INSERT INTO t4(a,b,c) VALUES (1, null, 1);
+INSERT INTO t4(a,b,c) VALUES (2,'1111-11-11', 2);
+INSERT INTO t4(a,b) VALUES (3, null);
+INSERT INTO t4(a,c) VALUES (4, 4);
+INSERT INTO t4(a) VALUES (5);
+************* SHOWING THE RESULT SETS WITH INSERTS *************
+TABLES t1 and t2 must be equal otherwise an error will be thrown. 
+include/diff_tables.inc [master:t1, slave:t1]
+include/diff_tables.inc [master:t2, slave:t2]
+TABLES t2 and t3 must be different.
+SELECT * FROM t3 ORDER BY a;
+a	b
+1	NULL
+2	1111-11-11
+3	NULL
+SELECT * FROM t3 ORDER BY a;
+a	b	c
+1	NULL	500
+2	1111-11-11	500
+3	NULL	500
+SELECT * FROM t4 ORDER BY a;
+a	b	c
+1	NULL	1
+2	1111-11-11	2
+3	NULL	NULL
+4	NULL	4
+5	NULL	NULL
+SELECT * FROM t4 ORDER BY a;
+a	b
+1	NULL
+2	1111-11-11
+3	NULL
+4	NULL
+5	NULL
+************* EXECUTION WITH UPDATES and REPLACES *************
+DELETE FROM t1;
+INSERT INTO t1(a,b,c) VALUES (1,'1111-11-11', 1);
+REPLACE INTO t1(a,b,c) VALUES (2,'1111-11-11', 2);
+UPDATE t1 set b= NULL, c= 300 where a= 1;
+REPLACE INTO t1(a,b,c) VALUES (2, NULL, 300);
+************* SHOWING THE RESULT SETS WITH UPDATES and REPLACES *************
+TABLES t1 and t2 must be equal otherwise an error will be thrown. 
+include/diff_tables.inc [master:t1, slave:t1]
+************* CLEANING *************
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+DROP TABLE t4;
+SET SQL_LOG_BIN= 0;
+CREATE TABLE t1 (`a` INT, `b` BIT DEFAULT NULL, `c` BIT DEFAULT NULL, 
+PRIMARY KEY (`a`)) ENGINE= 'NDB';
+SET SQL_LOG_BIN= 1;
+CREATE TABLE t1 (`a` INT, `b` BIT DEFAULT b'01', `c` BIT DEFAULT NULL,
+PRIMARY KEY (`a`)) ENGINE= 'NDB';
+************* EXECUTION WITH INSERTS *************
+INSERT INTO t1(a,b,c) VALUES (1, null, b'01');
+INSERT INTO t1(a,b,c) VALUES (2,b'00', b'01');
+INSERT INTO t1(a,b) VALUES (3, null);
+INSERT INTO t1(a,c) VALUES (4, b'01');
+INSERT INTO t1(a) VALUES (5);
+************* SHOWING THE RESULT SETS WITH INSERTS *************
+TABLES t1 and t2 must be different.
+SELECT a,b+0,c+0 FROM t1 ORDER BY a;
+a	b+0	c+0
+1	NULL	1
+2	0	1
+3	NULL	NULL
+4	NULL	1
+5	NULL	NULL
+SELECT a,b+0,c+0 FROM t1 ORDER BY a;
+a	b+0	c+0
+1	NULL	1
+2	0	1
+3	NULL	NULL
+4	NULL	1
+5	NULL	NULL
+************* EXECUTION WITH UPDATES and REPLACES *************
+DELETE FROM t1;
+INSERT INTO t1(a,b,c) VALUES (1,b'00', b'01');
+REPLACE INTO t1(a,b,c) VALUES (2,b'00',b'01');
+UPDATE t1 set b= NULL, c= b'00' where a= 1;
+REPLACE INTO t1(a,b,c) VALUES (2, NULL, b'00');
+************* SHOWING THE RESULT SETS WITH UPDATES and REPLACES *************
+TABLES t1 and t2 must be equal otherwise an error will be thrown. 
+include/diff_tables.inc [master:t1, slave:t1]
+DROP TABLE t1;
+################################################################################
+#                       NULL ---> NOT NULL (STRICT MODE)
+#                    UNCOMMENT THIS AFTER FIXING BUG#43992
+################################################################################
+################################################################################
+#                       NULL ---> NOT NULL (NON-STRICT MODE)
+################################################################################
+SET SQL_LOG_BIN= 0;
+CREATE TABLE t1(`a` INT NOT NULL, `b` INT,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t2(`a` INT NOT NULL, `b` INT,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t3(`a` INT NOT NULL, `b` INT,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+SET SQL_LOG_BIN= 1;
+CREATE TABLE t1(`a` INT NOT NULL, `b` INT NOT NULL, 
+`c` INT NOT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t2(`a` INT NOT NULL, `b` INT NOT NULL,
+`c` INT, 
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t3(`a` INT NOT NULL, `b` INT NOT NULL,
+`c` INT DEFAULT 500, 
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+************* EXECUTION WITH INSERTS *************
+INSERT INTO t1(a) VALUES (1);
+INSERT INTO t1(a, b) VALUES (2, NULL);
+INSERT INTO t1(a, b) VALUES (3, 1);
+INSERT INTO t2(a) VALUES (1);
+INSERT INTO t2(a, b) VALUES (2, NULL);
+INSERT INTO t2(a, b) VALUES (3, 1);
+INSERT INTO t3(a) VALUES (1);
+INSERT INTO t3(a, b) VALUES (2, NULL);
+INSERT INTO t3(a, b) VALUES (3, 1);
+INSERT INTO t3(a, b) VALUES (4, 1);
+REPLACE INTO t3(a, b) VALUES (5, null);
+REPLACE INTO t3(a, b) VALUES (3, null);
+UPDATE t3 SET b = NULL where a = 4;
+************* SHOWING THE RESULT SETS *************
+SELECT * FROM t1 ORDER BY a;
+a	b
+1	NULL
+2	NULL
+3	1
+SELECT * FROM t1 ORDER BY a;
+a	b	c
+SELECT * FROM t2 ORDER BY a;
+a	b
+1	NULL
+2	NULL
+3	1
+SELECT * FROM t2 ORDER BY a;
+a	b	c
+1	0	NULL
+2	0	NULL
+3	1	NULL
+SELECT * FROM t3 ORDER BY a;
+a	b
+1	NULL
+2	NULL
+3	NULL
+4	NULL
+5	NULL
+SELECT * FROM t3 ORDER BY a;
+a	b	c
+1	0	500
+2	0	500
+3	0	500
+4	0	500
+5	0	500
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+include/rpl_end.inc

=== added file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_not_null.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_not_null.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_not_null.test	2011-10-20 12:31:31 +0000
@@ -0,0 +1,8 @@
+-- source include/have_binlog_format_row.inc
+-- source include/have_ndb.inc
+-- source include/master-slave.inc
+
+let $engine = 'NDB';
+-- source extra/rpl_tests/rpl_not_null.test
+
+--source include/rpl_end.inc

=== modified file 'mysql-test/t/information_schema.test'
--- a/mysql-test/t/information_schema.test	2010-06-23 16:25:31 +0000
+++ b/mysql-test/t/information_schema.test	2011-10-17 14:16:56 +0000
@@ -44,8 +44,7 @@ insert into t5 values (10);
 create view v1 (c) as
  SELECT table_name FROM information_schema.TABLES
   WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND
-        table_name<>'ndb_binlog_index' AND
-        table_name<>'ndb_apply_status';
+        table_name not like 'ndb%';
 select * from v1;
 
 select c,table_name from v1
@@ -539,7 +538,7 @@ flush privileges;
 # Bug#9404 information_schema: Weird error messages
 # with SELECT SUM() ... GROUP BY queries
 #
-SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND table_name<>'ndb_binlog_index' AND table_name<>'ndb_apply_status' GROUP BY TABLE_SCHEMA;
+SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND table_name not like 'ndb%' GROUP BY TABLE_SCHEMA;
 
 
 #
@@ -921,7 +920,8 @@ SELECT t.table_name, c1.column_name
        information_schema.columns c1
        ON t.table_schema = c1.table_schema AND
           t.table_name = c1.table_name
-  WHERE t.table_schema = 'information_schema' AND
+  WHERE t.table_name not like 'ndb%' AND
+        t.table_schema = 'information_schema' AND
         c1.ordinal_position =
         ( SELECT COALESCE(MIN(c2.ordinal_position),1)
             FROM information_schema.columns c2
@@ -935,7 +935,8 @@ SELECT t.table_name, c1.column_name
        information_schema.columns c1
        ON t.table_schema = c1.table_schema AND
           t.table_name = c1.table_name
-  WHERE t.table_schema = 'information_schema' AND
+  WHERE t.table_name not like 'ndb%' AND
+        t.table_schema = 'information_schema' AND
         c1.ordinal_position =
         ( SELECT COALESCE(MIN(c2.ordinal_position),1)
             FROM information_schema.columns c2
@@ -1032,7 +1033,8 @@ select t.table_name, group_concat(t.tabl
 from information_schema.tables t
 inner join information_schema.columns c1
 on t.table_schema = c1.table_schema AND t.table_name = c1.table_name
-where t.table_schema = 'information_schema' and
+where t.table_name not like 'ndb%' and
+      t.table_schema = 'information_schema' and
         c1.ordinal_position =
         (select isnull(c2.column_type) -
          isnull(group_concat(c2.table_schema, '.', c2.table_name)) +

=== modified file 'mysql-test/t/information_schema_db.test'
--- a/mysql-test/t/information_schema_db.test	2009-09-28 11:25:47 +0000
+++ b/mysql-test/t/information_schema_db.test	2011-10-17 14:16:56 +0000
@@ -13,7 +13,7 @@ drop function if exists f2;
 
 use INFORMATION_SCHEMA;
 --replace_result Tables_in_INFORMATION_SCHEMA Tables_in_information_schema
-show tables;
+show tables where Tables_in_INFORMATION_SCHEMA NOT LIKE 'ndb%';
 --replace_result 'Tables_in_INFORMATION_SCHEMA (T%)' 'Tables_in_information_schema (T%)'
 show tables from INFORMATION_SCHEMA like 'T%';
 create database `inf%`;

=== modified file 'mysql-test/t/mysqlshow.test'
--- a/mysql-test/t/mysqlshow.test	2006-07-22 03:29:25 +0000
+++ b/mysql-test/t/mysqlshow.test	2011-10-17 14:16:56 +0000
@@ -1,6 +1,9 @@
 # Can't run test of external client with embedded server
 -- source include/not_embedded.inc
 
+# Test lists tables in Information_schema, and ndb adds some
+-- source include/not_ndb_is.inc
+
 --disable_warnings
 DROP TABLE IF EXISTS t1,t2,test1,test2;
 --enable_warnings

=== modified file 'sql/ha_ndb_index_stat.cc'
--- a/sql/ha_ndb_index_stat.cc	2011-10-08 16:56:43 +0000
+++ b/sql/ha_ndb_index_stat.cc	2011-10-20 16:18:28 +0000
@@ -227,7 +227,7 @@ ndb_index_stat_opt2str(const Ndb_index_s
     const Ndb_index_stat_opt::Val& v= opt.val[i];
     ptr+= strlen(ptr);
     const char* sep= (ptr == buf ? "" : ",");
-    const uint sz= ptr < end ? end - ptr : 0;
+    const uint sz= ptr < end ? (uint)(end - ptr) : 0;
 
     switch (v.unit) {
     case Ndb_index_stat_opt::Ubool:
@@ -1984,7 +1984,7 @@ ndb_index_stat_thread_func(void *arg __a
   }
 
   /* Get thd_ndb for this thread */
-  if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
+  if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb(thd)))
   {
     sql_print_error("Could not allocate Thd_ndb object");
     pthread_mutex_lock(&LOCK_ndb_index_stat_thread);

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2011-10-08 16:54:19 +0000
+++ b/sql/ha_ndbcluster.cc	2011-10-22 09:38:48 +0000
@@ -1407,8 +1407,7 @@ int ha_ndbcluster::ndb_err(NdbTransactio
       {
         const NDBINDEX *unique_index=
           (const NDBINDEX *) m_index[i].unique_index;
-        if (unique_index &&
-            (char *) unique_index->getObjectId() == error_data)
+        if (unique_index && UintPtr(unique_index->getObjectId()) == UintPtr(error_data))
         {
           dupkey= i;
           break;
@@ -1451,7 +1450,7 @@ bool ha_ndbcluster::get_error_message(in
 
   const NdbError err= ndb->getNdbError(error);
   bool temporary= err.status==NdbError::TemporaryError;
-  buf->set(err.message, strlen(err.message), &my_charset_bin);
+  buf->set(err.message, (uint32)strlen(err.message), &my_charset_bin);
   DBUG_PRINT("exit", ("message: %s, temporary: %d", buf->ptr(), temporary));
   DBUG_RETURN(temporary);
 }
@@ -2003,7 +2002,7 @@ void ha_ndbcluster::release_blobs_buffer
 */
 
 int cmp_frm(const NDBTAB *ndbtab, const void *pack_data,
-            uint pack_length)
+            size_t pack_length)
 {
   DBUG_ENTER("cmp_frm");
   /*
@@ -3600,7 +3599,7 @@ count_key_columns(const KEY *key_info, c
       break;
     length+= key_part->store_length;
   }
-  return key_part - first_key_part;
+  return (uint)(key_part - first_key_part);
 }
 
 /* Helper method to compute NDB index bounds. Note: does not set range_no. */
@@ -6117,7 +6116,7 @@ int ha_ndbcluster::ndb_update_row(const
   uint blob_count= 0;
   if (uses_blob_value(table->write_set))
   {
-    int row_offset= new_data - table->record[0];
+    int row_offset= (int)(new_data - table->record[0]);
     int res= set_blob_values(op, row_offset, table->write_set, &blob_count,
                              (batch_allowed && !need_flush));
     if (res != 0)
@@ -7792,7 +7791,7 @@ static int ndbcluster_update_apply_statu
   // log_name
   char tmp_buf[FN_REFLEN];
   ndb_pack_varchar(ndbtab->getColumn(2u), tmp_buf,
-                   group_master_log_name, strlen(group_master_log_name));
+                   group_master_log_name, (int)strlen(group_master_log_name));
   r|= op->setValue(2u, tmp_buf);
   DBUG_ASSERT(r == 0);
   // start_pos
@@ -9382,7 +9381,7 @@ void ha_ndbcluster::update_create_info(H
            goto err;
 	 const char *tablespace= ts.getName();
          DBUG_PRINT("info", ("Found tablespace '%s'", tablespace));
-         uint tablespace_len= strlen(tablespace);
+         uint tablespace_len= (uint)strlen(tablespace);
          if (tablespace_len != 0) 
          {
            share->tablespace= (char *) alloc_root(&share->mem_root,
@@ -9551,7 +9550,7 @@ int ha_ndbcluster::create(const char *na
     */
     if ((my_errno= write_ndb_file(name)))
       DBUG_RETURN(my_errno);
-    ndbcluster_create_binlog_setup(thd, ndb, name, strlen(name),
+    ndbcluster_create_binlog_setup(thd, ndb, name, (uint)strlen(name),
                                    m_dbname, m_tabname, FALSE);
     DBUG_RETURN(my_errno);
   }
@@ -10463,7 +10462,7 @@ int ha_ndbcluster::rename_table(const ch
       this is a "real" rename table, i.e. not tied to an offline alter table
       - send new name == "to" in query field
     */
-    ndbcluster_log_schema_op(thd, to, strlen(to),
+    ndbcluster_log_schema_op(thd, to, (int)strlen(to),
                              old_dbname, m_tabname,
                              ndb_table_id, ndb_table_version,
                              SOT_RENAME_TABLE_PREPARE,
@@ -11127,7 +11126,7 @@ int ha_ndbcluster::open(const char *name
                             name);
     }
     Ndb* ndb= check_ndb_in_thd(thd);
-    ndbcluster_create_binlog_setup(thd, ndb, name, strlen(name),
+    ndbcluster_create_binlog_setup(thd, ndb, name, (uint)strlen(name),
                                    m_dbname, m_tabname, FALSE);
     if ((m_share=get_share(name, table, FALSE)) == 0)
     {
@@ -11419,7 +11418,8 @@ int ha_ndbcluster::close(void)
   wait on condition for a Ndb object to be released.
   - Alt.2 Seize/release from pool, wait until next release 
 */
-Thd_ndb* ha_ndbcluster::seize_thd_ndb()
+Thd_ndb*
+ha_ndbcluster::seize_thd_ndb(THD * thd)
 {
   Thd_ndb *thd_ndb;
   DBUG_ENTER("seize_thd_ndb");
@@ -11442,6 +11442,10 @@ Thd_ndb* ha_ndbcluster::seize_thd_ndb()
     delete thd_ndb;
     thd_ndb= NULL;
   }
+  else
+  {
+    thd_ndb->ndb->setCustomData64(thd_get_thread_id(thd));
+  }
   DBUG_RETURN(thd_ndb);
 }
 
@@ -11478,7 +11482,10 @@ bool Thd_ndb::recycle_ndb(THD* thd)
                          ndb->getNdbError().message));
     DBUG_RETURN(false);
   }
-
+  else
+  {
+   ndb->setCustomData64(thd_get_thread_id(thd));
+  }
   DBUG_RETURN(true);
 }
 
@@ -11516,7 +11523,7 @@ Ndb* check_ndb_in_thd(THD* thd, bool val
   Thd_ndb *thd_ndb= get_thd_ndb(thd);
   if (!thd_ndb)
   {
-    if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
+    if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb(thd)))
       return NULL;
     set_thd_ndb(thd, thd_ndb);
   }
@@ -11775,7 +11782,7 @@ int ndbcluster_drop_database_impl(THD *t
   List_iterator_fast<char> it(drop_list);
   while ((tabname=it++))
   {
-    tablename_to_filename(tabname, tmp, FN_REFLEN - (tmp - full_path)-1);
+    tablename_to_filename(tabname, tmp, (uint)(FN_REFLEN - (tmp - full_path)-1));
     mysql_mutex_lock(&LOCK_open);
     if (ha_ndbcluster::drop_table(thd, 0, ndb, full_path, dbname, tabname))
     {
@@ -11917,7 +11924,7 @@ int ndbcluster_find_all_files(THD *thd)
       }
       /* finalize construction of path */
       end+= tablename_to_filename(elmt.name, end,
-                                  sizeof(key)-(end-key));
+                                  (uint)(sizeof(key)-(end-key)));
       uchar *data= 0, *pack_data= 0;
       size_t length, pack_length;
       int discover= 0;
@@ -11966,7 +11973,7 @@ int ndbcluster_find_all_files(THD *thd)
       else
       {
         /* set up replication for this table */
-        ndbcluster_create_binlog_setup(thd, ndb, key, end-key,
+        ndbcluster_create_binlog_setup(thd, ndb, key, (uint)(end-key),
                                        elmt.database, elmt.name,
                                        TRUE);
       }
@@ -12138,9 +12145,9 @@ ndbcluster_find_files(handlerton *hton,
     {
       file_name_str= (char*)my_hash_element(&ok_tables, i);
       end= end1 +
-        tablename_to_filename(file_name_str, end1, sizeof(name) - (end1 - name));
+        tablename_to_filename(file_name_str, end1, (uint)(sizeof(name) - (end1 - name)));
       mysql_mutex_lock(&LOCK_open);
-      ndbcluster_create_binlog_setup(thd, ndb, name, end-name,
+      ndbcluster_create_binlog_setup(thd, ndb, name, (uint)(end-name),
                                      db, file_name_str, TRUE);
       mysql_mutex_unlock(&LOCK_open);
     }
@@ -12205,7 +12212,7 @@ ndbcluster_find_files(handlerton *hton,
     {
       LEX_STRING *tmp_file_name= 0;
       tmp_file_name= thd->make_lex_string(tmp_file_name, file_name_str,
-                                          strlen(file_name_str), TRUE);
+                                          (uint)strlen(file_name_str), TRUE);
       files->push_back(tmp_file_name); 
     }
   }
@@ -12612,7 +12619,7 @@ void ha_ndbcluster::set_dbname(const cha
   while (ptr >= path_name && *ptr != '\\' && *ptr != '/') {
     ptr--;
   }
-  uint name_len= end - ptr;
+  uint name_len= (uint)(end - ptr);
   memcpy(tmp_name, ptr + 1, name_len);
   tmp_name[name_len]= '\0';
   filename_to_tablename(tmp_name, dbname, sizeof(tmp_buff) - 1);
@@ -12644,7 +12651,7 @@ ha_ndbcluster::set_tabname(const char *p
   while (ptr >= path_name && *ptr != '\\' && *ptr != '/') {
     ptr--;
   }
-  uint name_len= end - ptr;
+  uint name_len= (uint)(end - ptr);
   memcpy(tmp_name, ptr + 1, end - ptr);
   tmp_name[name_len]= '\0';
   filename_to_tablename(tmp_name, tabname, sizeof(tmp_buff) - 1);
@@ -13419,7 +13426,7 @@ int handle_trailing_share(THD *thd, NDB_
       share->key_length= min_key_length;
     }
     share->key_length=
-      my_snprintf(share->key, min_key_length + 1, "#leak%lu",
+      (uint)my_snprintf(share->key, min_key_length + 1, "#leak%lu",
                   trailing_share_id++);
   }
   /* Keep it for possible the future trailing free */
@@ -14622,7 +14629,7 @@ ha_ndbcluster::read_multi_range_next(KEY
           need to process all index scan ranges together.
         */
         if (!multi_range_sorted ||
-            (expected_range_no= multi_range_curr - m_multi_ranges)
+            (expected_range_no= (int)(multi_range_curr - m_multi_ranges))
                 == current_range_no)
         {
           *multi_range_found_p= m_multi_ranges + current_range_no;
@@ -14671,7 +14678,7 @@ ha_ndbcluster::read_multi_range_next(KEY
   */
   DBUG_RETURN(read_multi_range_first(multi_range_found_p, 
                                      multi_range_curr,
-                                     multi_range_end - multi_range_curr, 
+                                     (uint)(multi_range_end - multi_range_curr), 
                                      multi_range_sorted,
                                      multi_range_buffer));
 }
@@ -14730,7 +14737,7 @@ ha_ndbcluster::update_table_comment(
         const char*     comment)/* in:  table comment defined by user */
 {
   THD *thd= current_thd;
-  uint length= strlen(comment);
+  uint length= (uint)strlen(comment);
   if (length > 64000 - 3)
   {
     return((char*)comment); /* string too long */
@@ -14751,7 +14758,7 @@ ha_ndbcluster::update_table_comment(
 
   char *str;
   const char *fmt="%s%snumber_of_replicas: %d";
-  const unsigned fmt_len_plus_extra= length + strlen(fmt);
+  const unsigned fmt_len_plus_extra= length + (uint)strlen(fmt);
   if ((str= (char*) my_malloc(fmt_len_plus_extra, MYF(0))) == NULL)
   {
     sql_print_error("ha_ndbcluster::update_table_comment: "
@@ -14850,7 +14857,7 @@ pthread_handler_t ndb_util_thread_func(v
   pthread_mutex_unlock(&LOCK_ndb_util_thread);
 
   /* Get thd_ndb for this thread */
-  if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
+  if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb(thd)))
   {
     sql_print_error("Could not allocate Thd_ndb object");
     pthread_mutex_lock(&LOCK_ndb_util_thread);
@@ -15137,7 +15144,7 @@ ndbcluster_show_status(handlerton *hton,
   else
     update_status_variables(NULL, &ns, g_ndb_cluster_connection);
 
-  buflen=
+  buflen= (uint)
     my_snprintf(buf, sizeof(buf),
                 "cluster_node_id=%ld, "
                 "connected_host=%s, "
@@ -15160,8 +15167,8 @@ ndbcluster_show_status(handlerton *hton,
     if (ns.transaction_hint_count[i] > 0 ||
         ns.transaction_no_hint_count[i] > 0)
     {
-      uint namelen= my_snprintf(name, sizeof(name), "node[%d]", i);
-      buflen= my_snprintf(buf, sizeof(buf),
+      uint namelen= (uint)my_snprintf(name, sizeof(name), "node[%d]", i);
+      buflen= (uint)my_snprintf(buf, sizeof(buf),
                           "transaction_hint=%ld, transaction_no_hint=%ld",
                           ns.transaction_hint_count[i],
                           ns.transaction_no_hint_count[i]);
@@ -15177,12 +15184,12 @@ ndbcluster_show_status(handlerton *hton,
     tmp.m_name= 0;
     while (ndb->get_free_list_usage(&tmp))
     {
-      buflen=
+      buflen= (uint)
         my_snprintf(buf, sizeof(buf),
                   "created=%u, free=%u, sizeof=%u",
                   tmp.m_created, tmp.m_free, tmp.m_sizeof);
       if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length,
-                     tmp.m_name, strlen(tmp.m_name), buf, buflen))
+                     tmp.m_name, (uint)strlen(tmp.m_name), buf, buflen))
         DBUG_RETURN(TRUE);
     }
   }
@@ -16678,19 +16685,19 @@ static int ndbcluster_fill_files_table(h
       }
 
       table->field[IS_FILES_FILE_NAME]->set_notnull();
-      table->field[IS_FILES_FILE_NAME]->store(elt.name, strlen(elt.name),
+      table->field[IS_FILES_FILE_NAME]->store(elt.name, (uint)strlen(elt.name),
                                               system_charset_info);
       table->field[IS_FILES_FILE_TYPE]->set_notnull();
       table->field[IS_FILES_FILE_TYPE]->store("DATAFILE",8,
                                               system_charset_info);
       table->field[IS_FILES_TABLESPACE_NAME]->set_notnull();
       table->field[IS_FILES_TABLESPACE_NAME]->store(df.getTablespace(),
-                                                    strlen(df.getTablespace()),
+                                                    (uint)strlen(df.getTablespace()),
                                                     system_charset_info);
       table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
       table->field[IS_FILES_LOGFILE_GROUP_NAME]->
         store(ts.getDefaultLogfileGroup(),
-              strlen(ts.getDefaultLogfileGroup()),
+              (uint)strlen(ts.getDefaultLogfileGroup()),
               system_charset_info);
       table->field[IS_FILES_ENGINE]->set_notnull();
       table->field[IS_FILES_ENGINE]->store(ndbcluster_hton_name,
@@ -16716,7 +16723,7 @@ static int ndbcluster_fill_files_table(h
       table->field[IS_FILES_ROW_FORMAT]->store("FIXED", 5, system_charset_info);
 
       char extra[30];
-      int len= my_snprintf(extra, sizeof(extra), "CLUSTER_NODE=%u", id);
+      int len= (int)my_snprintf(extra, sizeof(extra), "CLUSTER_NODE=%u", id);
       table->field[IS_FILES_EXTRA]->set_notnull();
       table->field[IS_FILES_EXTRA]->store(extra, len, system_charset_info);
       schema_table_store_record(thd, table);
@@ -16749,12 +16756,12 @@ static int ndbcluster_fill_files_table(h
 
     table->field[IS_FILES_TABLESPACE_NAME]->set_notnull();
     table->field[IS_FILES_TABLESPACE_NAME]->store(elt.name,
-                                                     strlen(elt.name),
+                                                     (uint)strlen(elt.name),
                                                      system_charset_info);
     table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
     table->field[IS_FILES_LOGFILE_GROUP_NAME]->
       store(ts.getDefaultLogfileGroup(),
-           strlen(ts.getDefaultLogfileGroup()),
+           (uint)strlen(ts.getDefaultLogfileGroup()),
            system_charset_info);
 
     table->field[IS_FILES_ENGINE]->set_notnull();
@@ -16809,7 +16816,7 @@ static int ndbcluster_fill_files_table(h
 
       init_fill_schema_files_row(table);
       table->field[IS_FILES_FILE_NAME]->set_notnull();
-      table->field[IS_FILES_FILE_NAME]->store(elt.name, strlen(elt.name),
+      table->field[IS_FILES_FILE_NAME]->store(elt.name, (uint)strlen(elt.name),
                                               system_charset_info);
       table->field[IS_FILES_FILE_TYPE]->set_notnull();
       table->field[IS_FILES_FILE_TYPE]->store("UNDO LOG", 8,
@@ -16818,7 +16825,7 @@ static int ndbcluster_fill_files_table(h
       uf.getLogfileGroupId(&objid);
       table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
       table->field[IS_FILES_LOGFILE_GROUP_NAME]->store(uf.getLogfileGroup(),
-                                                  strlen(uf.getLogfileGroup()),
+                                                  (uint)strlen(uf.getLogfileGroup()),
                                                        system_charset_info);
       table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->set_notnull();
       table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->store(objid.getObjectId(), true);
@@ -16841,7 +16848,7 @@ static int ndbcluster_fill_files_table(h
       table->field[IS_FILES_VERSION]->store(uf.getObjectVersion(), true);
 
       char extra[100];
-      int len= my_snprintf(extra,sizeof(extra),"CLUSTER_NODE=%u;UNDO_BUFFER_SIZE=%lu",
+      int len= (int)my_snprintf(extra,sizeof(extra),"CLUSTER_NODE=%u;UNDO_BUFFER_SIZE=%lu",
                            id, (ulong) lfg.getUndoBufferSize());
       table->field[IS_FILES_EXTRA]->set_notnull();
       table->field[IS_FILES_EXTRA]->store(extra, len, system_charset_info);
@@ -16876,7 +16883,7 @@ static int ndbcluster_fill_files_table(h
 
     table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
     table->field[IS_FILES_LOGFILE_GROUP_NAME]->store(elt.name,
-                                                     strlen(elt.name),
+                                                     (uint)strlen(elt.name),
                                                      system_charset_info);
     table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->set_notnull();
     table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->store(lfg.getObjectId(), true);
@@ -16894,7 +16901,7 @@ static int ndbcluster_fill_files_table(h
     table->field[IS_FILES_VERSION]->store(lfg.getObjectVersion(), true);
 
     char extra[100];
-    int len= my_snprintf(extra,sizeof(extra),
+    int len= (int)my_snprintf(extra,sizeof(extra),
                          "UNDO_BUFFER_SIZE=%lu",
                          (ulong) lfg.getUndoBufferSize());
     table->field[IS_FILES_EXTRA]->set_notnull();
@@ -17310,12 +17317,8 @@ struct st_mysql_storage_engine ndbcluste
 { MYSQL_HANDLERTON_INTERFACE_VERSION };
 
 
-#include "ha_ndbinfo.h"
-
-extern struct st_mysql_sys_var* ndbinfo_system_variables[];
-
-struct st_mysql_storage_engine ndbinfo_storage_engine=
-{ MYSQL_HANDLERTON_INTERFACE_VERSION };
+extern struct st_mysql_plugin i_s_ndb_transid_mysql_connection_map_plugin;
+extern struct st_mysql_plugin ndbinfo_plugin;
 
 mysql_declare_plugin(ndbcluster)
 {
@@ -17332,20 +17335,9 @@ mysql_declare_plugin(ndbcluster)
   system_variables,           /* system variables */
   NULL                        /* config options                  */
 },
-{
-  MYSQL_STORAGE_ENGINE_PLUGIN,
-  &ndbinfo_storage_engine,
-  "ndbinfo",
-  "Sun Microsystems Inc.",
-  "MySQL Cluster system information storage engine",
-  PLUGIN_LICENSE_GPL,
-  ndbinfo_init,               /* plugin init */
-  ndbinfo_deinit,             /* plugin deinit */
-  0x0001,                     /* plugin version */
-  NULL,                       /* status variables */
-  ndbinfo_system_variables,   /* system variables */
-  NULL                        /* config options */
-}
+ndbinfo_plugin, /* ndbinfo plugin */
+/* IS plugin table which maps between mysql connection id and ndb trans-id */
+i_s_ndb_transid_mysql_connection_map_plugin
 mysql_declare_plugin_end;
 
 #endif

=== modified file 'sql/ha_ndbcluster.h'
--- a/sql/ha_ndbcluster.h	2011-09-07 22:50:01 +0000
+++ b/sql/ha_ndbcluster.h	2011-10-17 12:43:31 +0000
@@ -682,7 +682,7 @@ class ha_ndbcluster: public handler
   int ndb_update_row(const uchar *old_data, uchar *new_data,
                      int is_bulk_update);
 
-  static Thd_ndb* seize_thd_ndb();
+  static Thd_ndb* seize_thd_ndb(THD*);
   static void release_thd_ndb(Thd_ndb* thd_ndb);
  
 static void set_dbname(const char *pathname, char *dbname);

=== modified file 'sql/ha_ndbcluster_binlog.cc'
--- a/sql/ha_ndbcluster_binlog.cc	2011-09-29 13:32:44 +0000
+++ b/sql/ha_ndbcluster_binlog.cc	2011-10-20 16:18:28 +0000
@@ -1549,7 +1549,7 @@ static int ndbcluster_find_all_databases
             /* create missing database */
             sql_print_information("NDB: Discovered missing database '%s'", db);
             const int no_print_error[1]= {0};
-            name_len= my_snprintf(name, sizeof(name), "CREATE DATABASE %s", db);
+            name_len= (unsigned)my_snprintf(name, sizeof(name), "CREATE DATABASE %s", db);
             run_query(thd, name, name + name_len,
                       no_print_error,    /* print error */
                       TRUE,   /* don't binlog the query */
@@ -1891,12 +1891,12 @@ ndbcluster_update_slock(THD *thd,
       DBUG_ASSERT(r == 0);
     
       /* db */
-      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, strlen(db));
+      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, (int)strlen(db));
       r|= op->equal(SCHEMA_DB_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* name */
       ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, table_name,
-                       strlen(table_name));
+                       (int)strlen(table_name));
       r|= op->equal(SCHEMA_NAME_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* slock */
@@ -1934,12 +1934,12 @@ ndbcluster_update_slock(THD *thd,
       DBUG_ASSERT(r == 0);
 
       /* db */
-      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, strlen(db));
+      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, (int)strlen(db));
       r|= op->equal(SCHEMA_DB_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* name */
       ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, table_name,
-                       strlen(table_name));
+                       (int)strlen(table_name));
       r|= op->equal(SCHEMA_NAME_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* slock */
@@ -2098,7 +2098,7 @@ int ndbcluster_log_schema_op(THD *thd,
   Thd_ndb *thd_ndb= get_thd_ndb(thd);
   if (!thd_ndb)
   {
-    if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
+    if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb(thd)))
     {
       sql_print_error("Could not allocate Thd_ndb object");
       DBUG_RETURN(1);
@@ -2284,12 +2284,12 @@ int ndbcluster_log_schema_op(THD *thd,
       DBUG_ASSERT(r == 0);
       
       /* db */
-      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, log_db, strlen(log_db));
+      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, log_db, (int)strlen(log_db));
       r|= op->equal(SCHEMA_DB_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* name */
       ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, log_tab,
-                       strlen(log_tab));
+                       (int)strlen(log_tab));
       r|= op->equal(SCHEMA_NAME_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* slock */
@@ -2789,7 +2789,7 @@ ndb_binlog_thread_handle_schema_event(TH
           // fall through
         case SOT_RENAME_TABLE_NEW:
         {
-          uint end= my_snprintf(&errmsg[0], MYSQL_ERRMSG_SIZE,
+          uint end= (uint)my_snprintf(&errmsg[0], MYSQL_ERRMSG_SIZE,
                                 "NDB Binlog: Skipping renaming locally "
                                 "defined table '%s.%s' from binlog schema "
                                 "event '%s' from node %d. ",
@@ -2801,7 +2801,7 @@ ndb_binlog_thread_handle_schema_event(TH
         case SOT_DROP_TABLE:
           if (schema_type == SOT_DROP_TABLE)
           {
-            uint end= my_snprintf(&errmsg[0], MYSQL_ERRMSG_SIZE,
+            uint end= (uint)my_snprintf(&errmsg[0], MYSQL_ERRMSG_SIZE,
                                   "NDB Binlog: Skipping dropping locally "
                                   "defined table '%s.%s' from binlog schema "
                                   "event '%s' from node %d. ",
@@ -3562,7 +3562,7 @@ ndb_binlog_index_table__write_rows(THD *
 
     ndb_binlog_index->field[0]->store(first->master_log_pos, true);
     ndb_binlog_index->field[1]->store(first->master_log_file,
-                                      strlen(first->master_log_file),
+                                      (uint)strlen(first->master_log_file),
                                       &my_charset_bin);
     ndb_binlog_index->field[2]->store(epoch= first->epoch, true);
     if (ndb_binlog_index->s->fields > 7)
@@ -4300,7 +4300,7 @@ parse_conflict_fn_spec(const char* confl
   {
     const st_conflict_fn_def &fn= conflict_fns[i];
 
-    uint len= strlen(fn.name);
+    uint len= (uint)strlen(fn.name);
     if (strncmp(ptr, fn.name, len))
       continue;
 
@@ -4372,7 +4372,7 @@ parse_conflict_fn_spec(const char* confl
         }
       }
 
-      uint len= end_arg - start_arg;
+      uint len= (uint)(end_arg - start_arg);
       args[no_args].type=    type;
       args[no_args].ptr=     start_arg;
       args[no_args].len=     len;
@@ -4701,9 +4701,9 @@ ndbcluster_read_replication_table(THD *t
       DBUG_PRINT("info", ("reading[%u]: %s,%s,%u", i, db, table_name, id));
       if ((_op= trans->getNdbOperation(reptab)) == NULL) abort();
       if (_op->readTuple(NdbOperation::LM_CommittedRead)) abort();
-      ndb_pack_varchar(col_db, tmp_buf, db, strlen(db));
+      ndb_pack_varchar(col_db, tmp_buf, db, (int)strlen(db));
       if (_op->equal(col_db->getColumnNo(), tmp_buf)) abort();
-      ndb_pack_varchar(col_table_name, tmp_buf, table_name, strlen(table_name));
+      ndb_pack_varchar(col_table_name, tmp_buf, table_name, (int)strlen(table_name));
       if (_op->equal(col_table_name->getColumnNo(), tmp_buf)) abort();
       if (_op->equal(col_server_id->getColumnNo(), id)) abort();
       if ((col_binlog_type_rec_attr[i]=
@@ -5478,7 +5478,7 @@ ndbcluster_create_event_ops(THD *thd, ND
   Ndb_event_data *event_data= share->event_data;
   int do_ndb_schema_share= 0, do_ndb_apply_status_share= 0;
 #ifdef HAVE_NDB_BINLOG
-  uint len= strlen(share->table_name);
+  uint len= (uint)strlen(share->table_name);
 #endif
   if (!ndb_schema_share && strcmp(share->db, NDB_REP_DB) == 0 &&
       strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0)
@@ -6765,7 +6765,7 @@ restart_cluster_failure:
   int have_injector_mutex_lock= 0;
   do_ndbcluster_binlog_close_connection= BCCC_exit;
 
-  if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
+  if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb(thd)))
   {
     sql_print_error("Could not allocate Thd_ndb object");
     ndb_binlog_thread_running= -1;
@@ -6851,7 +6851,7 @@ restart_cluster_failure:
     {
       LOG_INFO log_info;
       mysql_bin_log.get_current_log(&log_info);
-      int len=  strlen(log_info.log_file_name);
+      int len=  (int)strlen(log_info.log_file_name);
       uint no= 0;
       if ((sscanf(log_info.log_file_name + len - 6, "%u", &no) == 1) &&
           no == 1)
@@ -7710,7 +7710,7 @@ ndbcluster_show_status_binlog(THD* thd,
     ndb_latest_epoch= injector_ndb->getLatestGCI();
     pthread_mutex_unlock(&injector_mutex);
 
-    buflen=
+    buflen= (uint)
       my_snprintf(buf, sizeof(buf),
                   "latest_epoch=%s, "
                   "latest_trans_epoch=%s, "
@@ -7723,7 +7723,7 @@ ndbcluster_show_status_binlog(THD* thd,
                   llstr(ndb_latest_handled_binlog_epoch, buff4),
                   llstr(ndb_latest_applied_binlog_epoch, buff5));
     if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length,
-                   "binlog", strlen("binlog"),
+                   "binlog", (uint)strlen("binlog"),
                    buf, buflen))
       DBUG_RETURN(TRUE);
   }

=== modified file 'sql/ha_ndbcluster_binlog.h'
--- a/sql/ha_ndbcluster_binlog.h	2011-09-07 22:50:01 +0000
+++ b/sql/ha_ndbcluster_binlog.h	2011-10-20 16:18:28 +0000
@@ -299,7 +299,7 @@ ndbcluster_show_status_binlog(THD* thd,
   the ndb binlog code
 */
 int cmp_frm(const NDBTAB *ndbtab, const void *pack_data,
-            uint pack_length);
+            size_t pack_length);
 int ndbcluster_find_all_files(THD *thd);
 
 char *ndb_pack_varchar(const NDBCOL *col, char *buf,

=== modified file 'sql/ha_ndbcluster_connection.cc'
--- a/sql/ha_ndbcluster_connection.cc	2011-06-30 15:59:25 +0000
+++ b/sql/ha_ndbcluster_connection.cc	2011-10-28 09:32:10 +0000
@@ -306,4 +306,117 @@ void ndb_get_connection_stats(Uint64* st
   }
 }
 
+static ST_FIELD_INFO ndb_transid_mysql_connection_map_fields_info[] =
+{
+  {
+    "mysql_connection_id",
+    MY_INT64_NUM_DECIMAL_DIGITS,
+    MYSQL_TYPE_LONGLONG,
+    0,
+    MY_I_S_UNSIGNED,
+    "",
+    SKIP_OPEN_TABLE
+  },
+
+  {
+    "node_id",
+    MY_INT64_NUM_DECIMAL_DIGITS,
+    MYSQL_TYPE_LONG,
+    0,
+    MY_I_S_UNSIGNED,
+    "",
+    SKIP_OPEN_TABLE
+  },
+  {
+    "ndb_transid",
+    MY_INT64_NUM_DECIMAL_DIGITS,
+    MYSQL_TYPE_LONGLONG,
+    0,
+    MY_I_S_UNSIGNED,
+    "",
+    SKIP_OPEN_TABLE
+  },
+
+  { 0, 0, MYSQL_TYPE_NULL, 0, 0, "", SKIP_OPEN_TABLE }
+};
+
+static
+int
+ndb_transid_mysql_connection_map_fill_table(THD* thd, TABLE_LIST* tables, COND* cond)
+{
+  DBUG_ENTER("ndb_transid_mysql_connection_map_fill_table");
+
+  const bool all = (check_global_access(thd, PROCESS_ACL) == 0);
+  const ulonglong self = thd_get_thread_id(thd);
+
+  TABLE* table= tables->table;
+  for (uint i = 0; i<g_pool_alloc; i++)
+  {
+    if (g_pool[i])
+    {
+      g_pool[i]->lock_ndb_objects();
+      const Ndb * p = g_pool[i]->get_next_ndb_object(0);
+      while (p)
+      {
+        Uint64 connection_id = p->getCustomData64();
+        if ((connection_id == self) || all)
+        {
+          table->field[0]->set_notnull();
+          table->field[0]->store(p->getCustomData64(), true);
+          table->field[1]->set_notnull();
+          table->field[1]->store(g_pool[i]->node_id());
+          table->field[2]->set_notnull();
+          table->field[2]->store(p->getNextTransactionId(), true);
+          schema_table_store_record(thd, table);
+        }
+        p = g_pool[i]->get_next_ndb_object(p);
+      }
+      g_pool[i]->unlock_ndb_objects();
+    }
+  }
+
+  DBUG_RETURN(0);
+}
+
+static
+int
+ndb_transid_mysql_connection_map_init(void *p)
+{
+  DBUG_ENTER("ndb_transid_mysql_connection_map_init");
+  ST_SCHEMA_TABLE* schema = reinterpret_cast<ST_SCHEMA_TABLE*>(p);
+  schema->fields_info = ndb_transid_mysql_connection_map_fields_info;
+  schema->fill_table = ndb_transid_mysql_connection_map_fill_table;
+  DBUG_RETURN(0);
+}
+
+static
+int
+ndb_transid_mysql_connection_map_deinit(void *p)
+{
+  DBUG_ENTER("ndb_transid_mysql_connection_map_deinit");
+  DBUG_RETURN(0);
+}
+
+#include <mysql/plugin.h>
+static struct st_mysql_information_schema i_s_info =
+{
+  MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION
+};
+
+struct st_mysql_plugin i_s_ndb_transid_mysql_connection_map_plugin =
+{
+  MYSQL_INFORMATION_SCHEMA_PLUGIN,
+  &i_s_info,
+  "ndb_transid_mysql_connection_map",
+  "Oracle Corporation",
+  "Map between mysql connection id and ndb transaction id",
+  PLUGIN_LICENSE_GPL,
+  ndb_transid_mysql_connection_map_init,
+  ndb_transid_mysql_connection_map_deinit,
+  0x0001,
+  NULL,
+  NULL,
+  NULL
+};
+
 #endif /* WITH_NDBCLUSTER_STORAGE_ENGINE */

=== modified file 'sql/ha_ndbinfo.cc'
--- a/sql/ha_ndbinfo.cc	2011-08-27 09:54:26 +0000
+++ b/sql/ha_ndbinfo.cc	2011-10-20 16:18:28 +0000
@@ -245,7 +245,7 @@ bool ha_ndbinfo::get_error_message(int e
   if (!message)
     DBUG_RETURN(false);
 
-  buf->set(message, strlen(message), &my_charset_bin);
+  buf->set(message, (uint32)strlen(message), &my_charset_bin);
   DBUG_PRINT("exit", ("message: %s", buf->ptr()));
   DBUG_RETURN(false);
 }
@@ -736,7 +736,9 @@ ndbinfo_find_files(handlerton *hton, THD
 
 handlerton* ndbinfo_hton;
 
-int ndbinfo_init(void *plugin)
+static
+int
+ndbinfo_init(void *plugin)
 {
   DBUG_ENTER("ndbinfo_init");
 
@@ -779,7 +781,9 @@ int ndbinfo_init(void *plugin)
   DBUG_RETURN(0);
 }
 
-int ndbinfo_deinit(void *plugin)
+static
+int
+ndbinfo_deinit(void *plugin)
 {
   DBUG_ENTER("ndbinfo_deinit");
 
@@ -804,6 +808,27 @@ struct st_mysql_sys_var* ndbinfo_system_
   NULL
 };
 
+struct st_mysql_storage_engine ndbinfo_storage_engine=
+{
+  MYSQL_HANDLERTON_INTERFACE_VERSION
+};
+
+struct st_mysql_plugin ndbinfo_plugin =
+{
+  MYSQL_STORAGE_ENGINE_PLUGIN,
+  &ndbinfo_storage_engine,
+  "ndbinfo",
+  "Sun Microsystems Inc.",
+  "MySQL Cluster system information storage engine",
+  PLUGIN_LICENSE_GPL,
+  ndbinfo_init,               /* plugin init */
+  ndbinfo_deinit,             /* plugin deinit */
+  0x0001,                     /* plugin version */
+  NULL,                       /* status variables */
+  ndbinfo_system_variables,   /* system variables */
+  NULL                        /* config options */
+};
+
 template class Vector<const NdbInfoRecAttr*>;
 
 #endif

=== modified file 'sql/ha_ndbinfo.h'
--- a/sql/ha_ndbinfo.h	2011-06-30 15:59:25 +0000
+++ b/sql/ha_ndbinfo.h	2011-10-17 12:43:31 +0000
@@ -20,9 +20,6 @@
 
 #include <mysql/plugin.h>
 
-int ndbinfo_init(void *plugin);
-int ndbinfo_deinit(void *plugin);
-
 class ha_ndbinfo: public handler
 {
 public:

=== modified file 'sql/sql_parse.cc'
--- a/sql/sql_parse.cc	2011-06-30 15:59:25 +0000
+++ b/sql/sql_parse.cc	2011-10-17 09:17:54 +0000
@@ -1476,6 +1476,14 @@ bool dispatch_command(enum enum_server_c
   case COM_REFRESH:
   {
     int not_used;
+#ifndef MCP_BUG13001491
+    /*
+      Initialize thd->lex since it's used in many base functions, such as
+      open_tables(). Otherwise, it remains uninitialized and may cause a crash
+      during execution of COM_REFRESH.
+    */
+    lex_start(thd);
+#endif
     status_var_increment(thd->status_var.com_stat[SQLCOM_FLUSH]);
     ulong options= (ulong) (uchar) packet[0];
     if (check_global_access(thd,RELOAD_ACL))
@@ -6978,7 +6986,18 @@ bool reload_acl_and_cache(THD *thd, ulon
     if (ha_flush_logs(NULL))
       result=1;
     if (flush_error_log())
+#ifndef MCP_BUG13001491
+    {
+      /*
+        When flush_error_log() failed, my_error() has not been called.
+        So, we have to do it here to keep the protocol.
+      */
+      my_error(ER_UNKNOWN_ERROR, MYF(0));
+      result= 1;
+    }
+#else
       result=1;
+#endif
   }
 #ifdef HAVE_QUERY_CACHE
   if (options & REFRESH_QUERY_CACHE_FREE)

=== modified file 'storage/ndb/include/kernel/signaldata/DiGetNodes.hpp'
--- a/storage/ndb/include/kernel/signaldata/DiGetNodes.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/include/kernel/signaldata/DiGetNodes.hpp	2011-10-23 08:34:49 +0000
@@ -64,7 +64,10 @@ private:
   Uint32 hashValue;
   Uint32 distr_key_indicator;
   Uint32 unused;
-  Uint32 jamBuffer[2];
+  union {
+    void * jamBufferPtr;
+    Uint32 jamBufferStorage[2];
+  };
 };
 
 #endif

=== added file 'storage/ndb/include/kernel/statedesc.hpp'
--- a/storage/ndb/include/kernel/statedesc.hpp	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/include/kernel/statedesc.hpp	2011-10-28 10:16:23 +0000
@@ -0,0 +1,32 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#ifndef NDB_STATE_DESC_H
+#define NDB_STATE_DESC_H
+
+struct ndbkernel_state_desc
+{
+  unsigned value;
+  const char * name;
+  const char * friendly_name;
+  const char * description;
+};
+
+extern struct ndbkernel_state_desc g_dbtc_apiconnect_state_desc[];
+extern struct ndbkernel_state_desc g_dblqh_tcconnect_state_desc[];
+
+#endif

=== modified file 'storage/ndb/include/ndbapi/Ndb.hpp'
--- a/storage/ndb/include/ndbapi/Ndb.hpp	2011-09-09 10:48:14 +0000
+++ b/storage/ndb/include/ndbapi/Ndb.hpp	2011-10-17 12:43:31 +0000
@@ -1762,7 +1762,19 @@ public:
   /* Get/Set per-Ndb custom data pointer */
   void setCustomData(void*);
   void* getCustomData() const;
-  
+
+  /* Get/Set per-Ndb custom data as a 64-bit value */
+  /* NOTE: shares storage with the void* custom data pointer,
+   * i.e. can not be used together with setCustomData()
+   */
+  void setCustomData64(Uint64);
+  Uint64 getCustomData64() const;
+
+  /**
+   * Get the transaction id that the next startTransaction()
+   * on this Ndb object will use.
+   */
+  Uint64 getNextTransactionId() const;
+
   /* Some client behaviour counters to assist
    * optimisation
    */

=== modified file 'storage/ndb/include/util/OutputStream.hpp'
--- a/storage/ndb/include/util/OutputStream.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/include/util/OutputStream.hpp	2011-10-24 07:44:52 +0000
@@ -33,6 +33,7 @@ public:
     ATTRIBUTE_FORMAT(printf, 2, 3) = 0;
   virtual int println(const char * fmt, ...)
     ATTRIBUTE_FORMAT(printf, 2, 3) = 0;
+  virtual int write(const void * buf, size_t len) = 0;
   virtual void flush() {};
   virtual void reset_timeout() {};
 };
@@ -48,6 +49,7 @@ public:
     ATTRIBUTE_FORMAT(printf, 2, 3);
   int println(const char * fmt, ...)
     ATTRIBUTE_FORMAT(printf, 2, 3);
+  int write(const void * buf, size_t len);
   void flush() { fflush(f); }
 };
 
@@ -67,6 +69,7 @@ public:
     ATTRIBUTE_FORMAT(printf, 2, 3);
   int println(const char * fmt, ...)
     ATTRIBUTE_FORMAT(printf, 2, 3);
+  int write(const void * buf, size_t len);
 };
 
 
@@ -82,6 +85,7 @@ public:
   int println(const char * fmt, ...)
     ATTRIBUTE_FORMAT(printf, 2, 3);
 
+  int write(const void * buf, size_t len);
   void flush();
 };
 
@@ -92,6 +96,7 @@ public:
   virtual ~NullOutputStream() {}
   int print(const char * /* unused */, ...) { return 1;}
   int println(const char * /* unused */, ...) { return 1;}
+  int write(const void * /* unused */, size_t /* unused */) { return 1;}
 };
 
 #endif

=== modified file 'storage/ndb/src/common/debugger/EventLogger.cpp'
--- a/storage/ndb/src/common/debugger/EventLogger.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/debugger/EventLogger.cpp	2011-10-21 08:59:23 +0000
@@ -1524,7 +1524,7 @@ EventLogger::getText(char * dst, size_t
   if (nodeId != 0)
   {
     BaseString::snprintf(dst, dst_len, "Node %u: ", nodeId);
-    pos= strlen(dst);
+    pos= (int)strlen(dst);
   }
   if (dst_len-pos > 0)
     textF(dst+pos, dst_len-pos, theData, len);

=== modified file 'storage/ndb/src/common/debugger/SignalLoggerManager.cpp'
--- a/storage/ndb/src/common/debugger/SignalLoggerManager.cpp	2011-01-30 20:56:00 +0000
+++ b/storage/ndb/src/common/debugger/SignalLoggerManager.cpp	2011-10-21 08:59:23 +0000
@@ -126,7 +126,7 @@ getParameter(char *blocks[NO_OF_BLOCKS],
   char * tmp = copy;
   bool done = false;
   while(!done){
-    int len = strcspn(tmp, ", ;:\0");
+    int len = (int)strcspn(tmp, ", ;:\0");
     if(len == 0)
       done = true;
     else {

=== modified file 'storage/ndb/src/common/logger/LogHandler.cpp'
--- a/storage/ndb/src/common/logger/LogHandler.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/logger/LogHandler.cpp	2011-10-21 08:59:23 +0000
@@ -175,7 +175,7 @@ LogHandler::parseParams(const BaseString
   bool ret = true;
 
   _params.split(v_args, ",");
-  for(size_t i=0; i < v_args.size(); i++) {
+  for(unsigned i=0; i < v_args.size(); i++) {
     Vector<BaseString> v_param_value;
     if(v_args[i].split(v_param_value, "=", 2) != 2)
     {

=== modified file 'storage/ndb/src/common/logger/Logger.cpp'
--- a/storage/ndb/src/common/logger/Logger.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/logger/Logger.cpp	2011-10-21 08:59:23 +0000
@@ -212,13 +212,12 @@ Logger::addHandler(LogHandler* pHandler)
 
 bool
 Logger::addHandler(const BaseString &logstring, int *err, int len, char* errStr) {
-  size_t i;
   Vector<BaseString> logdest;
   DBUG_ENTER("Logger::addHandler");
 
   logstring.split(logdest, ";");
 
-  for(i = 0; i < logdest.size(); i++) {
+  for(unsigned i = 0; i < logdest.size(); i++) {
     DBUG_PRINT("info",("adding: %s",logdest[i].c_str()));
 
     Vector<BaseString> v_type_args;

=== modified file 'storage/ndb/src/common/portlib/NdbConfig.c'
--- a/storage/ndb/src/common/portlib/NdbConfig.c	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/portlib/NdbConfig.c	2011-10-21 08:59:23 +0000
@@ -31,14 +31,14 @@ NdbConfig_get_path(int *_len)
   const char *path= NdbEnv_GetEnv("NDB_HOME", 0, 0);
   int path_len= 0;
   if (path)
-    path_len= strlen(path);
+    path_len= (int)strlen(path);
   if (path_len == 0 && datadir_path) {
     path= datadir_path;
-    path_len= strlen(path);
+    path_len= (int)strlen(path);
   }
   if (path_len == 0) {
     path= ".";
-    path_len= strlen(path);
+    path_len= (int)strlen(path);
   }
   if (_len)
     *_len= path_len;
@@ -68,7 +68,7 @@ NdbConfig_NdbCfgName(int with_ndb_home){
 
   if (with_ndb_home) {
     buf= NdbConfig_AllocHomePath(PATH_MAX);
-    len= strlen(buf);
+    len= (int)strlen(buf);
   } else
     buf= NdbMem_Allocate(PATH_MAX);
   basestring_snprintf(buf+len, PATH_MAX, "Ndb.cfg");
@@ -87,7 +87,7 @@ char *get_prefix_buf(int len, int node_i
                         NdbHost_GetProcessId());
   tmp_buf[sizeof(tmp_buf)-1]= 0;
 
-  buf= NdbConfig_AllocHomePath(len+strlen(tmp_buf));
+  buf= NdbConfig_AllocHomePath(len+(int)strlen(tmp_buf));
   strcat(buf, tmp_buf);
   return buf;
 }
@@ -95,7 +95,7 @@ char *get_prefix_buf(int len, int node_i
 char* 
 NdbConfig_ErrorFileName(int node_id){
   char *buf= get_prefix_buf(PATH_MAX, node_id);
-  int len= strlen(buf);
+  int len= (int)strlen(buf);
   basestring_snprintf(buf+len, PATH_MAX, "_error.log");
   return buf;
 }
@@ -103,7 +103,7 @@ NdbConfig_ErrorFileName(int node_id){
 char*
 NdbConfig_ClusterLogFileName(int node_id){
   char *buf= get_prefix_buf(PATH_MAX, node_id);
-  int len= strlen(buf);
+  int len= (int)strlen(buf);
   basestring_snprintf(buf+len, PATH_MAX, "_cluster.log");
   return buf;
 }
@@ -111,7 +111,7 @@ NdbConfig_ClusterLogFileName(int node_id
 char*
 NdbConfig_SignalLogFileName(int node_id){
   char *buf= get_prefix_buf(PATH_MAX, node_id);
-  int len= strlen(buf);
+  int len= (int)strlen(buf);
   basestring_snprintf(buf+len, PATH_MAX, "_signal.log");
   return buf;
 }
@@ -119,7 +119,7 @@ NdbConfig_SignalLogFileName(int node_id)
 char*
 NdbConfig_TraceFileName(int node_id, int file_no){
   char *buf= get_prefix_buf(PATH_MAX, node_id);
-  int len= strlen(buf);
+  int len= (int)strlen(buf);
   basestring_snprintf(buf+len, PATH_MAX, "_trace.log.%u", file_no);
   return buf;
 }
@@ -127,7 +127,7 @@ NdbConfig_TraceFileName(int node_id, int
 char*
 NdbConfig_NextTraceFileName(int node_id){
   char *buf= get_prefix_buf(PATH_MAX, node_id);
-  int len= strlen(buf);
+  int len= (int)strlen(buf);
   basestring_snprintf(buf+len, PATH_MAX, "_trace.log.next");
   return buf;
 }
@@ -135,7 +135,7 @@ NdbConfig_NextTraceFileName(int node_id)
 char*
 NdbConfig_PidFileName(int node_id){
   char *buf= get_prefix_buf(PATH_MAX, node_id);
-  int len= strlen(buf);
+  int len= (int)strlen(buf);
   basestring_snprintf(buf+len, PATH_MAX, ".pid");
   return buf;
 }
@@ -143,7 +143,7 @@ NdbConfig_PidFileName(int node_id){
 char*
 NdbConfig_StdoutFileName(int node_id){
   char *buf= get_prefix_buf(PATH_MAX, node_id);
-  int len= strlen(buf);
+  int len= (int)strlen(buf);
   basestring_snprintf(buf+len, PATH_MAX, "_out.log");
   return buf;
 }

=== modified file 'storage/ndb/src/common/portlib/NdbDir.cpp'
--- a/storage/ndb/src/common/portlib/NdbDir.cpp	2011-01-30 23:13:49 +0000
+++ b/storage/ndb/src/common/portlib/NdbDir.cpp	2011-10-21 08:59:23 +0000
@@ -280,7 +280,7 @@ NdbDir::remove_recursive(const char* dir
     fprintf(stderr, "Too long path to remove: '%s'\n", dir);
     return false;
   }
-  int start_len = strlen(path);
+  int start_len = (int)strlen(path);
 
   const char* name;
   NdbDir::Iterator iter;
@@ -298,7 +298,7 @@ loop:
       if ((strcmp(".", name) == 0) || (strcmp("..", name) == 0))
         continue;
 
-      int end_len, len = strlen(path);
+      int end_len, len = (int)strlen(path);
       if ((end_len = basestring_snprintf(path + len, sizeof(path) - len,
                                          "%s", name)) < 0)
       {
@@ -329,7 +329,7 @@ loop:
     }
     iter.close();
 
-    int len = strlen(path);
+    int len = (int)strlen(path);
     path[len - 1] = 0; // remove ending slash
 
     char * prev_slash = strrchr(path, IF_WIN('\\', '/'));

=== modified file 'storage/ndb/src/common/portlib/NdbThread.c'
--- a/storage/ndb/src/common/portlib/NdbThread.c	2011-10-07 07:37:47 +0000
+++ b/storage/ndb/src/common/portlib/NdbThread.c	2011-10-21 08:59:23 +0000
@@ -253,7 +253,11 @@ NdbThread_Create(NDB_THREAD_FUNC *p_thre
     thread_stack_size = PTHREAD_STACK_MIN;
 #endif
   DBUG_PRINT("info", ("stack_size: %llu", (ulonglong)thread_stack_size));
+#ifndef _WIN32
   pthread_attr_setstacksize(&thread_attr, thread_stack_size);
+#else
+  pthread_attr_setstacksize(&thread_attr, (DWORD)thread_stack_size);
+#endif
 #ifdef USE_PTHREAD_EXTRAS
   /* Guard stack overflow with a 2k databuffer */
   pthread_attr_setguardsize(&thread_attr, 2048);

=== modified file 'storage/ndb/src/common/portlib/ndb_daemon.cc'
--- a/storage/ndb/src/common/portlib/ndb_daemon.cc	2011-01-30 23:13:49 +0000
+++ b/storage/ndb/src/common/portlib/ndb_daemon.cc	2011-10-20 16:18:28 +0000
@@ -315,7 +315,7 @@ do_files(const char *pidfile_name, const
                 pidfile_name, errno);
 
   char buf[32];
-  int length = my_snprintf(buf, sizeof(buf), "%ld",
+  int length = (int)my_snprintf(buf, sizeof(buf), "%ld",
                            (long)NdbHost_GetProcessId());
   if (write(pidfd, buf, length) != length)
     return ERR1("Failed to write pid to pidfile '%s', errno: %d",

=== modified file 'storage/ndb/src/common/transporter/TransporterRegistry.cpp'
--- a/storage/ndb/src/common/transporter/TransporterRegistry.cpp	2011-09-09 10:48:14 +0000
+++ b/storage/ndb/src/common/transporter/TransporterRegistry.cpp	2011-10-21 08:59:23 +0000
@@ -2418,7 +2418,7 @@ TransporterRegistry::print_transporters(
 
   out << "<<" << endl;
 
-  for (size_t i= 0; i < m_transporter_interface.size(); i++){
+  for (unsigned i= 0; i < m_transporter_interface.size(); i++){
     Transporter_interface tf= m_transporter_interface[i];
 
     out << i

=== modified file 'storage/ndb/src/common/util/BaseString.cpp'
--- a/storage/ndb/src/common/util/BaseString.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/util/BaseString.cpp	2011-10-21 08:59:23 +0000
@@ -50,7 +50,7 @@ BaseString::BaseString(const char* s)
       return;
     }
     memcpy(m_chr, s, n + 1);
-    m_len = n;
+    m_len = (unsigned)n;
 }
 
 BaseString::BaseString(const char * s, size_t n)
@@ -70,7 +70,7 @@ BaseString::BaseString(const char * s, s
   }
   memcpy(m_chr, s, n);
   m_chr[n] = 0;
-  m_len = n;
+  m_len = (unsigned)n;
 }
 
 BaseString::BaseString(const BaseString& str)
@@ -93,7 +93,7 @@ BaseString::BaseString(const BaseString&
     }
     memcpy(t, s, n + 1);
     m_chr = t;
-    m_len = n;
+    m_len = (unsigned)n;
 }
 
 BaseString::~BaseString()
@@ -125,7 +125,7 @@ BaseString::assign(const char* s)
     }
     delete[] m_chr;
     m_chr = t;
-    m_len = n;
+    m_len = (unsigned)n;
     return *this;
 }
 
@@ -145,7 +145,7 @@ BaseString::assign(const char* s, size_t
     }
     delete[] m_chr;
     m_chr = t;
-    m_len = n;
+    m_len = (unsigned)n;
     return *this;
 }
 
@@ -178,7 +178,7 @@ BaseString::append(const char* s)
     }
     delete[] m_chr;
     m_chr = t;
-    m_len += n;
+    m_len += (unsigned)n;
     return *this;
 }
 
@@ -196,7 +196,7 @@ BaseString::append(const BaseString& str
 BaseString&
 BaseString::append(const Vector<BaseString> &vector,
 		   const BaseString &separator) {
-    for(size_t i=0;i<vector.size(); i++) {
+    for(unsigned i=0;i<vector.size(); i++) {
 	append(vector[i]);
 	if(i<vector.size()-1)
 	    append(separator);
@@ -232,7 +232,7 @@ BaseString::assfmt(const char *fmt, ...)
     l = basestring_vsnprintf(m_chr, l, fmt, ap);
     assert(l == (int)strlen(m_chr));
     va_end(ap);
-    m_len = strlen(m_chr);
+    m_len = (unsigned)strlen(m_chr);
     return *this;
 }
 
@@ -279,7 +279,7 @@ BaseString::split(Vector<BaseString> &v,
 		  int maxSize) const {
     char *str = strdup(m_chr);
     int i, start, len, num = 0;
-    len = strlen(str);
+    len = (int)strlen(str);
     for(start = i = 0;
 	(i <= len) && ( (maxSize<0) || ((int)v.size()<=maxSize-1) );
 	i++) {
@@ -360,7 +360,7 @@ BaseString::argify(const char *argv0, co
     char *tmp = new char[strlen(src)+1];
     if (tmp == NULL)
     {
-      for(size_t i = 0; i < vargv.size(); i++)
+      for(unsigned i = 0; i < vargv.size(); i++)
         free(vargv[i]);
       errno = ENOMEM;
       return NULL;
@@ -413,7 +413,7 @@ BaseString::argify(const char *argv0, co
           if (t == NULL)
           {
             delete[] tmp;
-            for(size_t i = 0; i < vargv.size(); i++)
+            for(unsigned i = 0; i < vargv.size(); i++)
               free(vargv[i]);
             errno = ENOMEM;
             return NULL;
@@ -422,7 +422,7 @@ BaseString::argify(const char *argv0, co
           {
             free(t);
             delete[] tmp;
-            for(size_t i = 0; i < vargv.size(); i++)
+            for(unsigned i = 0; i < vargv.size(); i++)
               free(vargv[i]);
             return NULL;
           }
@@ -433,7 +433,7 @@ BaseString::argify(const char *argv0, co
     delete[] tmp;
     if (vargv.push_back(NULL))
     {
-      for(size_t i = 0; i < vargv.size(); i++)
+      for(unsigned i = 0; i < vargv.size(); i++)
         free(vargv[i]);
       return NULL;
     }
@@ -444,13 +444,13 @@ BaseString::argify(const char *argv0, co
     char **argv = (char **)malloc(sizeof(*argv) * (vargv.size()));
     if(argv == NULL)
     {
-        for(size_t i = 0; i < vargv.size(); i++)
+        for(unsigned i = 0; i < vargv.size(); i++)
           free(vargv[i]);
         errno = ENOMEM;
 	return NULL;
     }
     
-    for(size_t i = 0; i < vargv.size(); i++){
+    for(unsigned i = 0; i < vargv.size(); i++){
 	argv[i] = vargv[i];
     }
     
@@ -460,13 +460,13 @@ BaseString::argify(const char *argv0, co
 BaseString&
 BaseString::trim(const char * delim){
     trim(m_chr, delim);
-    m_len = strlen(m_chr);
+    m_len = (unsigned)strlen(m_chr);
     return * this;
 }
 
 char*
 BaseString::trim(char * str, const char * delim){
-    int len = strlen(str) - 1;
+    int len = (int)strlen(str) - 1;
     for(; len > 0 && strchr(delim, str[len]); len--)
       ;
 

=== modified file 'storage/ndb/src/common/util/ConfigValues.cpp'
--- a/storage/ndb/src/common/util/ConfigValues.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/util/ConfigValues.cpp	2011-10-21 08:59:23 +0000
@@ -593,7 +593,7 @@ ConfigValues::getPackedSize() const {
 	break;
       case StringType:
 	size += 8; // key + len
-	size += mod4(strlen(* getString(m_values[i+1])) + 1);
+	size += mod4((unsigned)strlen(* getString(m_values[i+1])) + 1);
 	break;
       case InvalidType:
       default:

=== modified file 'storage/ndb/src/common/util/File.cpp'
--- a/storage/ndb/src/common/util/File.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/util/File.cpp	2011-10-21 08:59:23 +0000
@@ -148,13 +148,13 @@ File_class::close()
 int 
 File_class::read(void* buf, size_t itemSize, size_t nitems) const
 {
-  return ::fread(buf, itemSize,  nitems, m_file);
+  return (int)::fread(buf, itemSize,  nitems, m_file);
 }
 
 int 
 File_class::readChar(char* buf, long start, long length) const
 {
-  return ::fread((void*)&buf[start], 1, length, m_file);
+  return (int)::fread((void*)&buf[start], 1, length, m_file);
 }
 
 int 
@@ -166,13 +166,13 @@ File_class::readChar(char* buf)
 int 
 File_class::write(const void* buf, size_t size_arg, size_t nitems)
 {
-  return ::fwrite(buf, size_arg, nitems, m_file);
+  return (int)::fwrite(buf, size_arg, nitems, m_file);
 }
  
 int
 File_class::writeChar(const char* buf, long start, long length)
 {
-  return ::fwrite((const void*)&buf[start], sizeof(char), length, m_file);
+  return (int)::fwrite((const void*)&buf[start], sizeof(char), length, m_file);
 }
 
 int 

=== modified file 'storage/ndb/src/common/util/InputStream.cpp'
--- a/storage/ndb/src/common/util/InputStream.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/util/InputStream.cpp	2011-10-21 08:59:23 +0000
@@ -57,7 +57,7 @@ SocketInputStream::gets(char * buf, int
     m_startover= false;
   }
   else
-    offset= strlen(buf);
+    offset= (int)strlen(buf);
 
   int time= 0;
   int res = readln_socket(m_socket, m_timeout_remain, &time,

=== modified file 'storage/ndb/src/common/util/NdbSqlUtil.cpp'
--- a/storage/ndb/src/common/util/NdbSqlUtil.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/util/NdbSqlUtil.cpp	2011-10-21 08:59:23 +0000
@@ -693,7 +693,7 @@ NdbSqlUtil::likeChar(const void* info, c
   const char* v2 = (const char*)p2;
   CHARSET_INFO* cs = (CHARSET_INFO*)(info);
   // strip end spaces to match (incorrect) MySQL behaviour
-  n1 = (*cs->cset->lengthsp)(cs, v1, n1);
+  n1 = (unsigned)(*cs->cset->lengthsp)(cs, v1, n1);
   int k = (*cs->coll->wildcmp)(cs, v1, v1 + n1, v2, v2 + n2, ndb_wild_prefix, ndb_wild_one, ndb_wild_many);
   return k == 0 ? 0 : +1;
 }
@@ -980,13 +980,13 @@ NdbSqlUtil::strnxfrm_bug7284(CHARSET_INF
   if (n1 <= 0)
     return -1;
   // strxfrm to binary
-  int n2 = ndb_strnxfrm(cs, xsp, sizeof(xsp), nsp, n1);
+  int n2 = (int)ndb_strnxfrm(cs, xsp, sizeof(xsp), nsp, n1);
   if (n2 <= 0)
     return -1;
   // XXX bug workaround - strnxfrm may not write full string
   memset(dst, 0x0, dstLen);
   // strxfrm argument string - returns no error indication
-  int n3 = ndb_strnxfrm(cs, dst, dstLen, src, srcLen);
+  int n3 = (int)ndb_strnxfrm(cs, dst, dstLen, src, srcLen);
   // pad with strxfrm-ed space chars
   int n4 = n3;
   while (n4 < (int)dstLen) {

=== modified file 'storage/ndb/src/common/util/OutputStream.cpp'
--- a/storage/ndb/src/common/util/OutputStream.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/common/util/OutputStream.cpp	2011-10-24 07:44:52 +0000
@@ -43,6 +43,12 @@ FileOutputStream::println(const char * f
   return ret + fprintf(f, "\n");
 }
 
+int
+FileOutputStream::write(const void * buf, size_t len)
+{
+  return (int)fwrite(buf, len, 1, f);
+}
+
 SocketOutputStream::SocketOutputStream(NDB_SOCKET_TYPE socket,
 				       unsigned write_timeout_ms) :
   m_socket(socket),
@@ -97,6 +103,28 @@ SocketOutputStream::println(const char *
   return ret;
 }
 
+int
+SocketOutputStream::write(const void * buf, size_t len)
+{
+  if (timedout())
+    return -1;
+
+  int time = 0;
+  int ret = write_socket(m_socket, m_timeout_ms, &time,
+                         (const char*)buf, (int)len);
+  if (ret >= 0)
+  {
+    m_timeout_remain -= time;
+  }
+
+  if ((ret < 0 && errno == SOCKET_ETIMEDOUT) || m_timeout_remain <= 0)
+  {
+    m_timedout = true;
+    ret= -1;
+  }
+  return ret;
+}
+
 #include <UtilBuffer.hpp>
 #include <BaseString.hpp>
 
@@ -172,6 +200,12 @@ BufferedSockOutputStream::println(const
   return 0;
 }
 
+int
+BufferedSockOutputStream::write(const void * buf, size_t len)
+{
+  return m_buffer.append(buf, len);
+}
+
 void BufferedSockOutputStream::flush(){
   int elapsed = 0;
   if (write_socket(m_socket, m_timeout_ms, &elapsed,

=== modified file 'storage/ndb/src/common/util/Parser.cpp'
--- a/storage/ndb/src/common/util/Parser.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/util/Parser.cpp	2011-10-21 08:59:23 +0000
@@ -78,7 +78,7 @@ bool
 Empty(const char * str){
   if(str == 0)
     return true;
-  const int len = strlen(str);
+  const int len = (int)strlen(str);
   if(len == 0)
     return false;
   for(int i = 0; i<len; i++)
@@ -96,7 +96,7 @@ void
 trim(char * str){
   if(str == NULL)
     return;
-  int len = strlen(str);
+  int len = (int)strlen(str);
   for(len--; str[len] == '\n' || str[len] == ' ' || str[len] == '\t'; len--)
     str[len] = 0;
   
@@ -156,7 +156,7 @@ ParserImpl::run(Context * ctx, const cla
     return false;
   }
 
-  int last= strlen(ctx->m_currentToken);
+  int last= (int)strlen(ctx->m_currentToken);
   if(last>0)
     last--;
 

=== modified file 'storage/ndb/src/common/util/Properties.cpp'
--- a/storage/ndb/src/common/util/Properties.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/util/Properties.cpp	2011-10-21 08:59:23 +0000
@@ -662,10 +662,10 @@ PropertiesImpl::getPackedSize(Uint32 pLe
       sz += 4; // Type
       sz += 4; // Name Len
       sz += 4; // Value Len
-      sz += mod4(pLen + strlen(content[i]->name)); // Name
+      sz += mod4(pLen + (unsigned)strlen(content[i]->name)); // Name
       switch(content[i]->valueType){
       case PropertiesType_char:
-	sz += mod4(strlen((char *)content[i]->value));
+	sz += mod4((unsigned)strlen((char *)content[i]->value));
 	break;
       case PropertiesType_Uint32:
 	sz += mod4(4);
@@ -734,7 +734,7 @@ PropertiesImpl::pack(Uint32 *& buf, cons
   CharBuf charBuf;
   
   for(unsigned int i = 0; i<items; i++){
-    const int strLenName      = strlen(content[i]->name);
+    const int strLenName      = (int)strlen(content[i]->name);
     
     if(content[i]->valueType == PropertiesType_Properties){
       charBuf.clear();

=== modified file 'storage/ndb/src/common/util/ndb_init.cpp'
--- a/storage/ndb/src/common/util/ndb_init.cpp	2011-09-27 17:28:13 +0000
+++ b/storage/ndb/src/common/util/ndb_init.cpp	2011-10-20 16:18:28 +0000
@@ -56,7 +56,7 @@ ndb_init_internal()
   {
     {
       const char* err = "ndb_init() failed - exit\n";
-      int res = write(2, err, strlen(err));
+      int res = (int)write(2, err, (unsigned)strlen(err));
       (void)res;
       exit(1);
     }
@@ -79,7 +79,7 @@ ndb_init()
     if (my_init())
     {
       const char* err = "my_init() failed - exit\n";
-      int res = write(2, err, strlen(err));
+      int res = (int)write(2, err, (unsigned)strlen(err));
       (void)res;
       exit(1);
     }

=== modified file 'storage/ndb/src/common/util/ndbzio.c'
--- a/storage/ndb/src/common/util/ndbzio.c	2011-07-04 13:37:56 +0000
+++ b/storage/ndb/src/common/util/ndbzio.c	2011-10-20 18:36:21 +0000
@@ -428,7 +428,7 @@ int read_buffer(ndbzio_stream *s)
   my_errno= 0;
   if (s->stream.avail_in == 0)
   {
-    s->stream.avail_in = my_read(s->file, (uchar *)s->inbuf, AZ_BUFSIZE_READ, MYF(0));
+    s->stream.avail_in = (uInt)my_read(s->file, (uchar *)s->inbuf, AZ_BUFSIZE_READ, MYF(0));
     if(s->stream.avail_in > 0)
       my_errno= 0;
     if (s->stream.avail_in == 0)
@@ -681,7 +681,7 @@ unsigned int ZEXPORT ndbzread ( ndbzio_s
         bytes_read= my_read(s->file, (uchar *)next_out, s->stream.avail_out,
                             MYF(0));
         if(bytes_read>0)
-          s->stream.avail_out -= bytes_read;
+          s->stream.avail_out -= (uInt)bytes_read;
         if (bytes_read == 0)
         {
           s->z_eof = 1;

=== modified file 'storage/ndb/src/common/util/socket_io.cpp'
--- a/storage/ndb/src/common/util/socket_io.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/util/socket_io.cpp	2011-10-21 08:59:23 +0000
@@ -123,7 +123,7 @@ readln_socket(NDB_SOCKET_TYPE socket, in
         *time = 0;
 
 	ptr[0]= 0;
-	return ptr - buf;
+	return (int)(ptr - buf);
       }
     }
     
@@ -226,7 +226,7 @@ vprint_socket(NDB_SOCKET_TYPE socket, in
   } else
     return 0;
 
-  int ret = write_socket(socket, timeout_millis, time, buf2, size);
+  int ret = write_socket(socket, timeout_millis, time, buf2, (int)size);
   if(buf2 != buf)
     free(buf2);
   return ret;
@@ -254,7 +254,7 @@ vprintln_socket(NDB_SOCKET_TYPE socket,
   }
   buf2[size-1]='\n';
 
-  int ret = write_socket(socket, timeout_millis, time, buf2, size);
+  int ret = write_socket(socket, timeout_millis, time, buf2, (int)size);
   if(buf2 != buf)
     free(buf2);
   return ret;

=== modified file 'storage/ndb/src/cw/cpcd/APIService.cpp'
--- a/storage/ndb/src/cw/cpcd/APIService.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/cw/cpcd/APIService.cpp	2011-10-21 08:59:23 +0000
@@ -182,7 +182,7 @@ CPCDAPISession::runSession(){
 
     switch(ctx.m_status){
     case Parser_t::Ok:
-      for(size_t i = 0; i<ctx.m_aliasUsed.size(); i++)
+      for(unsigned i = 0; i<ctx.m_aliasUsed.size(); i++)
 	ndbout_c("Used alias: %s -> %s", 
 		 ctx.m_aliasUsed[i]->name, ctx.m_aliasUsed[i]->realName);
       break;
@@ -199,7 +199,7 @@ CPCDAPISession::runSession(){
 void
 CPCDAPISession::stopSession(){
   CPCD::RequestStatus rs;
-  for(size_t i = 0; i<m_temporaryProcesses.size(); i++){
+  for(unsigned i = 0; i<m_temporaryProcesses.size(); i++){
     Uint32 id = m_temporaryProcesses[i];
     m_cpcd.undefineProcess(&rs, id);
   }
@@ -215,7 +215,7 @@ CPCDAPISession::loadFile(){
 
     switch(ctx.m_status){
     case Parser_t::Ok:
-      for(size_t i = 0; i<ctx.m_aliasUsed.size(); i++)
+      for(unsigned i = 0; i<ctx.m_aliasUsed.size(); i++)
 	ndbout_c("Used alias: %s -> %s", 
 		 ctx.m_aliasUsed[i]->name, ctx.m_aliasUsed[i]->realName);
       break;
@@ -348,7 +348,7 @@ CPCDAPISession::listProcesses(Parser_t::
   m_output->println("%s", "");
   
 
-  for(size_t i = 0; i < proclist->size(); i++) {
+  for(unsigned i = 0; i < proclist->size(); i++) {
     CPCD::Process *p = (*proclist)[i];
 
     m_output->println("process");

=== modified file 'storage/ndb/src/cw/cpcd/CPCD.cpp'
--- a/storage/ndb/src/cw/cpcd/CPCD.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/cw/cpcd/CPCD.cpp	2011-10-21 08:59:23 +0000
@@ -60,7 +60,7 @@ CPCD::findUniqueId() {
     if(id == 0)
       ok = false;
 
-    for(size_t i = 0; i<m_processes.size(); i++) {
+    for(unsigned i = 0; i<m_processes.size(); i++) {
       if(m_processes[i]->m_id == id)
 	ok = false;
     }
@@ -76,7 +76,7 @@ CPCD::defineProcess(RequestStatus * rs,
 
   Guard tmp(m_processes);
 
-  for(size_t i = 0; i<m_processes.size(); i++) {
+  for(unsigned i = 0; i<m_processes.size(); i++) {
     Process * proc = m_processes[i];
     
     if((strcmp(arg->m_name.c_str(), proc->m_name.c_str()) == 0) && 
@@ -106,7 +106,7 @@ CPCD::undefineProcess(CPCD::RequestStatu
   Guard tmp(m_processes);
 
   Process * proc = 0;
-  size_t i;
+  unsigned i;
   for(i = 0; i < m_processes.size(); i++) {
     if(m_processes[i]->m_id == id) {
       proc = m_processes[i];
@@ -142,7 +142,7 @@ CPCD::startProcess(CPCD::RequestStatus *
 
     Guard tmp(m_processes);
     
-    for(size_t i = 0; i < m_processes.size(); i++) {
+    for(unsigned i = 0; i < m_processes.size(); i++) {
       if(m_processes[i]->m_id == id) {
 	proc = m_processes[i];
 	break;
@@ -185,7 +185,7 @@ CPCD::stopProcess(CPCD::RequestStatus *r
   Guard tmp(m_processes);
 
   Process * proc = 0;
-  for(size_t i = 0; i < m_processes.size(); i++) {
+  for(unsigned i = 0; i < m_processes.size(); i++) {
     if(m_processes[i]->m_id == id) {
       proc = m_processes[i];
       break;
@@ -264,7 +264,7 @@ CPCD::saveProcessList(){
     return false;
   }
 
-  for(size_t i = 0; i<m_processes.size(); i++){
+  for(unsigned i = 0; i<m_processes.size(); i++){
     m_processes[i]->print(f);
     fprintf(f, "\n");
 
@@ -367,7 +367,7 @@ CPCD::loadProcessList(){
   sess.loadFile();
   loadingProcessList = false;
 
-  size_t i;
+  unsigned i;
   Vector<int> temporary;
   for(i = 0; i<m_processes.size(); i++){
     Process * proc = m_processes[i];

=== modified file 'storage/ndb/src/cw/cpcd/Monitor.cpp'
--- a/storage/ndb/src/cw/cpcd/Monitor.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/cw/cpcd/Monitor.cpp	2011-10-21 08:59:23 +0000
@@ -63,7 +63,7 @@ CPCD::Monitor::run() {
 
     proc.lock();
 
-    for(size_t i = 0; i < proc.size(); i++) {
+    for(unsigned i = 0; i < proc.size(); i++) {
       proc[i]->monitor();
     }
 

=== modified file 'storage/ndb/src/cw/cpcd/Process.cpp'
--- a/storage/ndb/src/cw/cpcd/Process.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/cw/cpcd/Process.cpp	2011-10-21 08:59:23 +0000
@@ -334,7 +334,7 @@ save_environment(const char *env, Vector
 
 void
 CPCD::Process::do_exec() {
-  size_t i;
+  unsigned i;
 
 #ifdef _WIN32
   Vector<BaseString> saved;
@@ -411,7 +411,7 @@ CPCD::Process::do_exec() {
     }
     int f = fds[i]= open(redirects[i]->c_str(), flags, mode);
     if(f == -1){
-      logger.error("Cannot redirect %ld to/from '%s' : %s\n", i,
+      logger.error("Cannot redirect %u to/from '%s' : %s\n", i,
 		   redirects[i]->c_str(), strerror(errno));
       _exit(1);
     }

=== modified file 'storage/ndb/src/kernel/blocks/ERROR_codes.txt'
--- a/storage/ndb/src/kernel/blocks/ERROR_codes.txt	2011-05-25 15:03:11 +0000
+++ b/storage/ndb/src/kernel/blocks/ERROR_codes.txt	2011-10-28 14:17:25 +0000
@@ -18,7 +18,7 @@ Next NDBCNTR 1002
 Next NDBFS 2000
 Next DBACC 3002
 Next DBTUP 4035
-Next DBLQH 5072
+Next DBLQH 5074
 Next DBDICT 6026
 Next DBDIH 7229
 Next DBTC 8092

=== modified file 'storage/ndb/src/kernel/blocks/backup/read.cpp'
--- a/storage/ndb/src/kernel/blocks/backup/read.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/backup/read.cpp	2011-10-21 08:59:23 +0000
@@ -229,7 +229,7 @@ size_t
 aread(void * buf, size_t sz, size_t n, ndbzio_stream* f)
 {
   int error = 0;
-  unsigned r = ndbzread(f, buf, (sz * n), &error);
+  unsigned r = ndbzread(f, buf, (unsigned)(sz * n), &error);
   if (error || r != (sz * n))
   {
     printf("Failed to read!!");

=== modified file 'storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp'
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp	2011-09-02 17:24:52 +0000
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp	2011-10-14 13:24:26 +0000
@@ -51,6 +51,7 @@
 
 #include <NdbSleep.h>
 #include <SafeCounter.hpp>
+#include <SectionReader.hpp>
 
 #define ZREPORT_MEMORY_USAGE 1000
 
@@ -2678,6 +2679,31 @@ Cmvmi::execTESTSIG(Signal* signal){
     return;
   }
 
+  /**
+   * Testing Api fragmented signal send/receive
+   */
+  if (testType == 40)
+  {
+    /* Fragmented signal sent from Api, we'll check it and return it */
+    Uint32 expectedVal = 0;
+    for (Uint32 s = 0; s < handle.m_cnt; s++)
+    {
+      SectionReader sr(handle.m_ptr[s].i, getSectionSegmentPool());
+      Uint32 received;
+      while (sr.getWord(&received))
+      {
+        ndbrequire(received == expectedVal ++);
+      }
+    }
+
+    /* Now return it back to the Api, no callback, so framework
+     * can time-slice the send
+     */
+    sendFragmentedSignal(ref, GSN_TESTSIG, signal, signal->length(), JBB, &handle);
+
+    return;
+  }
+
   if(signal->getSendersBlockRef() == ref){
     /**
      * Signal from API (not via NodeReceiverGroup)

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-10-07 13:15:08 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-10-21 08:59:23 +0000
@@ -15406,7 +15406,7 @@ Dbdict::createEvent_RT_USER_CREATE(Signa
   }
   r0.getString(evntRecPtr.p->m_eventRec.NAME);
   {
-    int len = strlen(evntRecPtr.p->m_eventRec.NAME);
+    int len = (int)strlen(evntRecPtr.p->m_eventRec.NAME);
     memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len);
 #ifdef EVENT_DEBUG
     printf("CreateEvntReq::RT_USER_CREATE; EventName %s, len %u\n",
@@ -15434,7 +15434,7 @@ sendref:
   }
   r0.getString(evntRecPtr.p->m_eventRec.TABLE_NAME);
   {
-    int len = strlen(evntRecPtr.p->m_eventRec.TABLE_NAME);
+    int len = (int)strlen(evntRecPtr.p->m_eventRec.TABLE_NAME);
     memset(evntRecPtr.p->m_eventRec.TABLE_NAME+len, 0, MAX_TAB_NAME_SIZE-len);
   }
 
@@ -16059,7 +16059,7 @@ Dbdict::createEvent_RT_USER_GET(Signal*
   }
 
   r0.getString(evntRecPtr.p->m_eventRec.NAME);
-  int len = strlen(evntRecPtr.p->m_eventRec.NAME);
+  int len = (int)strlen(evntRecPtr.p->m_eventRec.NAME);
   memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len);
   
   releaseSections(handle);
@@ -17122,7 +17122,7 @@ Dbdict::execDROP_EVNT_REQ(Signal* signal
   }
   r0.getString(evntRecPtr.p->m_eventRec.NAME);
   {
-    int len = strlen(evntRecPtr.p->m_eventRec.NAME);
+    int len = (int)strlen(evntRecPtr.p->m_eventRec.NAME);
     memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len);
 #ifdef EVENT_DEBUG
     printf("DropEvntReq; EventName %s, len %u\n",
@@ -20680,31 +20680,6 @@ Dbdict::createFile_parse(Signal* signal,
     return;
   }
 
-  /**
-   * auto-connect
-   */
-  if (f.FilegroupId == RNIL && f.FilegroupVersion == RNIL)
-  {
-    jam();
-    Filegroup_hash::Iterator it;
-    c_filegroup_hash.first(it);
-    while (!it.isNull())
-    {
-      jam();
-      if ((f.FileType == DictTabInfo::Undofile &&
-           it.curr.p->m_type == DictTabInfo::LogfileGroup) ||
-          (f.FileType == DictTabInfo::Datafile &&
-           it.curr.p->m_type == DictTabInfo::Tablespace))
-      {
-        jam();
-        f.FilegroupId = it.curr.p->key;
-        f.FilegroupVersion = it.curr.p->m_version;
-        break;
-      }
-      c_filegroup_hash.next(it);
-    }
-  }
-
   // Get Filegroup
   FilegroupPtr fg_ptr;
   if(!c_filegroup_hash.find(fg_ptr, f.FilegroupId))
@@ -21433,21 +21408,6 @@ Dbdict::createFilegroup_parse(Signal* si
       setError(error, CreateFilegroupRef::InvalidExtentSize, __LINE__);
       return;
     }
-
-    /**
-     * auto-connect
-     */
-    if (fg.TS_LogfileGroupId == RNIL && fg.TS_LogfileGroupVersion == RNIL)
-    {
-      jam();
-      Filegroup_hash::Iterator it;
-      if (c_filegroup_hash.first(it))
-      {
-        jam();
-        fg.TS_LogfileGroupId = it.curr.p->key;
-        fg.TS_LogfileGroupVersion = it.curr.p->m_version;
-      }
-    }
   }
   else if(fg.FilegroupType == DictTabInfo::LogfileGroup)
   {
@@ -24227,7 +24187,6 @@ Dbdict::releaseSchemaOp(SchemaOpPtr& op_
   ndbrequire(op_ptr.p->m_magic == SchemaOp::DICT_MAGIC);
   c_schemaOpHash.remove(op_ptr);
   c_schemaOpPool.release(op_ptr);
-  ndbrequire(op_ptr.p->m_magic == 0);
   op_ptr.setNull();
 }
 
@@ -24548,11 +24507,17 @@ Dbdict::releaseSchemaTrans(SchemaTransPt
 {
   D("releaseSchemaTrans" << V(trans_ptr.p->trans_key));
 
-  LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
-  SchemaOpPtr op_ptr;
-  while (list.first(op_ptr)) {
-    list.remove(op_ptr);
-    releaseSchemaOp(op_ptr);
+  {
+    /**
+     * Put in own scope...since LocalSchemaOp_list stores back head
+     *   in destructor
+     */
+    LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
+    SchemaOpPtr op_ptr;
+    while (list.first(op_ptr)) {
+      list.remove(op_ptr);
+      releaseSchemaOp(op_ptr);
+    }
   }
   ndbrequire(trans_ptr.p->m_magic == SchemaTrans::DICT_MAGIC);
   ndbrequire(c_schemaTransCount != 0);

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-09-23 08:52:14 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-10-28 14:17:25 +0000
@@ -2372,6 +2372,17 @@ void Dbdih::execSTART_PERMREQ(Signal* si
   CRASH_INSERTION(7122);
   ndbrequire(isMaster());
   ndbrequire(refToNode(retRef) == nodeId);
+  if (c_lcpMasterTakeOverState.state != LMTOS_IDLE)
+  {
+    jam();
+    infoEvent("DIH : Denied request for start permission from %u "
+              "while LCP Master takeover in progress.",
+              nodeId);
+    signal->theData[0] = nodeId;
+    signal->theData[1] = StartPermRef::ZNODE_START_DISALLOWED_ERROR;
+    sendSignal(retRef, GSN_START_PERMREF, signal, 2, JBB);
+    return;
+  }
   if ((c_nodeStartMaster.activeState) ||
       (c_nodeStartMaster.wait != ZFALSE) ||
       ERROR_INSERTED_CLEAR(7175)) {
@@ -9023,7 +9034,7 @@ void Dbdih::execDIGETNODESREQ(Signal* si
   Uint32 fragId, newFragId = RNIL;
   DiGetNodesConf * const conf = (DiGetNodesConf *)&signal->theData[0];
   TabRecord* regTabDesc = tabRecord;
-  EmulatedJamBuffer * jambuf = * (EmulatedJamBuffer**)(req->jamBuffer);
+  EmulatedJamBuffer * jambuf = (EmulatedJamBuffer*)req->jamBufferPtr;
   thrjamEntry(jambuf);
   ptrCheckGuard(tabPtr, ttabFileSize, regTabDesc);
 

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/printSysfile.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/printSysfile.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/printSysfile.cpp	2011-10-21 08:59:23 +0000
@@ -58,7 +58,7 @@ char * getNSString(Uint32 ns){
 
 void
 fill(const char * buf, int mod){
-  int len = strlen(buf)+1;
+  int len = (int)(strlen(buf)+1);
   ndbout << buf << " ";
   while((len % mod) != 0){
     ndbout << " ";

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2011-10-11 08:11:15 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2011-10-28 09:56:57 +0000
@@ -18,6 +18,7 @@
 #ifndef DBLQH_H
 #define DBLQH_H
 
+#ifndef DBLQH_STATE_EXTRACT
 #include <pc.hpp>
 #include <ndb_limits.h>
 #include <SimulatedBlock.hpp>
@@ -41,6 +42,7 @@
 class Dbacc;
 class Dbtup;
 class Lgman;
+#endif // DBLQH_STATE_EXTRACT
 
 #ifdef DBLQH_C
 // Constants
@@ -410,10 +412,15 @@ class Lgman;
  *  - TEST 
  *  - LOG 
  */
-class Dblqh: public SimulatedBlock {
+class Dblqh 
+#ifndef DBLQH_STATE_EXTRACT
+  : public SimulatedBlock
+#endif
+{
   friend class DblqhProxy;
 
 public:
+#ifndef DBLQH_STATE_EXTRACT
   enum LcpCloseState {
     LCP_IDLE = 0,
     LCP_RUNNING = 1,       // LCP is running
@@ -1940,7 +1947,7 @@ public:
     Uint32 usageCountW; // writers
   }; // Size 100 bytes
   typedef Ptr<Tablerec> TablerecPtr;
-
+#endif // DBLQH_STATE_EXTRACT
   struct TcConnectionrec {
     enum ListState {
       NOT_IN_LIST = 0,
@@ -2021,6 +2028,7 @@ public:
       COPY_CONNECTED = 2,
       LOG_CONNECTED = 3
     };
+#ifndef DBLQH_STATE_EXTRACT
     ConnectState connectState;
     UintR copyCountWords;
     Uint32 keyInfoIVal;
@@ -2131,8 +2139,10 @@ public:
       Uint32 m_page_id[2];
       Local_key m_disk_ref[2];
     } m_nr_delete;
+#endif // DBLQH_STATE_EXTRACT
   }; /* p2c: size = 280 bytes */
-  
+
+#ifndef DBLQH_STATE_EXTRACT
   typedef Ptr<TcConnectionrec> TcConnectionrecPtr;
 
   struct TcNodeFailRecord {
@@ -2687,10 +2697,7 @@ private:
 
   bool validate_filter(Signal*);
   bool match_and_print(Signal*, Ptr<TcConnectionrec>);
-  void ndbinfo_write_op(Signal* signal,
-                        DbinfoScanReq * req,
-                        Ndbinfo::Ratelimit * rl,
-                        TcConnectionrecPtr tcPtr);
+  void ndbinfo_write_op(Ndbinfo::Row&, TcConnectionrecPtr tcPtr);
 
   void define_backup(Signal*);
   void execDEFINE_BACKUP_REF(Signal*);
@@ -3281,8 +3288,9 @@ public:
 
   void sendFireTrigConfTc(Signal* signal, BlockReference ref, Uint32 Tdata[]);
   bool check_fire_trig_pass(Uint32 op, Uint32 pass);
+#endif
 };
-
+#ifndef DBLQH_STATE_EXTRACT
 inline
 bool
 Dblqh::ScanRecord::check_scan_batch_completed() const
@@ -3405,5 +3413,5 @@ Dblqh::TRACE_OP_CHECK(const TcConnection
 	   regTcPtr->operation == ZDELETE)) ||
     ERROR_INSERTED(5713);
 }
-
+#endif
 #endif

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-10-11 08:11:15 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-10-28 14:17:25 +0000
@@ -13788,6 +13788,15 @@ void Dblqh::execBACKUP_FRAGMENT_REF(Sign
 void Dblqh::execBACKUP_FRAGMENT_CONF(Signal* signal) 
 {
   jamEntry();
+
+  if (ERROR_INSERTED(5073))
+  {
+    ndbout_c("Delaying BACKUP_FRAGMENT_CONF");
+    sendSignalWithDelay(reference(), GSN_BACKUP_FRAGMENT_CONF, signal, 500,
+                        signal->getLength());
+    return;
+  }
+
   //BackupFragmentConf* conf= (BackupFragmentConf*)signal->getDataPtr();
 
   lcpPtr.i = 0;
@@ -23422,7 +23431,9 @@ void Dblqh::execDBINFO_SCANREQ(Signal *s
       {
         jam();
         ptrCheckGuard(tcPtr, ctcConnectrecFileSize, tcConnectionrec);
-        ndbinfo_write_op(signal, &req, &rl, tcPtr);
+        Ndbinfo::Row row(signal, req);
+        ndbinfo_write_op(row, tcPtr);
+        ndbinfo_send_row(signal, req, row, rl);
         tcPtr.i = tcPtr.p->nextHashRec;
       }
       bucket++;
@@ -23437,24 +23448,16 @@ void Dblqh::execDBINFO_SCANREQ(Signal *s
 }
 
 void
-Dblqh::ndbinfo_write_op(Signal* signal,
-                        DbinfoScanReq * req,
-                        Ndbinfo::Ratelimit * rl,
-                        TcConnectionrecPtr tcPtr)
+Dblqh::ndbinfo_write_op(Ndbinfo::Row & row, TcConnectionrecPtr tcPtr)
 {
-  Ndbinfo::Row row(signal, *req);
   row.write_uint32(getOwnNodeId());
   row.write_uint32(instance());          // block instance
   row.write_uint32(tcPtr.i);             // objid
   row.write_uint32(tcPtr.p->tcBlockref); // tcref
   row.write_uint32(tcPtr.p->applRef);    // apiref
 
-  char transid[64];
-  BaseString::snprintf(transid, sizeof(transid),
-                       "%.8x.%.8x",
-                       tcPtr.p->transid[0],
-                       tcPtr.p->transid[1]);
-  row.write_string(transid);
+  row.write_uint32(tcPtr.p->transid[0]);
+  row.write_uint32(tcPtr.p->transid[1]);
   row.write_uint32(tcPtr.p->tableref);
   row.write_uint32(tcPtr.p->fragmentid);
 
@@ -23511,8 +23514,6 @@ Dblqh::ndbinfo_write_op(Signal* signal,
     row.write_uint32(tcPtr.p->transactionState);
     row.write_uint32(0);
   }
-
-  ndbinfo_send_row(signal, *req, row, *rl);
 }
 
 

=== added file 'storage/ndb/src/kernel/blocks/dblqh/DblqhStateDesc.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhStateDesc.cpp	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhStateDesc.cpp	2011-10-28 09:56:57 +0000
@@ -0,0 +1,76 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#include <kernel/statedesc.hpp>
+#define DBLQH_STATE_EXTRACT
+#include "Dblqh.hpp"
+
+#define SDESC(a,b,c) { (unsigned)Dblqh::TcConnectionrec::a, #a, b, c }
+
+struct ndbkernel_state_desc g_dblqh_tcconnect_state_desc[] =
+{
+  SDESC(IDLE, "Idle", ""),
+  SDESC(WAIT_ACC, "WaitLock", ""),
+  SDESC(WAIT_TUPKEYINFO, "", ""),
+  SDESC(WAIT_ATTR, "WaitData", ""),
+  SDESC(WAIT_TUP, "WaitTup", ""),
+  SDESC(STOPPED, "Stopped", ""),
+  SDESC(LOG_QUEUED, "LogPrepare", ""),
+  SDESC(PREPARED, "Prepared", ""),
+  SDESC(LOG_COMMIT_WRITTEN_WAIT_SIGNAL, "", ""),
+  SDESC(LOG_COMMIT_QUEUED_WAIT_SIGNAL, "", ""),
+
+  // Commit in progress states
+  /* -------------------------------------------------------------------- */
+  SDESC(COMMIT_STOPPED, "CommittingStopped", ""),
+  SDESC(LOG_COMMIT_QUEUED, "Committing", ""),
+  SDESC(COMMIT_QUEUED, "Committing", ""),
+  SDESC(COMMITTED, "Committed", ""),
+  SDESC(WAIT_TUP_COMMIT, "Committing", ""),
+
+  /* -------------------------------------------------------------------- */
+  // Abort in progress states
+  /* -------------------------------------------------------------------- */
+  SDESC(WAIT_ACC_ABORT, "Aborting", ""),
+  SDESC(ABORT_QUEUED, "Aborting", ""),
+  SDESC(ABORT_STOPPED, "AbortingStopped", ""),
+  SDESC(WAIT_AI_AFTER_ABORT, "Aborting", ""),
+  SDESC(LOG_ABORT_QUEUED, "Aborting", ""),
+  SDESC(WAIT_TUP_TO_ABORT, "Aborting", ""),
+
+  /* -------------------------------------------------------------------- */
+  // Scan in progress states
+  /* -------------------------------------------------------------------- */
+  SDESC(WAIT_SCAN_AI, "Scanning", ""),
+  SDESC(SCAN_STATE_USED, "Scanning", ""),
+  SDESC(SCAN_FIRST_STOPPED, "Scanning", ""),
+  SDESC(SCAN_CHECK_STOPPED, "Scanning", ""),
+  SDESC(SCAN_STOPPED, "ScanningStopped", ""),
+  SDESC(SCAN_RELEASE_STOPPED, "ScanningStopped", ""),
+  SDESC(SCAN_CLOSE_STOPPED, "ScanningStopped", ""),
+  SDESC(COPY_CLOSE_STOPPED, "ScanningStopped", ""),
+  SDESC(COPY_FIRST_STOPPED, "ScanningStopped", ""),
+  SDESC(COPY_STOPPED, "ScanningStopped", ""),
+  SDESC(SCAN_TUPKEY, "Scanning", ""),
+  SDESC(COPY_TUPKEY, "NodeRecoveryScanning", ""),
+
+  SDESC(TC_NOT_CONNECTED, "Idle", ""),
+  SDESC(PREPARED_RECEIVED_COMMIT, "Committing", ""),
+  SDESC(LOG_COMMIT_WRITTEN, "Committing", ""),
+
+  { 0, 0, 0, 0 }
+};

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp	2011-10-21 08:59:23 +0000
@@ -85,7 +85,7 @@ NDB_COMMAND(redoLogFileReader,  "redoLog
   {
     MY_STAT buf;
     my_stat(fileName, &buf, MYF(0));
-    NO_MBYTE_IN_FILE = buf.st_size / (1024 * 1024);
+    NO_MBYTE_IN_FILE = (unsigned)(buf.st_size / (1024 * 1024));
     if (NO_MBYTE_IN_FILE != 16)
     {
       ndbout_c("Detected %umb files", NO_MBYTE_IN_FILE);

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-09-29 11:43:27 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-10-23 08:34:49 +0000
@@ -3601,6 +3601,7 @@ Dbspj::computeHash(Signal* signal,
     (MAX_KEY_SIZE_IN_WORDS + 1) / 2;
   Uint64 tmp64[MAX_KEY_SIZE_IN_LONG_WORDS];
   Uint32 *tmp32 = (Uint32*)tmp64;
+  ndbassert(ptr.sz <= MAX_KEY_SIZE_IN_WORDS);
   copy(tmp32, ptr);
 
   const KeyDescriptor* desc = g_key_descriptor_pool.getPtr(tableId);
@@ -3639,6 +3640,7 @@ Dbspj::computePartitionHash(Signal* sign
   Uint64 *tmp64 = _space;
   Uint32 *tmp32 = (Uint32*)tmp64;
   Uint32 sz = ptr.sz;
+  ndbassert(ptr.sz <= MAX_KEY_SIZE_IN_WORDS);
   copy(tmp32, ptr);
 
   const KeyDescriptor* desc = g_key_descriptor_pool.getPtr(tableId);
@@ -3681,7 +3683,7 @@ Dbspj::getNodes(Signal* signal, BuildKey
   req->tableId = tableId;
   req->hashValue = dst.hashInfo[1];
   req->distr_key_indicator = 0; // userDefinedPartitioning not supported!
-  * (EmulatedJamBuffer**)req->jamBuffer = jamBuffer();
+  req->jamBufferPtr = jamBuffer();
 
 #if 1
   EXECUTE_DIRECT(DBDIH, GSN_DIGETNODESREQ, signal,
@@ -4456,6 +4458,12 @@ Dbspj::parseScanIndex(Build_context& ctx
     data.m_firstExecution = true;
     data.m_batch_chunks = 0;
 
+    /**
+     * We will need to look at the parameters again if the scan is pruned and the prune
+     * key uses parameter values. Therefore, we keep a reference to the start of the
+     * parameter buffer.
+     */
+    DABuffer origParam = param;
     err = parseDA(ctx, requestPtr, treeNodePtr,
                   tree, treeBits, param, paramBits);
     if (unlikely(err != 0))
@@ -4482,7 +4490,7 @@ Dbspj::parseScanIndex(Build_context& ctx
         /**
          * Expand pattern into a new pattern (with linked values)
          */
-        err = expand(pattern, treeNodePtr, tree, len, param, cnt);
+        err = expand(pattern, treeNodePtr, tree, len, origParam, cnt);
         if (unlikely(err != 0))
           break;
 
@@ -4501,7 +4509,7 @@ Dbspj::parseScanIndex(Build_context& ctx
          */
         Uint32 prunePtrI = RNIL;
         bool hasNull;
-        err = expand(prunePtrI, tree, len, param, cnt, hasNull);
+        err = expand(prunePtrI, tree, len, origParam, cnt, hasNull);
         if (unlikely(err != 0))
           break;
 
@@ -5078,7 +5086,8 @@ Dbspj::scanIndex_parent_batch_complete(S
       parallelism = (data.m_fragCount - data.m_frags_complete) / roundTrips;
     }
 
-    ndbassert(parallelism <= data.m_fragCount - data.m_frags_complete);
+    ndbassert(parallelism >= 1);
+    ndbassert((Uint32)parallelism + data.m_frags_complete <= data.m_fragCount);
     data.m_parallelism = static_cast<Uint32>(parallelism);
 
 #ifdef DEBUG_SCAN_FRAGREQ
@@ -6189,6 +6198,7 @@ Uint32
 Dbspj::appendToPattern(Local_pattern_store & pattern,
                        DABuffer & tree, Uint32 len)
 {
+  jam();
   if (unlikely(tree.ptr + len > tree.end))
     return DbspjErr::InvalidTreeNodeSpecification;
 
@@ -6203,6 +6213,7 @@ Uint32
 Dbspj::appendParamToPattern(Local_pattern_store& dst,
                             const RowPtr::Linear & row, Uint32 col)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6218,6 +6229,7 @@ Uint32
 Dbspj::appendParamHeadToPattern(Local_pattern_store& dst,
                                 const RowPtr::Linear & row, Uint32 col)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6235,6 +6247,7 @@ Dbspj::appendTreeToSection(Uint32 & ptrI
   /**
    * TODO handle errors
    */
+  jam();
   Uint32 SZ = 16;
   Uint32 tmp[16];
   while (len > SZ)
@@ -6293,6 +6306,7 @@ Uint32
 Dbspj::appendColToSection(Uint32 & dst, const RowPtr::Section & row,
                           Uint32 col, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6316,6 +6330,7 @@ Uint32
 Dbspj::appendColToSection(Uint32 & dst, const RowPtr::Linear & row,
                           Uint32 col, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6335,6 +6350,7 @@ Uint32
 Dbspj::appendAttrinfoToSection(Uint32 & dst, const RowPtr::Linear & row,
                                Uint32 col, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6353,6 +6369,7 @@ Uint32
 Dbspj::appendAttrinfoToSection(Uint32 & dst, const RowPtr::Section & row,
                                Uint32 col, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6378,6 +6395,7 @@ Dbspj::appendAttrinfoToSection(Uint32 &
 Uint32
 Dbspj::appendPkColToSection(Uint32 & dst, const RowPtr::Section & row, Uint32 col)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6400,6 +6418,7 @@ Dbspj::appendPkColToSection(Uint32 & dst
 Uint32
 Dbspj::appendPkColToSection(Uint32 & dst, const RowPtr::Linear & row, Uint32 col)
 {
+  jam();
   Uint32 offset = row.m_header->m_offset[col];
   Uint32 tmp = row.m_data[offset];
   Uint32 len = AttributeHeader::getDataSize(tmp);
@@ -6413,6 +6432,7 @@ Dbspj::appendFromParent(Uint32 & dst, Lo
                         Uint32 levels, const RowPtr & rowptr,
                         bool& hasNull)
 {
+  jam();
   Ptr<TreeNode> treeNodePtr;
   m_treenode_pool.getPtr(treeNodePtr, rowptr.m_src_node_ptrI);
   Uint32 corrVal = rowptr.m_src_correlation;
@@ -6527,6 +6547,7 @@ Dbspj::appendDataToSection(Uint32 & ptrI
                            Local_pattern_store::ConstDataBufferIterator& it,
                            Uint32 len, bool& hasNull)
 {
+  jam();
   if (unlikely(len==0))
   {
     jam();
@@ -6732,6 +6753,7 @@ Uint32
 Dbspj::expand(Uint32 & ptrI, DABuffer& pattern, Uint32 len,
               DABuffer& param, Uint32 paramCnt, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle error
    */
@@ -6816,6 +6838,7 @@ Dbspj::expand(Local_pattern_store& dst,
               DABuffer& pattern, Uint32 len,
               DABuffer& param, Uint32 paramCnt)
 {
+  jam();
   /**
    * TODO handle error
    */

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2011-10-07 14:34:14 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2011-10-28 09:56:57 +0000
@@ -18,6 +18,7 @@
 #ifndef DBTC_H
 #define DBTC_H
 
+#ifndef DBTC_STATE_EXTRACT
 #include <ndb_limits.h>
 #include <pc.hpp>
 #include <SimulatedBlock.hpp>
@@ -37,6 +38,7 @@
 #include <signaldata/EventReport.hpp>
 #include <trigger_definitions.h>
 #include <SignalCounter.hpp>
+#endif
 
 #ifdef DBTC_C
 /*
@@ -143,14 +145,20 @@
 #define ZTRANS_TOO_BIG 261
 #endif
 
-class Dbtc: public SimulatedBlock {
+class Dbtc
+#ifndef DBTC_STATE_EXTRACT
+  : public SimulatedBlock
+#endif
+{
 public:
 
+#ifndef DBTC_STATE_EXTRACT
   /**
    * Incase of mt-TC...only one instance will perform actual take-over
    *   let this be TAKE_OVER_INSTANCE
    */
   STATIC_CONST( TAKE_OVER_INSTANCE = 1 );
+#endif
 
   enum ConnectionState {
     CS_CONNECTED = 0,
@@ -188,6 +196,7 @@ public:
     CS_WAIT_FIRE_TRIG_REQ = 27
   };
 
+#ifndef DBTC_STATE_EXTRACT
   enum OperationState {
     OS_CONNECTED = 1,
     OS_OPERATING = 2,
@@ -1986,10 +1995,7 @@ private:
 
   bool validate_filter(Signal*);
   bool match_and_print(Signal*, ApiConnectRecordPtr);
-  void ndbinfo_write_trans(Signal* signal,
-                           DbinfoScanReq * req,
-                           Ndbinfo::Ratelimit * rl,
-                           ApiConnectRecordPtr transPtr);
+  bool ndbinfo_write_trans(Ndbinfo::Row&, ApiConnectRecordPtr);
 
 #ifdef ERROR_INSERT
   bool testFragmentDrop(Signal* signal);
@@ -2106,6 +2112,7 @@ private:
 #endif
   Uint32 m_deferred_enabled;
   Uint32 m_max_writes_per_trans;
+#endif
 };
 
 #endif

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2011-10-07 14:34:14 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2011-10-23 08:34:49 +0000
@@ -3226,7 +3226,7 @@ void Dbtc::tckeyreq050Lab(Signal* signal
   req->tableId = Ttableref;
   req->hashValue = TdistrHashValue;
   req->distr_key_indicator = regCachePtr->distributionKeyIndicator;
-  * (EmulatedJamBuffer**)req->jamBuffer = jamBuffer();
+  req->jamBufferPtr = jamBuffer();
 
   /*-------------------------------------------------------------*/
   /* FOR EFFICIENCY REASONS WE AVOID THE SIGNAL SENDING HERE AND */
@@ -10926,7 +10926,7 @@ void Dbtc::execDIH_SCAN_TAB_CONF(Signal*
     req->tableId = tabPtr.i;
     req->hashValue = cachePtr.p->distributionKey;
     req->distr_key_indicator = tabPtr.p->get_user_defined_partitioning();
-    * (EmulatedJamBuffer**)req->jamBuffer = jamBuffer();
+    req->jamBufferPtr = jamBuffer();
     EXECUTE_DIRECT(DBDIH, GSN_DIGETNODESREQ, signal,
                    DiGetNodesReq::SignalLength, 0);
     UintR TerrorIndicator = signal->theData[0];
@@ -13295,7 +13295,12 @@ void Dbtc::execDBINFO_SCANREQ(Signal *si
     for (Uint32 i = 0; i < maxloop; i++)
     {
       ptrCheckGuard(ptr, capiConnectFilesize, apiConnectRecord);
-      ndbinfo_write_trans(signal, &req, &rl, ptr);
+      Ndbinfo::Row row(signal, req);
+      if (ndbinfo_write_trans(row, ptr))
+      {
+        jam();
+        ndbinfo_send_row(signal, req, row, rl);
+      }
 
       ptr.i ++;
       if (ptr.i == capiConnectFilesize)
@@ -13318,11 +13323,8 @@ done:
   ndbinfo_send_scan_conf(signal, req, rl);
 }
 
-void
-Dbtc::ndbinfo_write_trans(Signal* signal,
-                          DbinfoScanReq * req,
-                          Ndbinfo::Ratelimit * rl,
-                          ApiConnectRecordPtr transPtr)
+bool
+Dbtc::ndbinfo_write_trans(Ndbinfo::Row & row, ApiConnectRecordPtr transPtr)
 {
   Uint32 conState = transPtr.p->apiConnectstate;
 
@@ -13338,21 +13340,15 @@ Dbtc::ndbinfo_write_trans(Signal* signal
       conState == CS_DISCONNECTED ||
       conState == CS_RESTART)
   {
-    return;
+    return false;
   }
 
-  char transid[64];
-  BaseString::snprintf(transid, sizeof(transid),
-                       "%.8x.%.8x",
-                       transPtr.p->transid[0],
-                       transPtr.p->transid[1]);
-
-  Ndbinfo::Row row(signal, *req);
   row.write_uint32(getOwnNodeId());
   row.write_uint32(instance());   // block instance
   row.write_uint32(transPtr.i);
   row.write_uint32(transPtr.p->ndbapiBlockref);
-  row.write_string(transid);
+  row.write_uint32(transPtr.p->transid[0]);
+  row.write_uint32(transPtr.p->transid[1]);
   row.write_uint32(conState);
   row.write_uint32(transPtr.p->m_flags);
   row.write_uint32(transPtr.p->lqhkeyreqrec);
@@ -13404,7 +13400,7 @@ Dbtc::ndbinfo_write_trans(Signal* signal
 
   Uint32 apiTimer = getApiConTimer(transPtr.i);
   row.write_uint32(apiTimer ? (ctcTimer - apiTimer) / 100 : 0);
-  ndbinfo_send_row(signal, *req, row, *rl);
+  return true;
 }
 
 bool

=== added file 'storage/ndb/src/kernel/blocks/dbtc/DbtcStateDesc.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcStateDesc.cpp	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcStateDesc.cpp	2011-10-28 09:56:57 +0000
@@ -0,0 +1,59 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#include <kernel/statedesc.hpp>
+#define DBTC_STATE_EXTRACT
+#include "Dbtc.hpp"
+
+#define SDESC(a,b,c) { (unsigned)Dbtc::a, #a, b, c }
+
+/**
+ * Value
+ * Friendly name
+ * Description
+ */
+struct ndbkernel_state_desc g_dbtc_apiconnect_state_desc[] =
+{
+  SDESC(CS_CONNECTED, "Connected",
+        "An allocated idle transaction object"),
+  SDESC(CS_DISCONNECTED, "Disconnected",
+        "An unallocated connection object"),
+  SDESC(CS_STARTED, "Started", "A started transaction"),
+  SDESC(CS_RECEIVING, "Receiving", "A transaction receiving operations"),
+  SDESC(CS_RESTART, "", ""),
+  SDESC(CS_ABORTING, "Aborting", "A transaction aborting"),
+  SDESC(CS_COMPLETING, "Completing", "A transaction completing"),
+  SDESC(CS_COMPLETE_SENT, "Completing", "A transaction completing"),
+  SDESC(CS_PREPARE_TO_COMMIT, "", ""),
+  SDESC(CS_COMMIT_SENT, "Committing", "A transaction committing"),
+  SDESC(CS_START_COMMITTING, "", ""),
+  SDESC(CS_COMMITTING, "Committing", "A transaction committing"),
+  SDESC(CS_REC_COMMITTING, "", ""),
+  SDESC(CS_WAIT_ABORT_CONF, "Aborting", ""),
+  SDESC(CS_WAIT_COMPLETE_CONF, "Completing", ""),
+  SDESC(CS_WAIT_COMMIT_CONF, "Committing", ""),
+  SDESC(CS_FAIL_ABORTING, "TakeOverAborting", ""),
+  SDESC(CS_FAIL_ABORTED, "TakeOverAborting", ""),
+  SDESC(CS_FAIL_PREPARED, "", ""),
+  SDESC(CS_FAIL_COMMITTING, "TakeOverCommitting", ""),
+  SDESC(CS_FAIL_COMMITTED, "TakeOverCommitting", ""),
+  SDESC(CS_FAIL_COMPLETED, "TakeOverCompleting", ""),
+  SDESC(CS_START_SCAN, "Scanning", ""),
+  SDESC(CS_SEND_FIRE_TRIG_REQ, "Precommitting", ""),
+  SDESC(CS_WAIT_FIRE_TRIG_REQ, "Precommitting", ""),
+  { 0, 0, 0, 0 }
+};

=== modified file 'storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp	2011-10-13 09:02:21 +0000
@@ -842,6 +842,8 @@ public:
   static Uint32 mt_buildIndexFragment_wrapper(void*);
 private:
   Uint32 mt_buildIndexFragment(struct mt_BuildIndxCtx*);
+
+  Signal* c_signal_bug32040;
 };
 
 // Dbtux::TupLoc

=== modified file 'storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp	2011-07-04 13:37:56 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp	2011-10-13 09:02:21 +0000
@@ -90,6 +90,8 @@ Dbtux::Dbtux(Block_context& ctx, Uint32
   addRecSignal(GSN_NODE_STATE_REP, &Dbtux::execNODE_STATE_REP, true);
 
   addRecSignal(GSN_DROP_FRAG_REQ, &Dbtux::execDROP_FRAG_REQ);
+
+  c_signal_bug32040 = 0;
 }
 
 Dbtux::~Dbtux()
@@ -152,6 +154,7 @@ Dbtux::execSTTOR(Signal* signal)
     CLEAR_ERROR_INSERT_VALUE;
     c_tup = (Dbtup*)globalData.getBlock(DBTUP, instance());
     ndbrequire(c_tup != 0);
+    c_signal_bug32040 = signal;
     break;
   case 3:
     jam();

=== modified file 'storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp	2011-10-13 09:02:21 +0000
@@ -911,7 +911,11 @@ Dbtux::scanNext(ScanOpPtr scanPtr, bool
   }
 #endif
   // cannot be moved away from tuple we have locked
+#if defined VM_TRACE || defined ERROR_INSERT
   ndbrequire(scan.m_state != ScanOp::Locked);
+#else
+  ndbrequire(fromMaintReq || scan.m_state != ScanOp::Locked);
+#endif
   // scan direction
   const unsigned idir = scan.m_descending; // 0, 1
   const int jdir = 1 - 2 * (int)idir;      // 1, -1
@@ -921,6 +925,24 @@ Dbtux::scanNext(ScanOpPtr scanPtr, bool
   NodeHandle origNode(frag);
   selectNode(origNode, pos.m_loc);
   ndbrequire(islinkScan(origNode, scanPtr));
+  if (unlikely(scan.m_state == ScanOp::Locked)) {
+    // bug#32040 - no fix, just unlock and continue
+    jam();
+    if (scan.m_accLockOp != RNIL) {
+      jam();
+      Signal* signal = c_signal_bug32040;
+      AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
+      lockReq->returnCode = RNIL;
+      lockReq->requestInfo = AccLockReq::Abort;
+      lockReq->accOpPtr = scan.m_accLockOp;
+      EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::UndoSignalLength);
+      jamEntry();
+      ndbrequire(lockReq->returnCode == AccLockReq::Success);
+      scan.m_accLockOp = RNIL;
+      scan.m_lockwait = false;
+    }
+    scan.m_state = ScanOp::Next;
+  }
   // current node in loop
   NodeHandle node = origNode;
   // copy of entry found

=== modified file 'storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp'
--- a/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp	2011-10-17 13:37:47 +0000
@@ -149,8 +149,9 @@ public:
   // schema trans
   Uint32 c_schemaTransId;
   Uint32 c_schemaTransKey;
-  Uint32 c_hashMapId;
-  Uint32 c_hashMapVersion;
+  // intersignal transient store of: hash_map, logfilegroup, tablespace
+  Uint32 c_objectId; 
+  Uint32 c_objectVersion;
 
 public:
   Ndbcntr(Block_context&);

=== modified file 'storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp	2011-10-17 13:37:47 +0000
@@ -2204,8 +2204,8 @@ Ndbcntr::execCREATE_HASH_MAP_CONF(Signal
   if (conf->senderData == 0)
   {
     jam();
-    c_hashMapId = conf->objectId;
-    c_hashMapVersion = conf->objectVersion;
+    c_objectId = conf->objectId;
+    c_objectVersion = conf->objectVersion;
   }
 
   createSystableLab(signal, 0);
@@ -2274,8 +2274,8 @@ Ndbcntr::createDDObjects(Signal * signal
     {
       jam();
       fg.TS_ExtentSize = Uint32(entry->size);
-      fg.TS_LogfileGroupId = RNIL;
-      fg.TS_LogfileGroupVersion = RNIL;
+      fg.TS_LogfileGroupId = c_objectId;
+      fg.TS_LogfileGroupVersion = c_objectVersion;
     }
 
     SimpleProperties::UnpackStatus s;
@@ -2310,8 +2310,8 @@ Ndbcntr::createDDObjects(Signal * signal
     DictFilegroupInfo::File f; f.init();
     BaseString::snprintf(f.FileName, sizeof(f.FileName), "%s", entry->name);
     f.FileType = entry->type;
-    f.FilegroupId = RNIL;
-    f.FilegroupVersion = RNIL;
+    f.FilegroupId = c_objectId;
+    f.FilegroupVersion = c_objectVersion;
     f.FileSizeHi = Uint32(entry->size >> 32);
     f.FileSizeLo = Uint32(entry->size);
 
@@ -2371,6 +2371,8 @@ Ndbcntr::execCREATE_FILEGROUP_CONF(Signa
 {
   jamEntry();
   CreateFilegroupConf* conf = (CreateFilegroupConf*)signal->getDataPtr();
+  c_objectId = conf->filegroupId;
+  c_objectVersion = conf->filegroupVersion;
   createDDObjects(signal, conf->senderData + 1);
 }
 
@@ -2433,8 +2435,8 @@ void Ndbcntr::createSystableLab(Signal*
   //w.add(DictTabInfo::KeyLength, 1);
   w.add(DictTabInfo::TableTypeVal, (Uint32)table.tableType);
   w.add(DictTabInfo::SingleUserMode, (Uint32)NDB_SUM_READ_WRITE);
-  w.add(DictTabInfo::HashMapObjectId, c_hashMapId);
-  w.add(DictTabInfo::HashMapVersion, c_hashMapVersion);
+  w.add(DictTabInfo::HashMapObjectId, c_objectId);
+  w.add(DictTabInfo::HashMapVersion, c_objectVersion);
 
   for (unsigned i = 0; i < table.columnCount; i++) {
     const SysColumn& column = table.columnList[i];

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp	2011-10-20 16:18:28 +0000
@@ -110,7 +110,7 @@ AsyncFile::writeReq(Request * request)
     bool write_not_complete = true;
 
     while(write_not_complete) {
-      int totsize = 0;
+      size_t totsize = 0;
       off_t offset = request->par.readWrite.pages[page_num].offset;
       char* bufptr = theWriteBuffer;
 
@@ -128,7 +128,7 @@ AsyncFile::writeReq(Request * request)
           if (((i + 1) < request->par.readWrite.numberOfPages)) {
             // There are more pages to write
             // Check that offsets are consequtive
-            off_t tmp = page_offset + request->par.readWrite.pages[i].size;
+            off_t tmp=(off_t)(page_offset+request->par.readWrite.pages[i].size);
             if (tmp != request->par.readWrite.pages[i+1].offset) {
               // Next page is not aligned with previous, not allowed
               DEBUG(ndbout_c("Page offsets are not aligned"));
@@ -143,7 +143,7 @@ AsyncFile::writeReq(Request * request)
               break;
             }
           }
-          page_offset += request->par.readWrite.pages[i].size;
+          page_offset += (off_t)request->par.readWrite.pages[i].size;
         }
         bufptr = theWriteBuffer;
       } else {

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp	2011-10-20 13:01:37 +0000
@@ -197,7 +197,7 @@ Filename::set(Ndbfs* fs,
   strcat(theName, fileExtension[type]);
   
   if(dir == true){
-    for(int l = strlen(theName) - 1; l >= 0; l--){
+    for(int l = (int)strlen(theName) - 1; l >= 0; l--){
       if(theName[l] == DIR_SEPARATOR[0]){
 	theName[l] = 0;
 	break;

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/Win32AsyncFile.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/Win32AsyncFile.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Win32AsyncFile.cpp	2011-10-20 18:36:21 +0000
@@ -217,7 +217,7 @@ Win32AsyncFile::readBuffer(Request* req,
     DWORD dwBytesRead;
     BOOL bRead = ReadFile(hFile,
                           buf,
-                          size,
+                          (DWORD)size,
                           &dwBytesRead,
                           &ov);
     if(!bRead){
@@ -248,7 +248,7 @@ Win32AsyncFile::readBuffer(Request* req,
 
     buf += bytes_read;
     size -= bytes_read;
-    offset += bytes_read;
+    offset += (off_t)bytes_read;
   }
   return 0;
 }
@@ -277,7 +277,7 @@ Win32AsyncFile::writeBuffer(const char *
     size_t bytes_written = 0;
 
     DWORD dwWritten;
-    BOOL bWrite = WriteFile(hFile, buf, bytes_to_write, &dwWritten, &ov);
+    BOOL bWrite = WriteFile(hFile, buf, (DWORD)bytes_to_write, &dwWritten, &ov);
     if(!bWrite) {
       return GetLastError();
     }
@@ -288,7 +288,7 @@ Win32AsyncFile::writeBuffer(const char *
 
     buf += bytes_written;
     size -= bytes_written;
-    offset += bytes_written;
+    offset += (off_t)bytes_written;
   }
   return 0;
 }
@@ -393,7 +393,7 @@ loop:
   do {
     if (0 != strcmp(".", ffd.cFileName) && 0 != strcmp("..", ffd.cFileName))
     {
-      int len = strlen(path);
+      int len = (int)strlen(path);
       strcat(path, ffd.cFileName);
       if(DeleteFile(path) || RemoveDirectory(path)) 
       {

=== modified file 'storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-09-14 11:32:24 +0000
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-10-20 12:35:16 +0000
@@ -470,31 +470,34 @@ void Qmgr::setCCDelay(UintR aCCDelay)
 void Qmgr::execCONNECT_REP(Signal* signal)
 {
   jamEntry();
-  const Uint32 nodeId = signal->theData[0];
+  const Uint32 connectedNodeId = signal->theData[0];
 
   if (ERROR_INSERTED(931))
   {
     jam();
-    ndbout_c("Discarding CONNECT_REP(%d)", nodeId);
-    infoEvent("Discarding CONNECT_REP(%d)", nodeId);
+    ndbout_c("Discarding CONNECT_REP(%d)", connectedNodeId);
+    infoEvent("Discarding CONNECT_REP(%d)", connectedNodeId);
     return;
   }
 
-  c_connectedNodes.set(nodeId);
+  c_connectedNodes.set(connectedNodeId);
 
-  NodeRecPtr nodePtr;
-  nodePtr.i = nodeId;
-  ptrCheckGuard(nodePtr, MAX_NODES, nodeRec);
-  nodePtr.p->m_secret = 0;
+  {
+    NodeRecPtr connectedNodePtr;
+    connectedNodePtr.i = connectedNodeId;
+    ptrCheckGuard(connectedNodePtr, MAX_NODES, nodeRec);
+    connectedNodePtr.p->m_secret = 0;
+  }
 
-  nodePtr.i = getOwnNodeId();
-  ptrCheckGuard(nodePtr, MAX_NODES, nodeRec);
-  NodeInfo nodeInfo = getNodeInfo(nodeId);
-  switch(nodePtr.p->phase){
+  NodeRecPtr myNodePtr;
+  myNodePtr.i = getOwnNodeId();
+  ptrCheckGuard(myNodePtr, MAX_NODES, nodeRec);
+  NodeInfo connectedNodeInfo = getNodeInfo(connectedNodeId);
+  switch(myNodePtr.p->phase){
   case ZRUNNING:
-    if (nodeInfo.getType() == NodeInfo::DB)
+    if (connectedNodeInfo.getType() == NodeInfo::DB)
     {
-      ndbrequire(!c_clusterNodes.get(nodeId));
+      ndbrequire(!c_clusterNodes.get(connectedNodeId));
     }
   case ZSTARTING:
     jam();
@@ -504,16 +507,17 @@ void Qmgr::execCONNECT_REP(Signal* signa
     jam();
     return;
   case ZAPI_ACTIVE:
+    ndbrequire(false);
   case ZAPI_INACTIVE:
-    return;
+    ndbrequire(false);
   case ZINIT:
-    ndbrequire(getNodeInfo(nodeId).m_type == NodeInfo::MGM);
+    ndbrequire(getNodeInfo(connectedNodeId).m_type == NodeInfo::MGM);
     break;
   default:
     ndbrequire(false);
   }
 
-  if (nodeInfo.getType() != NodeInfo::DB)
+  if (connectedNodeInfo.getType() != NodeInfo::DB)
   {
     jam();
     return;
@@ -522,24 +526,24 @@ void Qmgr::execCONNECT_REP(Signal* signa
   switch(c_start.m_gsn){
   case GSN_CM_REGREQ:
     jam();
-    sendCmRegReq(signal, nodeId);
+    sendCmRegReq(signal, connectedNodeId);
 
     /**
      * We're waiting for CM_REGCONF c_start.m_nodes contains all configured
      *   nodes
      */
-    ndbrequire(nodePtr.p->phase == ZSTARTING);
-    ndbrequire(c_start.m_nodes.isWaitingFor(nodeId));
+    ndbrequire(myNodePtr.p->phase == ZSTARTING);
+    ndbrequire(c_start.m_nodes.isWaitingFor(connectedNodeId));
     return;
   case GSN_CM_NODEINFOREQ:
     jam();
     
-    if (c_start.m_nodes.isWaitingFor(nodeId))
+    if (c_start.m_nodes.isWaitingFor(connectedNodeId))
     {
       jam();
       ndbrequire(getOwnNodeId() != cpresident);
-      ndbrequire(nodePtr.p->phase == ZSTARTING);
-      sendCmNodeInfoReq(signal, nodeId, nodePtr.p);
+      ndbrequire(myNodePtr.p->phase == ZSTARTING);
+      sendCmNodeInfoReq(signal, connectedNodeId, myNodePtr.p);
       return;
     }
     return;
@@ -547,17 +551,17 @@ void Qmgr::execCONNECT_REP(Signal* signa
     jam();
     
     ndbrequire(getOwnNodeId() != cpresident);
-    ndbrequire(nodePtr.p->phase == ZRUNNING);
-    if (c_start.m_nodes.isWaitingFor(nodeId))
+    ndbrequire(myNodePtr.p->phase == ZRUNNING);
+    if (c_start.m_nodes.isWaitingFor(connectedNodeId))
     {
       jam();
-      c_start.m_nodes.clearWaitingFor(nodeId);
+      c_start.m_nodes.clearWaitingFor(connectedNodeId);
       c_start.m_gsn = RNIL;
       
       NodeRecPtr addNodePtr;
-      addNodePtr.i = nodeId;
+      addNodePtr.i = connectedNodeId;
       ptrCheckGuard(addNodePtr, MAX_NDB_NODES, nodeRec);
-      cmAddPrepare(signal, addNodePtr, nodePtr.p);
+      cmAddPrepare(signal, addNodePtr, myNodePtr.p);
       return;
     }
   }
@@ -565,11 +569,11 @@ void Qmgr::execCONNECT_REP(Signal* signa
     (void)1;
   }
   
-  ndbrequire(!c_start.m_nodes.isWaitingFor(nodeId));
-  ndbrequire(!c_readnodes_nodes.get(nodeId));
-  c_readnodes_nodes.set(nodeId);
+  ndbrequire(!c_start.m_nodes.isWaitingFor(connectedNodeId));
+  ndbrequire(!c_readnodes_nodes.get(connectedNodeId));
+  c_readnodes_nodes.set(connectedNodeId);
   signal->theData[0] = reference();
-  sendSignal(calcQmgrBlockRef(nodeId), GSN_READ_NODESREQ, signal, 1, JBA);
+  sendSignal(calcQmgrBlockRef(connectedNodeId), GSN_READ_NODESREQ, signal, 1, JBA);
   return;
 }//Qmgr::execCONNECT_REP()
 
@@ -4788,7 +4792,9 @@ void Qmgr::failReport(Signal* signal,
     if (ERROR_INSERTED(938))
     {
       nodeFailCount++;
-      ndbout_c("QMGR : execFAIL_REP : %u nodes have failed", nodeFailCount);
+      ndbout_c("QMGR : execFAIL_REP(Failed : %u Source : %u  Cause : %u) : "
+               "%u nodes have failed", 
+               aFailedNode, sourceNode, aFailCause, nodeFailCount);
       /* Count DB nodes */
       Uint32 nodeCount = 0;
       for (Uint32 i = 1; i < MAX_NDB_NODES; i++)
@@ -6877,6 +6883,12 @@ Qmgr::execNODE_PINGCONF(Signal* signal)
     return;
   }
 
+  if (ERROR_INSERTED(938))
+  {
+    ndbout_c("QMGR : execNODE_PING_CONF() from %u in tick %u",
+             sendersNodeId, m_connectivity_check.m_tick);
+  }
+
   /* Node must have been pinged, we must be waiting for the response,
    * or the node must have already failed
    */

=== modified file 'storage/ndb/src/kernel/error/ErrorReporter.cpp'
--- a/storage/ndb/src/kernel/error/ErrorReporter.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/error/ErrorReporter.cpp	2011-10-21 08:59:23 +0000
@@ -163,7 +163,7 @@ ErrorReporter::formatMessage(int thr_no,
   {
     for (Uint32 i = 1 ; i < num_threads; i++)
     {
-      sofar = strlen(messptr);
+      sofar = (int)strlen(messptr);
       if(sofar < MESSAGE_LENGTH)
       {
 	BaseString::snprintf(messptr + sofar, MESSAGE_LENGTH - sofar,
@@ -172,7 +172,7 @@ ErrorReporter::formatMessage(int thr_no,
     }
   }
 
-  sofar = strlen(messptr);
+  sofar = (int)strlen(messptr);
   if(sofar < MESSAGE_LENGTH)
   {
     BaseString::snprintf(messptr + sofar, MESSAGE_LENGTH - sofar,

=== modified file 'storage/ndb/src/kernel/error/ndbd_exit_codes.c'
--- a/storage/ndb/src/kernel/error/ndbd_exit_codes.c	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/kernel/error/ndbd_exit_codes.c	2011-10-20 13:01:37 +0000
@@ -267,7 +267,7 @@ const char *ndbd_exit_status_message(ndb
 
 int ndbd_exit_string(int err_no, char *str, unsigned int size)
 {
-  unsigned int len;
+  size_t len;
 
   ndbd_exit_classification cl;
   ndbd_exit_status st;
@@ -279,8 +279,8 @@ int ndbd_exit_string(int err_no, char *s
 
     len = my_snprintf(str, size-1, "%s: %s: %s", msg, st_msg, cl_msg);
     str[size-1]= '\0';
-  
-    return len;
+
+    return (int)len;
   }
   return -1;
 }

=== modified file 'storage/ndb/src/kernel/vm/DLHashTable.hpp'
--- a/storage/ndb/src/kernel/vm/DLHashTable.hpp	2011-10-07 11:46:40 +0000
+++ b/storage/ndb/src/kernel/vm/DLHashTable.hpp	2011-10-13 09:25:13 +0000
@@ -27,11 +27,16 @@
  *   (with a double linked list)
  *
  * The entries in the hashtable must have the following methods:
- *  -# bool equal(const class T &) const;
+ *  -# bool U::equal(const class U &) const;
  *     Which should return equal if the to objects have the same key
- *  -# Uint32 hashValue() const;
+ *  -# Uint32 U::hashValue() const;
  *     Which should return a 32 bit hashvalue
+ *
+ * and the following members:
+ *  -# Uint32 U::nextHash;
+ *  -# Uint32 U::prevHash;
  */
+
 template <typename P, typename T, typename U = T>
 class DLHashTableImpl 
 {
@@ -211,7 +216,7 @@ inline
 void
 DLHashTableImpl<P, T, U>::add(Ptr<T> & obj)
 {
-  const Uint32 hv = obj.p->hashValue() & mask;
+  const Uint32 hv = obj.p->U::hashValue() & mask;
   const Uint32 i  = hashValues[hv];
   
   if(i == RNIL)
@@ -288,7 +293,7 @@ inline
 void
 DLHashTableImpl<P, T, U>::remove(Ptr<T> & ptr, const T & key)
 {
-  const Uint32 hv = key.hashValue() & mask;  
+  const Uint32 hv = key.U::hashValue() & mask;  
   
   Uint32 i;
   T * p;
@@ -300,7 +305,7 @@ DLHashTableImpl<P, T, U>::remove(Ptr<T>
   while(i != RNIL)
   {
     p = thePool.getPtr(i);
-    if(key.equal(* p))
+    if(key.U::equal(* p))
     {
       const Uint32 next = p->U::nextHash;
       if(prev.i == RNIL)
@@ -366,7 +371,7 @@ DLHashTableImpl<P, T, U>::remove(Ptr<T>
   } 
   else 
   {
-    const Uint32 hv = ptr.p->hashValue() & mask;  
+    const Uint32 hv = ptr.p->U::hashValue() & mask;  
     if (hashValues[hv] == ptr.i)
     {
       hashValues[hv] = next;
@@ -400,7 +405,7 @@ DLHashTableImpl<P, T, U>::release(Ptr<T>
   } 
   else 
   {
-    const Uint32 hv = ptr.p->hashValue() & mask;  
+    const Uint32 hv = ptr.p->U::hashValue() & mask;  
     if (hashValues[hv] == ptr.i)
     {
       hashValues[hv] = next;
@@ -493,7 +498,7 @@ inline
 bool
 DLHashTableImpl<P, T, U>::find(Ptr<T> & ptr, const T & key) const 
 {
-  const Uint32 hv = key.hashValue() & mask;  
+  const Uint32 hv = key.U::hashValue() & mask;  
   
   Uint32 i;
   T * p;
@@ -502,7 +507,7 @@ DLHashTableImpl<P, T, U>::find(Ptr<T> &
   while(i != RNIL)
   {
     p = thePool.getPtr(i);
-    if(key.equal(* p))
+    if(key.U::equal(* p))
     {
       ptr.i = i;
       ptr.p = p;

=== modified file 'storage/ndb/src/kernel/vm/NdbinfoTables.cpp'
--- a/storage/ndb/src/kernel/vm/NdbinfoTables.cpp	2011-10-11 08:11:15 +0000
+++ b/storage/ndb/src/kernel/vm/NdbinfoTables.cpp	2011-10-17 13:32:49 +0000
@@ -172,68 +172,70 @@ DECLARE_NDBINFO_TABLE(DISKPAGEBUFFER, 9)
 DECLARE_NDBINFO_TABLE(THREADBLOCKS, 4) =
 { { "threadblocks", 4, 0, "which blocks are run in which threads" },
   {
-    {"node_id",                     Ndbinfo::Number, ""},
-    {"thr_no",                      Ndbinfo::Number, ""},
-    {"block_number",                Ndbinfo::Number, ""},
-    {"block_instance",              Ndbinfo::Number, ""},
+    {"node_id",                     Ndbinfo::Number, "node id"},
+    {"thr_no",                      Ndbinfo::Number, "thread number"},
+    {"block_number",                Ndbinfo::Number, "block number"},
+    {"block_instance",              Ndbinfo::Number, "block instance"},
   }
 };
 
 DECLARE_NDBINFO_TABLE(THREADSTAT, 18) =
-{ { "threadstat", 18, 0, "threadstat" },
+{ { "threadstat", 18, 0, "Statistics on execution threads" },
   {
     //{"0123456701234567"}
-    {"node_id",             Ndbinfo::Number, ""},
-    {"thr_no",              Ndbinfo::Number, ""},
-    {"thr_nm",              Ndbinfo::String, ""},
-    {"c_loop",              Ndbinfo::Number64,""},
-    {"c_exec",              Ndbinfo::Number64,""},
-    {"c_wait",              Ndbinfo::Number64,""},
-    {"c_l_sent_prioa",      Ndbinfo::Number64,""},
-    {"c_l_sent_priob",      Ndbinfo::Number64,""},
-    {"c_r_sent_prioa",      Ndbinfo::Number64,""},
-    {"c_r_sent_priob",      Ndbinfo::Number64,""},
-    {"os_tid",              Ndbinfo::Number64,""},
-    {"os_now",              Ndbinfo::Number64,""},
-    {"os_ru_utime",         Ndbinfo::Number64,""},
-    {"os_ru_stime",         Ndbinfo::Number64,""},
-    {"os_ru_minflt",        Ndbinfo::Number64,""},
-    {"os_ru_majflt",        Ndbinfo::Number64,""},
-    {"os_ru_nvcsw",         Ndbinfo::Number64,""},
-    {"os_ru_nivcsw",        Ndbinfo::Number64,""}
+    {"node_id",             Ndbinfo::Number, "node id"},
+    {"thr_no",              Ndbinfo::Number, "thread number"},
+    {"thr_nm",              Ndbinfo::String, "thread name"},
+    {"c_loop",              Ndbinfo::Number64,"No of loops in main loop"},
+    {"c_exec",              Ndbinfo::Number64,"No of signals executed"},
+    {"c_wait",              Ndbinfo::Number64,"No of times waited for more input"},
+    {"c_l_sent_prioa",      Ndbinfo::Number64,"No of prio A signals sent to own node"},
+    {"c_l_sent_priob",      Ndbinfo::Number64,"No of prio B signals sent to own node"},
+    {"c_r_sent_prioa",      Ndbinfo::Number64,"No of prio A signals sent to remote node"},
+    {"c_r_sent_priob",      Ndbinfo::Number64,"No of prio B signals sent to remote node"},
+    {"os_tid",              Ndbinfo::Number64,"OS thread id"},
+    {"os_now",              Ndbinfo::Number64,"OS gettimeofday (millis)"},
+    {"os_ru_utime",         Ndbinfo::Number64,"OS user CPU time (micros)"},
+    {"os_ru_stime",         Ndbinfo::Number64,"OS system CPU time (micros)"},
+    {"os_ru_minflt",        Ndbinfo::Number64,"OS page reclaims (soft page faults"},
+    {"os_ru_majflt",        Ndbinfo::Number64,"OS page faults (hard page faults)"},
+    {"os_ru_nvcsw",         Ndbinfo::Number64,"OS voluntary context switches"},
+    {"os_ru_nivcsw",        Ndbinfo::Number64,"OS involuntary context switches"}
   }
 };
 
-DECLARE_NDBINFO_TABLE(TRANSACTIONS, 10) =
-{ { "transactions", 10, 0, "transactions" },
+DECLARE_NDBINFO_TABLE(TRANSACTIONS, 11) =
+{ { "transactions", 11, 0, "transactions" },
   {
-    {"node_id",             Ndbinfo::Number, ""},
-    {"block_instance",      Ndbinfo::Number, ""},
-    {"objid",               Ndbinfo::Number, ""},
-    {"apiref",              Ndbinfo::Number, ""},
-    {"transid",             Ndbinfo::String, ""},
-    {"state",               Ndbinfo::Number, ""},
-    {"flags",               Ndbinfo::Number, ""},
-    {"c_ops",               Ndbinfo::Number, "No of operations" },
-    {"outstanding",         Ndbinfo::Number, "Outstanding request" },
-    {"timer",               Ndbinfo::Number, "(in seconds)"},
+    {"node_id",             Ndbinfo::Number, "node id"},
+    {"block_instance",      Ndbinfo::Number, "TC instance no"},
+    {"objid",               Ndbinfo::Number, "Object id of transaction object"},
+    {"apiref",              Ndbinfo::Number, "API reference"},
+    {"transid0",            Ndbinfo::Number, "Transaction id"},
+    {"transid1",            Ndbinfo::Number, "Transaction id"},
+    {"state",               Ndbinfo::Number, "Transaction state"},
+    {"flags",               Ndbinfo::Number, "Transaction flags"},
+    {"c_ops",               Ndbinfo::Number, "No of operations in transaction" },
+    {"outstanding",         Ndbinfo::Number, "Currently outstanding request" },
+    {"timer",               Ndbinfo::Number, "Timer (seconds)"},
   }
 };
 
-DECLARE_NDBINFO_TABLE(OPERATIONS, 11) =
-{ { "operations", 11, 0, "operations" },
+DECLARE_NDBINFO_TABLE(OPERATIONS, 12) =
+{ { "operations", 12, 0, "operations" },
   {
-    {"node_id",             Ndbinfo::Number, ""},
-    {"block_instance",      Ndbinfo::Number, ""},
-    {"objid",               Ndbinfo::Number, ""},
-    {"tcref",               Ndbinfo::Number, ""},
-    {"apiref",              Ndbinfo::Number, ""},
-    {"transid",             Ndbinfo::String, ""},
-    {"tableid",             Ndbinfo::Number, ""},
-    {"fragmentid",          Ndbinfo::Number, ""},
-    {"op",                  Ndbinfo::Number, ""},
-    {"state",               Ndbinfo::Number, ""},
-    {"flags",               Ndbinfo::Number, ""}
+    {"node_id",             Ndbinfo::Number, "node id"},
+    {"block_instance",      Ndbinfo::Number, "LQH instance no"},
+    {"objid",               Ndbinfo::Number, "Object id of operation object"},
+    {"tcref",               Ndbinfo::Number, "TC reference"},
+    {"apiref",              Ndbinfo::Number, "API reference"},
+    {"transid0",            Ndbinfo::Number, "Transaction id"},
+    {"transid1",            Ndbinfo::Number, "Transaction id"},
+    {"tableid",             Ndbinfo::Number, "Table id"},
+    {"fragmentid",          Ndbinfo::Number, "Fragment id"},
+    {"op",                  Ndbinfo::Number, "Operation type"},
+    {"state",               Ndbinfo::Number, "Operation state"},
+    {"flags",               Ndbinfo::Number, "Operation flags"}
   }
 };
 

=== modified file 'storage/ndb/src/kernel/vm/SimulatedBlock.cpp'
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp	2011-08-27 06:06:02 +0000
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp	2011-10-20 16:18:28 +0000
@@ -1827,7 +1827,7 @@ SimulatedBlock::infoEvent(const char * m
   BaseString::vsnprintf(buf, 96, msg, ap); // 96 = 100 - 4
   va_end(ap);
   
-  int len = strlen(buf) + 1;
+  size_t len = strlen(buf) + 1;
   if(len > 96){
     len = 96;
     buf[95] = 0;
@@ -1847,7 +1847,7 @@ SimulatedBlock::infoEvent(const char * m
   signalT.header.theSendersBlockRef      = reference();
   signalT.header.theTrace                = tTrace;
   signalT.header.theSignalId             = tSignalId;
-  signalT.header.theLength               = ((len+3)/4)+1;
+  signalT.header.theLength               = (Uint32)((len+3)/4)+1;
   
 #ifdef NDBD_MULTITHREADED
   sendlocal(m_threadId,
@@ -1872,7 +1872,7 @@ SimulatedBlock::warningEvent(const char
   BaseString::vsnprintf(buf, 96, msg, ap); // 96 = 100 - 4
   va_end(ap);
   
-  int len = strlen(buf) + 1;
+  size_t len = strlen(buf) + 1;
   if(len > 96){
     len = 96;
     buf[95] = 0;
@@ -1892,7 +1892,7 @@ SimulatedBlock::warningEvent(const char
   signalT.header.theSendersBlockRef      = reference();
   signalT.header.theTrace                = tTrace;
   signalT.header.theSignalId             = tSignalId;
-  signalT.header.theLength               = ((len+3)/4)+1;
+  signalT.header.theLength               = (Uint32)((len+3)/4)+1;
 
 #ifdef NDBD_MULTITHREADED
   sendlocal(m_threadId,

=== modified file 'storage/ndb/src/mgmapi/mgmapi.cpp'
--- a/storage/ndb/src/mgmapi/mgmapi.cpp	2011-09-19 11:59:09 +0000
+++ b/storage/ndb/src/mgmapi/mgmapi.cpp	2011-10-24 07:44:52 +0000
@@ -499,7 +499,10 @@ ndb_mgm_call(NdbMgmHandle handle,
   out.println("%s", "");
 
   if (cmd_bulk)
-    out.println(cmd_bulk);
+  {
+    out.write(cmd_bulk, strlen(cmd_bulk));
+    out.write("\n", 1);
+  }
 
   CHECK_TIMEDOUT_RET(handle, in, out, NULL);
 
@@ -2039,7 +2042,7 @@ ndb_mgm_dump_state(NdbMgmHandle handle,
   char buf[256];
   buf[0] = 0;
   for (int i = 0; i < _num_args; i++){
-    unsigned n = strlen(buf);
+    unsigned n = (unsigned)strlen(buf);
     if (n + 20 > sizeof(buf)) {
       SET_ERROR(handle, NDB_MGM_USAGE_ERROR, "arguments too long");
       DBUG_RETURN(-1);
@@ -2562,7 +2565,7 @@ ndb_mgm_get_configuration2(NdbMgmHandle
     size_t start = 0;
     do {
       if((read = read_socket(handle->socket, handle->timeout,
-			     &buf64[start], len-start)) < 1){
+			     &buf64[start], (int)(len-start))) < 1){
 	delete[] buf64;
 	buf64 = 0;
         if(read==0)

=== modified file 'storage/ndb/src/mgmapi/ndb_logevent.cpp'
--- a/storage/ndb/src/mgmapi/ndb_logevent.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/mgmapi/ndb_logevent.cpp	2011-10-20 13:01:37 +0000
@@ -616,7 +616,7 @@ int ndb_logevent_get_next(const NdbLogEv
     BaseString tmp(val);
     Vector<BaseString> list;
     tmp.split(list);
-    for (size_t j = 0; j<list.size(); j++)
+    for (unsigned j = 0; j<list.size(); j++)
     {
       dst->Data[j] = atoi(list[j].c_str());
     }

=== modified file 'storage/ndb/src/mgmclient/CommandInterpreter.cpp'
--- a/storage/ndb/src/mgmclient/CommandInterpreter.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/mgmclient/CommandInterpreter.cpp	2011-10-21 08:59:23 +0000
@@ -1154,7 +1154,7 @@ CommandInterpreter::execute_impl(const c
     }
     // for mysql client compatability remove trailing ';'
     {
-      unsigned last= strlen(line)-1;
+      unsigned last= (unsigned)(strlen(line)-1);
       if (line[last] == ';')
       {
 	line[last]= 0;
@@ -2431,7 +2431,7 @@ CommandInterpreter::executeDumpState(int
     return -1;
   }
 
-  for (size_t i = 0; i < args.size(); i++)
+  for (unsigned i = 0; i < args.size(); i++)
   {
     const char* arg = args[i].c_str();
 

=== modified file 'storage/ndb/src/mgmsrv/Defragger.hpp'
--- a/storage/ndb/src/mgmsrv/Defragger.hpp	2011-07-04 13:37:56 +0000
+++ b/storage/ndb/src/mgmsrv/Defragger.hpp	2011-10-20 13:01:37 +0000
@@ -37,7 +37,7 @@ class Defragger {
   Vector<DefragBuffer*> m_buffers;
 
   DefragBuffer* find_buffer(NodeId nodeId, Uint32 fragId){
-    for (size_t i = 0; i < m_buffers.size(); i++)
+    for (unsigned i = 0; i < m_buffers.size(); i++)
     {
       DefragBuffer* dbuf = m_buffers[i];
       if (dbuf->m_node_id == nodeId &&
@@ -48,7 +48,7 @@ class Defragger {
   }
 
   void erase_buffer(const DefragBuffer* dbuf){
-    for (size_t i = 0; i < m_buffers.size(); i++)
+    for (unsigned i = 0; i < m_buffers.size(); i++)
     {
       if (m_buffers[i] == dbuf)
       {
@@ -64,7 +64,7 @@ public:
   Defragger() {};
   ~Defragger()
   {
-    for (size_t i = m_buffers.size(); i > 0; --i)
+    for (unsigned i = m_buffers.size(); i > 0; --i)
     {
       delete m_buffers[i-1]; // free the memory of the fragment
     }
@@ -121,7 +121,7 @@ public:
     clear any unassembled signal buffers from node
   */
   void node_failed(NodeId nodeId) {
-    for (size_t i = m_buffers.size(); i > 0; --i)
+    for (unsigned i = m_buffers.size(); i > 0; --i)
     {
       if (m_buffers[i-1]->m_node_id == nodeId)
       {

=== modified file 'storage/ndb/src/mgmsrv/InitConfigFileParser.cpp'
--- a/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp	2011-10-21 08:59:23 +0000
@@ -186,7 +186,7 @@ InitConfigFileParser::run_config_rules(C
 						      ConfigInfo::m_ConfigRules[i].m_ruleData))
       return 0;
 
-    for(size_t j = 0; j<tmp.size(); j++){
+    for(unsigned j = 0; j<tmp.size(); j++){
       BaseString::snprintf(ctx.fname, sizeof(ctx.fname),
                            "%s", tmp[j].m_sectionType.c_str());
       ctx.type             = InitConfigFileParser::Section;
@@ -478,7 +478,7 @@ bool InitConfigFileParser::convertString
 //****************************************************************************
 static void
 trim(char * str){
-  int len = strlen(str);
+  int len = (int)strlen(str);
   for(len--;
       (str[len] == '\r' || str[len] == '\n' || 
        str[len] == ' ' || str[len] == '\t') && 
@@ -581,7 +581,7 @@ bool
 InitConfigFileParser::storeSection(Context& ctx){
   if(ctx.m_currentSection == NULL)
     return true;
-  for(int i = strlen(ctx.fname) - 1; i>=0; i--){
+  for(int i = (int)strlen(ctx.fname) - 1; i>=0; i--){
     ctx.fname[i] = toupper(ctx.fname[i]);
   }
   BaseString::snprintf(ctx.pname, sizeof(ctx.pname), "%s", ctx.fname);

=== modified file 'storage/ndb/src/mgmsrv/MgmtSrvr.cpp'
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2011-09-27 07:35:34 +0000
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2011-10-21 08:59:23 +0000
@@ -3576,7 +3576,7 @@ MgmtSrvr::alloc_node_id_impl(NodeId& nod
     return false;
 
   // Print list of possible nodes
-  for (size_t i = 0; i < nodes.size(); i++)
+  for (unsigned i = 0; i < nodes.size(); i++)
   {
     const PossibleNode& node = nodes[i];
     g_eventLogger->debug(" [%u]: %u, '%s', %d",

=== modified file 'storage/ndb/src/mgmsrv/Services.cpp'
--- a/storage/ndb/src/mgmsrv/Services.cpp	2011-09-13 09:10:52 +0000
+++ b/storage/ndb/src/mgmsrv/Services.cpp	2011-10-24 07:44:52 +0000
@@ -599,15 +599,17 @@ MgmApiSession::getConfig(Parser_t::Conte
   m_output->println("Content-Transfer-Encoding: base64");
   m_output->print("\n");
 
+  unsigned len = (unsigned)strlen(pack64.c_str());
   if(ERROR_INSERTED(3))
   {
     // Return only half the packed config
     BaseString half64 = pack64.substr(0, pack64.length());
-    m_output->println(half64.c_str());
+    m_output->write(half64.c_str(), (unsigned)strlen(half64.c_str()));
+    m_output->write("\n", 1);
     return;
   }
-  m_output->println(pack64.c_str());
-  m_output->print("\n");
+  m_output->write(pack64.c_str(), len);
+  m_output->write("\n\n", 2);
   return;
 }
 
@@ -1370,12 +1372,12 @@ logevent2str(BaseString& str, int eventT
       str.appfmt("%s=%d\n",ndb_logevent_body[i].token, val);
       if(strcmp(ndb_logevent_body[i].token,"error") == 0)
       {
-        int pretty_text_len= strlen(pretty_text);
+        int pretty_text_len= (int)strlen(pretty_text);
         if(pretty_text_size-pretty_text_len-3 > 0)
         {
           BaseString::snprintf(pretty_text+pretty_text_len, 4 , " - ");
           ndb_error_string(val, pretty_text+(pretty_text_len+3),
-                           pretty_text_size-pretty_text_len-3);
+                           (int)(pretty_text_size-pretty_text_len-3));
         }
       }
     } while (ndb_logevent_body[++i].type == eventType);
@@ -1430,9 +1432,20 @@ Ndb_mgmd_event_service::log(int eventTyp
 
       int r;
       if (m_clients[i].m_parsable)
-        r= out.println(str.c_str());
+      {
+        unsigned len = str.length();
+        r= out.write(str.c_str(), len);
+      }
       else
-        r= out.println(pretty_text);
+      {
+        unsigned len = (unsigned)strlen(pretty_text);
+        r= out.write(pretty_text, len);
+      }
+
+      if (! (r < 0))
+      {
+        r = out.write("\n", 1);
+      }
 
       if (r<0)
       {
@@ -1631,7 +1644,7 @@ MgmApiSession::listen_event(Parser<MgmAp
   Vector<BaseString> list;
   param.trim();
   param.split(list, " ,");
-  for(size_t i = 0; i<list.size(); i++){
+  for(unsigned i = 0; i<list.size(); i++){
     Vector<BaseString> spec;
     list[i].trim();
     list[i].split(spec, "=:");
@@ -1852,7 +1865,7 @@ MgmApiSession::list_session(SocketServer
   lister->m_output->println("session.%llu.m_stop: %d",id,s->m_stop);
   if(s->m_ctx)
   {
-    int l= strlen(s->m_ctx->m_tokenBuffer);
+    int l= (int)strlen(s->m_ctx->m_tokenBuffer);
     char *buf= (char*) malloc(2*l+1);
     char *b= buf;
     for(int i=0; i<l;i++)
@@ -1922,7 +1935,7 @@ MgmApiSession::get_session(SocketServer:
   p->l->m_output->println("m_stop: %d",s->m_stop);
   if(s->m_ctx)
   {
-    int l= strlen(s->m_ctx->m_tokenBuffer);
+    int l= (int)strlen(s->m_ctx->m_tokenBuffer);
     p->l->m_output->println("parser_buffer_len: %u",l);
     p->l->m_output->println("parser_status: %d",s->m_ctx->m_status);
   }
@@ -2019,7 +2032,7 @@ void MgmApiSession::setConfig(Parser_t::
       if((r= read_socket(m_socket,
                          SOCKET_TIMEOUT,
                          &buf64[start],
-                         len64-start)) < 1)
+                         (int)(len64-start))) < 1)
       {
         delete[] buf64;
         result.assfmt("read_socket failed, errno: %d", errno);

=== modified file 'storage/ndb/src/ndbapi/Ndb.cpp'
--- a/storage/ndb/src/ndbapi/Ndb.cpp	2011-09-07 17:12:12 +0000
+++ b/storage/ndb/src/ndbapi/Ndb.cpp	2011-10-17 12:43:31 +0000
@@ -2254,13 +2254,31 @@ Ndb::getNdbErrorDetail(const NdbError& e
 void
 Ndb::setCustomData(void* _customDataPtr)
 {
-  theImpl->customDataPtr = _customDataPtr;
+  theImpl->customData = Uint64(_customDataPtr);
 }
 
 void*
 Ndb::getCustomData() const
 {
-  return theImpl->customDataPtr;
+  return (void*)theImpl->customData;
+}
+
+void
+Ndb::setCustomData64(Uint64 _customData)
+{
+  theImpl->customData = _customData;
+}
+
+Uint64
+Ndb::getCustomData64() const
+{
+  return theImpl->customData;
+}
+
+Uint64
+Ndb::getNextTransactionId() const
+{
+  return theFirstTransId;
 }
 
 Uint32

=== modified file 'storage/ndb/src/ndbapi/NdbBlob.cpp'
--- a/storage/ndb/src/ndbapi/NdbBlob.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/ndbapi/NdbBlob.cpp	2011-10-21 08:59:23 +0000
@@ -696,7 +696,7 @@ NdbBlob::copyKeyFromRow(const NdbRecord
     unpacked+= unpacked_len;
   }
 
-  packedBuf.size= packed - packedBuf.data;
+  packedBuf.size= (Uint32)(packed - packedBuf.data);
   packedBuf.zerorest();
   assert(unpacked == unpackedBuf.data + unpackedBuf.size);
   DBUG_RETURN(0);

=== modified file 'storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2011-06-06 12:18:27 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2011-10-21 08:59:23 +0000
@@ -1260,7 +1260,7 @@ NdbTableImpl::buildColumnHash(){
       Uint32 bucket = hv & m_columnHashMask;
       bucket = (bucket < size ? bucket : bucket - size);
       m_columnHash[bucket] = (sz << 16) | (((size - bucket) + pos) << 1);
-      for(size_t j = 0; j<sz; j++, pos++){
+      for(unsigned j = 0; j<sz; j++, pos++){
 	Uint32 col = chains[i][j];	
 	Uint32 hv = hashValues[col];
 	if (m_columnHash.push_back((col << 16) | hv))

=== modified file 'storage/ndb/src/ndbapi/NdbImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbImpl.hpp	2011-09-09 10:48:14 +0000
+++ b/storage/ndb/src/ndbapi/NdbImpl.hpp	2011-10-17 12:43:31 +0000
@@ -129,7 +129,7 @@ public:
 
   BaseString m_systemPrefix; // Buffer for preformatted for <sys>/<def>/
   
-  void* customDataPtr;
+  Uint64 customData;
 
   Uint64 clientStats[ Ndb::NumClientStatistics ];
   

=== modified file 'storage/ndb/src/ndbapi/NdbOperationExec.cpp'
--- a/storage/ndb/src/ndbapi/NdbOperationExec.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/ndbapi/NdbOperationExec.cpp	2011-10-22 09:38:48 +0000
@@ -1744,7 +1744,7 @@ NdbOperation::receiveTCKEYREF(const NdbA
   if (aSignal->getLength() == TcKeyRef::SignalLength)
   {
     // Signal may contain additional error data
-    theError.details = (char *) aSignal->readData(5);
+    theError.details = (char *)UintPtr(aSignal->readData(5));
   }
 
   theStatus = Finished;

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilder.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-09-29 11:35:02 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-10-20 12:45:36 +0000
@@ -1661,7 +1661,8 @@ NdbQueryIndexScanOperationDefImpl::NdbQu
                            int& error)
   : NdbQueryScanOperationDefImpl(table,options,ident,ix,id,error),
   m_interface(*this), 
-  m_index(index)
+  m_index(index),
+  m_paramInPruneKey(false)
 {
   memset(&m_bound, 0, sizeof m_bound);
   if (bound!=NULL) {
@@ -2316,7 +2317,7 @@ NdbQueryLookupOperationDefImpl::appendKe
 
 
 Uint32
-NdbQueryIndexScanOperationDefImpl::appendPrunePattern(Uint32Buffer& serializedDef) const
+NdbQueryIndexScanOperationDefImpl::appendPrunePattern(Uint32Buffer& serializedDef)
 {
   Uint32 appendedPattern = 0;
 
@@ -2408,6 +2409,7 @@ NdbQueryIndexScanOperationDefImpl::appen
           }
           case NdbQueryOperandImpl::Param:
             appendedPattern |= QN_ScanIndexNode::SI_PRUNE_PARAMS;
+            m_paramInPruneKey = true;
             serializedDef.append(QueryPattern::param(paramCnt++));
             break;
           default:

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-09-29 11:35:02 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-10-20 12:45:36 +0000
@@ -382,6 +382,15 @@ public:
   virtual const IndexBound* getBounds() const
   { return NULL; } 
 
+  /** 
+   * True if this is a prunable scan and there are NdbQueryParamOperands in the
+   * distribution key.
+   */
+  virtual bool hasParamInPruneKey() const
+  {
+    return false;
+  }
+
   // Return 'true' is query type is a multi-row scan
   virtual bool isScanOperation() const = 0;
 
@@ -523,7 +532,7 @@ protected:
   virtual Uint32 appendBoundPattern(Uint32Buffer& serializedDef) const
   { return 0; }
 
-  virtual Uint32 appendPrunePattern(Uint32Buffer& serializedDef) const
+  virtual Uint32 appendPrunePattern(Uint32Buffer& serializedDef)
   { return 0; }
 
 }; // class NdbQueryScanOperationDefImpl
@@ -553,11 +562,16 @@ public:
   virtual const IndexBound* getBounds() const
   { return &m_bound; } 
 
+  bool hasParamInPruneKey() const
+  {
+    return m_paramInPruneKey;
+  }
+
 protected:
   // Append pattern for creating complete range bounds to serialized code 
   virtual Uint32 appendBoundPattern(Uint32Buffer& serializedDef) const;
 
-  virtual Uint32 appendPrunePattern(Uint32Buffer& serializedDef) const;
+  virtual Uint32 appendPrunePattern(Uint32Buffer& serializedDef);
 
 private:
 
@@ -583,6 +597,12 @@ private:
 
   /** True if there is a set of bounds.*/
   IndexBound m_bound;
+
+  /** 
+   * True if scan is prunable and there are NdbQueryParamOperands in the 
+   * distribution key.
+   */
+  bool m_paramInPruneKey;
 }; // class NdbQueryIndexScanOperationDefImpl
 
 

=== modified file 'storage/ndb/src/ndbapi/NdbQueryOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-09-20 10:43:05 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-10-28 13:38:36 +0000
@@ -2464,6 +2464,7 @@ NdbQueryImpl::handleBatchComplete(NdbRoo
            << ", finalBatchFrags=" << m_finalBatchFrags
            <<  endl;
   }
+  assert(rootFrag.isFragBatchComplete());
 
   /* May received fragment data after a SCANREF() (timeout?) 
    * terminated the scan.  We are about to close this query, 
@@ -2471,8 +2472,6 @@ NdbQueryImpl::handleBatchComplete(NdbRoo
    */
   if (likely(m_errorReceived == 0))
   {
-    assert(rootFrag.isFragBatchComplete());
-
     assert(m_pendingFrags > 0);                // Check against underflow.
     assert(m_pendingFrags <= m_rootFragCount); // .... and overflow
     m_pendingFrags--;
@@ -2489,6 +2488,16 @@ NdbQueryImpl::handleBatchComplete(NdbRoo
     rootFrag.setReceivedMore();
     return true;
   }
+  else if (!getQueryDef().isScanQuery())  // A failed lookup query
+  {
+    /**
+     * A lookup query will retrieve the rows as part of ::execute().
+     * -> Error must be visible through API before we return control
+     *    to the application.
+     */
+    setErrorCode(m_errorReceived);
+    return true;
+  }
 
   return false;
 } // NdbQueryImpl::handleBatchComplete
@@ -4562,6 +4571,10 @@ NdbQueryOperationImpl::prepareAttrInfo(U
     {
       requestInfo |= QN_ScanIndexParameters::SIP_PARALLEL;
     }
+    if (def.hasParamInPruneKey())
+    {
+      requestInfo |= QN_ScanIndexParameters::SIP_PRUNE_PARAMS;
+    }
     param->requestInfo = requestInfo;
     // Check that both values fit in param->batchSize.
     assert(getMaxBatchRows() < (1<<QN_ScanIndexParameters::BatchRowBits));
@@ -4966,12 +4979,12 @@ NdbQueryOperationImpl::execTCKEYREF(cons
   if (&getRoot() == this || 
       ref->errorCode != static_cast<Uint32>(Err_TupleNotFound))
   {
-    getQuery().setErrorCode(ref->errorCode);
     if (aSignal->getLength() == TcKeyRef::SignalLength)
     {
       // Signal may contain additional error data
-      getQuery().m_error.details = (char *)ref->errorData;
+      getQuery().m_error.details = (char *)UintPtr(ref->errorData);
     }
+    getQuery().setFetchTerminated(ref->errorCode,false);
   }
 
   NdbRootFragment& rootFrag = getQuery().m_rootFrags[0];

=== modified file 'storage/ndb/src/ndbapi/NdbTransaction.cpp'
--- a/storage/ndb/src/ndbapi/NdbTransaction.cpp	2011-09-01 15:12:11 +0000
+++ b/storage/ndb/src/ndbapi/NdbTransaction.cpp	2011-10-22 09:38:48 +0000
@@ -2090,7 +2090,7 @@ transactions.
     if (aSignal->getLength() == TcRollbackRep::SignalLength)
     {
       // Signal may contain additional error data
-      theError.details = (char *) aSignal->readData(5);
+      theError.details = (char *)UintPtr(aSignal->readData(5));
     }
 
     /**********************************************************************/

=== modified file 'storage/ndb/src/ndbapi/Ndbinit.cpp'
--- a/storage/ndb/src/ndbapi/Ndbinit.cpp	2011-09-09 10:48:14 +0000
+++ b/storage/ndb/src/ndbapi/Ndbinit.cpp	2011-10-17 12:43:31 +0000
@@ -207,7 +207,7 @@ NdbImpl::NdbImpl(Ndb_cluster_connection
     wakeHandler(0),
     wakeContext(~Uint32(0)),
     m_ev_op(0),
-    customDataPtr(0)
+    customData(0)
 {
   int i;
   for (i = 0; i < MAX_NDB_NODES; i++) {

=== modified file 'storage/ndb/src/ndbapi/TransporterFacade.cpp'
--- a/storage/ndb/src/ndbapi/TransporterFacade.cpp	2011-09-09 10:48:14 +0000
+++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp	2011-10-14 13:24:26 +0000
@@ -1191,9 +1191,11 @@ TransporterFacade::sendFragmentedSignal(
       /* This section fits whole, move onto next */
       this_chunk_sz+= remaining_sec_sz;
       i++;
+      continue;
     }
     else
     {
+      assert(this_chunk_sz <= CHUNK_SZ);
       /* This section doesn't fit, truncate it */
       unsigned send_sz= CHUNK_SZ - this_chunk_sz;
       if (i != start_i)
@@ -1205,19 +1207,34 @@ TransporterFacade::sendFragmentedSignal(
          * The final piece does not need to be a multiple of
          * NDB_SECTION_SEGMENT_SZ
          * 
-         * Note that this can push this_chunk_sz above CHUNK_SZ
-         * Should probably round-down, but need to be careful of
-         * 'can't fit any' cases.  Instead, CHUNK_SZ is defined
-         * with some slack below MAX_SENT_MESSAGE_BYTESIZE
+         * We round down the available send space to the nearest whole 
+         * number of segments.
+         * If there's not enough space for one segment, then we round up
+         * to one segment.  This can make us send more than CHUNK_SZ, which
+         * is ok as it's defined as less than the maximum message length.
          */
-	send_sz=
-	  NDB_SECTION_SEGMENT_SZ
-	  *((send_sz+NDB_SECTION_SEGMENT_SZ-1)
-            /NDB_SECTION_SEGMENT_SZ);
-        if (send_sz > remaining_sec_sz)
-	  send_sz= remaining_sec_sz;
+        send_sz = (send_sz / NDB_SECTION_SEGMENT_SZ) * 
+          NDB_SECTION_SEGMENT_SZ;                        /* Round down */
+        send_sz = MAX(send_sz, NDB_SECTION_SEGMENT_SZ);  /* At least one */
+        send_sz = MIN(send_sz, remaining_sec_sz);        /* Only actual data */
+        
+        /* If we've squeezed the last bit of data in, jump out of 
+         * here to send the last fragment.
+         * Otherwise, send what we've collected so far.
+         */
+        if ((send_sz == remaining_sec_sz) &&      /* All sent */
+            (i == secs - 1))                      /* No more sections */
+        {
+          this_chunk_sz+=  remaining_sec_sz;
+          i++;
+          continue;
+        }
       }
 
+      /* At this point, there must be data to send in a further signal */
+      assert((send_sz < remaining_sec_sz) ||
+             (i < secs - 1));
+
       /* Modify tmp generic section ptr to describe truncated
        * section
        */
@@ -1256,9 +1273,6 @@ TransporterFacade::sendFragmentedSignal(
                  tmp_signal.readSignalNumber() == GSN_API_REGREQ);
         }
       }
-      // setup variables for next signal
-      start_i= i;
-      this_chunk_sz= 0;
       assert(remaining_sec_sz >= send_sz);
       Uint32 remaining= remaining_sec_sz - send_sz;
       tmp_ptr[i].sz= remaining;
@@ -1271,6 +1285,10 @@ TransporterFacade::sendFragmentedSignal(
       if (remaining == 0)
         /* This section's done, move onto the next */
 	i++;
+      
+      // setup variables for next signal
+      start_i= i;
+      this_chunk_sz= 0;
     }
   }
 

=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2011-10-07 13:15:08 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2011-10-20 13:01:37 +0000
@@ -979,17 +979,17 @@ int ndb_error_string(int err_no, char *s
   int len;
 
   assert(size > 1);
-  if(size <= 1) 
+  if(size <= 1)
     return 0;
+
   error.code = err_no;
   ndberror_update(&error);
 
-  len =
-    my_snprintf(str, size-1, "%s: %s: %s", error.message,
+  len = (int)my_snprintf(str, size-1, "%s: %s: %s", error.message,
 		ndberror_status_message(error.status),
 		ndberror_classification_message(error.classification));
   str[size-1]= '\0';
-  
+
   if (error.classification != UE)
     return len;
   return -len;

=== modified file 'storage/ndb/test/include/HugoCalculator.hpp'
--- a/storage/ndb/test/include/HugoCalculator.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/include/HugoCalculator.hpp	2011-10-12 10:19:08 +0000
@@ -41,6 +41,7 @@ public:
                      const char* valPtr, Uint32 valLen);
   int getIdValue(NDBT_ResultRow* const pRow) const;
   int getUpdatesValue(NDBT_ResultRow* const pRow) const;
+  int getIdColNo() const { return m_idCol;}
   int isIdCol(int colId) { return m_idCol == colId; };
   int isUpdateCol(int colId){ return m_updatesCol == colId; };
 

=== modified file 'storage/ndb/test/include/HugoOperations.hpp'
--- a/storage/ndb/test/include/HugoOperations.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/include/HugoOperations.hpp	2011-10-12 10:19:08 +0000
@@ -117,7 +117,8 @@ public:
   bool getPartIdForRow(const NdbOperation* pOp, int rowid, Uint32& partId);
   
   int setValues(NdbOperation*, int rowId, int updateId);
-  
+  int setNonPkValues(NdbOperation*, int rowId, int updateId);
+
   int verifyUpdatesValue(int updatesValue, int _numRows = 0);
 
   int indexReadRecords(Ndb*, const char * idxName, int recordNo,

=== modified file 'storage/ndb/test/include/NDBT_Table.hpp'
--- a/storage/ndb/test/include/NDBT_Table.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/include/NDBT_Table.hpp	2011-10-22 09:47:36 +0000
@@ -93,6 +93,7 @@ public:
     // validate() might cause initialization order problem with charset
     NdbError error;
     int ret = aggregate(error);
+    (void)ret;
     assert(ret == 0);
   }
   

=== modified file 'storage/ndb/test/include/NdbMgmd.hpp'
--- a/storage/ndb/test/include/NdbMgmd.hpp	2011-09-13 08:58:27 +0000
+++ b/storage/ndb/test/include/NdbMgmd.hpp	2011-10-24 07:44:52 +0000
@@ -227,7 +227,7 @@ public:
 
     SocketOutputStream out(socket());
 
-    if (out.println(cmd)){
+    if (out.println("%s", cmd)){
       error("call: println failed at line %d", __LINE__);
       return false;
     }
@@ -278,9 +278,16 @@ public:
     }
 
     // Send any bulk data
-    if (bulk && out.println(bulk)){
-      error("call: print('<bulk>') failed at line %d", __LINE__);
-      return false;
+    if (bulk)
+    {
+      if (out.write(bulk, strlen(bulk)) >= 0)
+      {
+        if (out.write("\n", 1) < 0)
+        {
+          error("call: print('<bulk>') failed at line %d", __LINE__);
+          return false;
+        }
+      }
     }
 
     BaseString buf;

=== modified file 'storage/ndb/test/ndbapi/Makefile.am'
--- a/storage/ndb/test/ndbapi/Makefile.am	2011-09-13 09:10:52 +0000
+++ b/storage/ndb/test/ndbapi/Makefile.am	2011-10-14 13:24:26 +0000
@@ -112,6 +112,8 @@ testMgmd_CXXFLAGS = -I$(top_srcdir)/stor
 testSingleUserMode_SOURCES = testSingleUserMode.cpp
 testNativeDefault_SOURCES = testNativeDefault.cpp
 testNdbApi_SOURCES = testNdbApi.cpp
+testNdbApi_CXXFLAGS = -I$(top_srcdir)/storage/ndb/src/ndbapi \
+	-I$(top_srcdir)/storage/ndb/include/transporter
 testNodeRestart_SOURCES = testNodeRestart.cpp
 testUpgrade_SOURCES = testUpgrade.cpp
 testUpgrade_LDADD = $(LDADD) $(top_srcdir)/libmysql_r/libmysqlclient_r.la

=== modified file 'storage/ndb/test/ndbapi/ScanFunctions.hpp'
--- a/storage/ndb/test/ndbapi/ScanFunctions.hpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/test/ndbapi/ScanFunctions.hpp	2011-10-21 08:59:23 +0000
@@ -29,7 +29,7 @@ class AttribList {
 public:
   AttribList(){};
   ~AttribList(){
-    for(size_t i = 0; i < attriblist.size(); i++){      
+    for(unsigned i = 0; i < attriblist.size(); i++){      
       delete attriblist[i];
     }
   };
@@ -335,7 +335,7 @@ void AttribList::buildAttribList(const N
   attriblist.push_back(attr);  
 
 #if 1
-  for(size_t j = 0; j < attriblist.size(); j++){
+  for(unsigned j = 0; j < attriblist.size(); j++){
 
     g_info << attriblist[j]->numAttribs << ": " ;
     for(int a = 0; a < attriblist[j]->numAttribs; a++)

=== modified file 'storage/ndb/test/ndbapi/testDict.cpp'
--- a/storage/ndb/test/ndbapi/testDict.cpp	2011-04-23 08:21:36 +0000
+++ b/storage/ndb/test/ndbapi/testDict.cpp	2011-10-20 12:21:10 +0000
@@ -8167,6 +8167,8 @@ runBug58277loadtable(NDBT_Context* ctx,
     int cnt = 0;
     for (int i = 0; i < rows; i++)
     {
+      int retries = 10;
+  retry:
       NdbTransaction* pTx = 0;
       CHK2((pTx = pNdb->startTransaction()) != 0, pNdb->getNdbError());
 
@@ -8183,7 +8185,19 @@ runBug58277loadtable(NDBT_Context* ctx,
         int x[] = {
          -630
         };
-        CHK3(pTx->execute(Commit) == 0, pTx->getNdbError(), x);
+        int res = pTx->execute(Commit);
+        if (res != 0 &&
+            pTx->getNdbError().status == NdbError::TemporaryError)
+        {
+          retries--;
+          if (retries >= 0)
+          {
+            pTx->close();
+            NdbSleep_MilliSleep(10);
+            goto retry;
+          }
+        }
+        CHK3(res == 0, pTx->getNdbError(), x);
         cnt++;
       }
       while (0);

=== modified file 'storage/ndb/test/ndbapi/testMgm.cpp'
--- a/storage/ndb/test/ndbapi/testMgm.cpp	2011-09-19 14:10:19 +0000
+++ b/storage/ndb/test/ndbapi/testMgm.cpp	2011-10-24 07:44:52 +0000
@@ -736,7 +736,7 @@ get_nodeid_of_type(NdbMgmd& mgmd, ndb_mg
   int noOfNodes = cs->no_of_nodes;
   int randomnode = myRandom48(noOfNodes);
   ndb_mgm_node_state *ns = cs->node_states + randomnode;
-  assert(ns->node_type == (Uint32)type);
+  assert((Uint32)ns->node_type == (Uint32)type);
   assert(ns->node_id);
 
   *nodeId = ns->node_id;

=== modified file 'storage/ndb/test/ndbapi/testNdbApi.cpp'
--- a/storage/ndb/test/ndbapi/testNdbApi.cpp	2011-09-29 06:48:39 +0000
+++ b/storage/ndb/test/ndbapi/testNdbApi.cpp	2011-10-14 13:24:26 +0000
@@ -25,6 +25,8 @@
 #include <random.h>
 #include <NdbTick.h>
 #include <my_sys.h>
+#include <SignalSender.hpp>
+#include <GlobalSignalNumbers.h>
 
 #define MAX_NDB_OBJECTS 32678
 
@@ -4972,6 +4974,635 @@ int runNdbClusterConnectionConnect(NDBT_
   return NDBT_OK;
 }
 
+/* Testing fragmented signal send/receive */
+
+/*
+  SectionStore
+
+  Abstraction of long section storage api.
+  Used by FragmentAssembler to assemble received long sections
+*/
+class SectionStore
+{
+public:
+  virtual ~SectionStore() {};
+  virtual int appendToSection(Uint32 secId, LinearSectionPtr ptr) = 0;
+};
+
+/*
+  Basic Section Store
+
+  Naive implementation using malloc.  Real usage might use something better.
+*/
+class BasicSectionStore : public SectionStore
+{
+public:
+  BasicSectionStore()
+  {
+    init();
+  };
+
+  ~BasicSectionStore()
+  {
+    freeStorage();
+  };
+
+  void init()
+  {
+    ptrs[0].p = NULL;
+    ptrs[0].sz = 0;
+
+    ptrs[2] = ptrs[1] = ptrs[0];
+  }
+
+  void freeStorage()
+  {
+    free(ptrs[0].p);
+    free(ptrs[1].p);
+    free(ptrs[2].p);
+  }
+
+  virtual int appendToSection(Uint32 secId, LinearSectionPtr ptr)
+  {
+    /* Potentially expensive re-alloc + copy */
+    assert(secId < 3);
+    
+    Uint32 existingSz = ptrs[secId].sz;
+    Uint32* existingBuff = ptrs[secId].p;
+
+    Uint32 newSize = existingSz + ptr.sz;
+    Uint32* newBuff = (Uint32*) realloc(existingBuff, newSize * 4);
+
+    if (!newBuff)
+      return -1;
+    
+    memcpy(newBuff + existingSz, ptr.p, ptr.sz * 4);
+    
+    ptrs[secId].p = newBuff;
+    ptrs[secId].sz = existingSz + ptr.sz;
+
+    return 0;
+  }
+    
+  LinearSectionPtr ptrs[3];
+};
+
+
+
+/*
+  FragmentAssembler
+
+  Used to assemble sections from multiple fragment signals, and 
+  produce a 'normal' signal.
+  
+  Requires a SectionStore implementation to accumulate the section
+  fragments
+
+  Might be useful generic utility, or not.
+
+  Usage : 
+    FragmentAssembler fa(ss);
+    while (!fa.isComplete())
+    {
+      sig = waitSignal();
+      fa.handleSignal(sig, sections);
+    }
+
+    fa.getSignalHeader();
+    fa.getSignalBody();
+    fa.getSectionStore(); ..
+
+*/
+class FragmentAssembler
+{
+public:
+  enum AssemblyError
+  {
+    NoError = 0,
+    FragmentSequence = 1,
+    FragmentSource = 2,
+    FragmentIdentity = 3,
+    SectionAppend = 4
+  };
+
+  FragmentAssembler(SectionStore* _secStore):
+    secsReceived(0),
+    secStore(_secStore),
+    complete(false),
+    fragId(0),
+    sourceNode(0),
+    error(NoError)
+  {}
+
+  int handleSignal(const SignalHeader* sigHead,
+                   const Uint32* sigBody,
+                   LinearSectionPtr* sections)
+  {
+    Uint32 sigLen = sigHead->theLength;
+    
+    if (fragId == 0)
+    {
+      switch (sigHead->m_fragmentInfo)
+      {
+      case 0:
+      {
+        /* Not fragmented, pass through */
+        sh = *sigHead;
+        memcpy(signalBody, sigBody, sigLen * 4);
+        Uint32 numSecs = sigHead->m_noOfSections;
+        for (Uint32 i=0; i<numSecs; i++)
+        {
+          if (secStore->appendToSection(i, sections[i]) != 0)
+          {
+            error = SectionAppend;
+            return -1;
+          }
+        }
+        complete = true;
+        break;
+      }
+      case 1:
+      {
+        /* Start of fragmented signal */
+        Uint32 incomingFragId;
+        Uint32 incomingSourceNode;
+        Uint32 numSecsInFragment;
+        
+        if (handleFragmentSections(sigHead, sigBody, sections,
+                                   &incomingFragId, &incomingSourceNode,
+                                   &numSecsInFragment) != 0)
+          return -1;
+        
+        assert(incomingFragId != 0);
+        fragId = incomingFragId;
+        sourceNode = incomingSourceNode;
+        assert(numSecsInFragment > 0);
+        
+        break;
+      }
+      default:
+      {
+        /* Error, out of sequence fragment */
+        error = FragmentSequence;
+        return -1;
+        break;
+      }
+      }
+    }
+    else
+    {
+      /* FragId != 0 */
+      switch (sigHead->m_fragmentInfo)
+      {
+      case 0:
+      case 1:
+      {
+        /* Error, out of sequence fragment */
+        error = FragmentSequence;
+        return -1;
+      }
+      case 2:
+        /* Fall through */
+      case 3:
+      {
+        /* Body fragment */
+        Uint32 incomingFragId;
+        Uint32 incomingSourceNode;
+        Uint32 numSecsInFragment;
+        
+        if (handleFragmentSections(sigHead, sigBody, sections,
+                                   &incomingFragId, &incomingSourceNode,
+                                   &numSecsInFragment) != 0)
+          return -1;
+
+        if (incomingSourceNode != sourceNode)
+        {
+          /* Error in source node */
+          error = FragmentSource;
+          return -1;
+        }
+        if (incomingFragId != fragId)
+        {
+          error = FragmentIdentity;
+          return -1;
+        }
+        
+        if (sigHead->m_fragmentInfo == 3)
+        {
+          /* Final fragment, contains actual signal body */
+          memcpy(signalBody,
+                 sigBody,
+                 sigLen * 4);
+          sh = *sigHead;
+          sh.theLength = sigLen - (numSecsInFragment + 1);
+          sh.m_noOfSections = 
+            ((secsReceived & 4)? 1 : 0) +
+            ((secsReceived & 2)? 1 : 0) +
+            ((secsReceived & 1)? 1 : 0);
+          sh.m_fragmentInfo = 0;
+          
+          complete=true;
+        }
+        break;
+      }
+      default:
+      {
+        /* Bad fragmentinfo field */
+        error = FragmentSequence;
+        return -1;
+      }
+      }
+    }
+
+    return 0;
+  }
+
+  int handleSignal(NdbApiSignal* signal,
+                   LinearSectionPtr* sections)
+  {
+    return handleSignal(signal, signal->getDataPtr(), sections);
+  }
+
+  bool isComplete()
+  {
+    return complete;
+  }
+
+  /* Valid if isComplete() */
+  SignalHeader getSignalHeader()
+  {
+    return sh;
+  }
+  
+  /* Valid if isComplete() */
+  Uint32* getSignalBody()
+  {
+    return signalBody;
+  }
+
+  /* Valid if isComplete() */
+  Uint32 getSourceNode()
+  {
+    return sourceNode;
+  }
+
+  SectionStore* getSectionStore()
+  {
+    return secStore;
+  }
+
+  AssemblyError getError() const
+  {
+    return error;
+  }
+  
+private:
+  int handleFragmentSections(const SignalHeader* sigHead,
+                             const Uint32* sigBody,
+                             LinearSectionPtr* sections,
+                             Uint32* incomingFragId,
+                             Uint32* incomingSourceNode,
+                             Uint32* numSecsInFragment)
+  {
+    Uint32 sigLen = sigHead->theLength;
+    
+    *numSecsInFragment = sigHead->m_noOfSections;
+    assert(sigLen >= (1 + *numSecsInFragment));
+           
+    *incomingFragId = sigBody[sigLen - 1];
+    *incomingSourceNode = refToNode(sigHead->theSendersBlockRef);
+    const Uint32* secIds = &sigBody[sigLen - (*numSecsInFragment) - 1];
+    
+    for (Uint32 i=0; i < *numSecsInFragment; i++)
+    {
+      secsReceived |= (1 << secIds[i]);
+      
+      if (secStore->appendToSection(secIds[i], sections[i]) != 0)
+      {
+        error = SectionAppend;
+        return -1;
+      }
+    }
+    
+    return 0;
+  }
+
+  Uint32 secsReceived;
+  SectionStore* secStore;
+  bool complete;
+  Uint32 fragId;
+  Uint32 sourceNode;
+  SignalHeader sh;
+  Uint32 signalBody[NdbApiSignal::MaxSignalWords];
+  AssemblyError error;
+};                 
+
+static const Uint32 MAX_SEND_BYTES=32768; /* Align with TransporterDefinitions.hpp */
+static const Uint32 MAX_SEND_WORDS=MAX_SEND_BYTES/4;
+static const Uint32 SEGMENT_WORDS= 60; /* Align with SSPool etc */
+static const Uint32 SEGMENT_BYTES = SEGMENT_WORDS * 4;
+//static const Uint32 MAX_SEGS_PER_SEND=64; /* 6.3 */
+static const Uint32 MAX_SEGS_PER_SEND = (MAX_SEND_BYTES / SEGMENT_BYTES) - 2; /* Align with TransporterFacade.cpp */
+static const Uint32 MAX_WORDS_PER_SEND = MAX_SEGS_PER_SEND * SEGMENT_WORDS;
+static const Uint32 HALF_MAX_WORDS_PER_SEND = MAX_WORDS_PER_SEND / 2;
+static const Uint32 THIRD_MAX_WORDS_PER_SEND = MAX_WORDS_PER_SEND / 3;
+static const Uint32 MEDIUM_SIZE = 5000;
+
+/* Most problems occurred with sections lengths around the boundary
+ * of the max amount sent - MAX_WORDS_PER_SEND, so we define interesting
+ * sizes so that we test behavior around these boundaries
+ */
+static Uint32 interestingSizes[] = 
+{
+  0,
+  1, 
+  MEDIUM_SIZE,
+  THIRD_MAX_WORDS_PER_SEND -1,
+  THIRD_MAX_WORDS_PER_SEND,
+  THIRD_MAX_WORDS_PER_SEND +1,
+  HALF_MAX_WORDS_PER_SEND -1,
+  HALF_MAX_WORDS_PER_SEND,
+  HALF_MAX_WORDS_PER_SEND + 1,
+  MAX_WORDS_PER_SEND -1, 
+  MAX_WORDS_PER_SEND, 
+  MAX_WORDS_PER_SEND + 1,
+  (2* MAX_SEND_WORDS) + 1,
+  1234 /* Random */
+};
+
+
+/* 
+   FragSignalChecker
+
+   Class for testing fragmented signal send + receive
+*/
+class FragSignalChecker
+{
+public:
+
+  Uint32* buffer;
+
+  FragSignalChecker()
+  {
+    buffer= NULL;
+    init();
+  }
+
+  ~FragSignalChecker()
+  {
+    free(buffer);
+  }
+
+  void init()
+  {
+    buffer = (Uint32*) malloc(getBufferSize());
+
+    if (buffer)
+    {
+      /* Init to a known pattern */
+      for (Uint32 i = 0; i < (getBufferSize()/4); i++)
+      {
+        buffer[i] = i;
+      }
+    }
+  }
+
+  static Uint32 getNumInterestingSizes()
+  {
+    return sizeof(interestingSizes) / sizeof(Uint32);
+  }
+
+  static Uint32 getNumIterationsRequired()
+  {
+    /* To get combinatorial coverage, need each of 3
+     * sections with each of the interesting sizes
+     */
+    Uint32 numSizes = getNumInterestingSizes();
+    return numSizes * numSizes * numSizes;
+  }
+
+  static Uint32 getSecSz(Uint32 secNum, Uint32 iter)
+  {
+    assert(secNum < 3);
+    Uint32 numSizes = getNumInterestingSizes();
+    Uint32 divisor = (secNum == 0 ? 1 : 
+                      secNum == 1 ? numSizes :
+                      numSizes * numSizes);
+    /* offset ensures only end sections are 0 length */
+    Uint32 index = (iter / divisor) % numSizes;
+    if ((index == 0) && (iter >= (divisor * numSizes)))
+      index = 1; /* Avoid lower numbered section being empty */
+    Uint32 value = interestingSizes[index];
+    if(value == 1234)
+    {
+      value = 1 + (rand() % (2* MAX_WORDS_PER_SEND));
+    }
+    return value;
+  }
+
+  static Uint32 getBufferSize()
+  {
+    const Uint32 MaxSectionWords = (2 * MAX_SEND_WORDS) + 1;
+    const Uint32 MaxTotalSectionsWords = MaxSectionWords * 3;
+    return MaxTotalSectionsWords * 4;
+  }
+
+  int sendRequest(SignalSender* ss, 
+                  Uint32* sizes)
+  {
+    /* 
+     * We want to try out various interactions between the
+     * 3 sections and the length of the data sent
+     * - All fit in one 'chunk'
+     * - None fit in one 'chunk'
+     * - Each ends on a chunk boundary
+     *
+     * Max send size is ~ 32kB
+     * Segment size is 60 words / 240 bytes
+     *  -> 136 segments / chunk
+     *  -> 134 segments / chunk 'normally' sent
+     *  -> 32160 bytes
+     */
+    g_err << "Sending "
+          << sizes[0]
+          << " " << sizes[1]
+          << " " << sizes[2]
+          << endl;
+    
+    const Uint32 numSections = 
+      (sizes[0] ? 1 : 0) + 
+      (sizes[1] ? 1 : 0) + 
+      (sizes[2] ? 1 : 0);
+    const Uint32 testType = 40;
+    const Uint32 fragmentLength = 1;
+    const Uint32 print = 1;
+    const Uint32 len = 5 + numSections;
+    SimpleSignal request(false);
+    
+    Uint32* signalBody = request.getDataPtrSend();
+    signalBody[0] = ss->getOwnRef();
+    signalBody[1] = testType;
+    signalBody[2] = fragmentLength;
+    signalBody[3] = print;
+    signalBody[4] = 0; /* Return count */
+    signalBody[5] = sizes[0];
+    signalBody[6] = sizes[1];
+    signalBody[7] = sizes[2];
+    
+    
+    request.ptr[0].sz = sizes[0];
+    request.ptr[0].p = &buffer[0];
+    request.ptr[1].sz = sizes[1];
+    request.ptr[1].p = &buffer[sizes[0]];
+    request.ptr[2].sz = sizes[2];
+    request.ptr[2].p = &buffer[sizes[0] + sizes[1]];
+    
+    request.header.m_noOfSections= numSections;
+    
+    int rc = 0;
+    ss->lock();
+    rc = ss->sendFragmentedSignal(ss->get_an_alive_node(),
+                                  request,
+                                  CMVMI,
+                                  GSN_TESTSIG,
+                                  len);
+    ss->unlock();
+    
+    if (rc != 0)
+    {
+      g_err << "Error sending signal" << endl;
+      return rc;
+    }
+    
+    return 0;
+  }
+
+  int waitResponse(SignalSender* ss,
+                   Uint32* expectedSz)
+  {
+    /* Here we need to wait for all of the signals which
+     * comprise a fragmented send, and check that
+     * the data is as expected
+     */
+    BasicSectionStore bss;
+    FragmentAssembler fa(&bss);
+    
+    while(true)
+    {
+      ss->lock();
+      SimpleSignal* response = ss->waitFor(10000);
+      ss->unlock();
+      
+      if (!response)
+      {
+        g_err << "Timed out waiting for response" << endl;
+        return -1;
+      }
+      
+      //response->print();
+      
+      if (response->header.theVerId_signalNumber == GSN_TESTSIG)
+      {
+        if (fa.handleSignal(&response->header,
+                            response->getDataPtr(),
+                            response->ptr) != 0)
+        {
+          g_err << "Error assembling fragmented signal."
+                << "  Error is "
+                << (Uint32) fa.getError()
+                << endl;
+          return -1;
+        }
+        
+        if (fa.isComplete())
+        {
+          Uint32 expectedWord = 0;
+          for (Uint32 i=0; i < 3; i++)
+          {
+            if (bss.ptrs[i].sz != expectedSz[i])
+            {
+              g_err << "Wrong size for section : "
+                    << i
+                    << " expected " << expectedSz[i]
+                    << " but received " << bss.ptrs[i].sz
+                    << endl;
+              return -1;
+            }
+            
+            for (Uint32 d=0; d < expectedSz[i]; d++)
+            {
+              if (bss.ptrs[i].p[d] != expectedWord)
+              {
+                g_err << "Bad data in section "
+                      << i
+                      << " at word number "
+                      << d
+                      << ".  Expected "
+                      << expectedWord
+                      << " but found "
+                      << bss.ptrs[i].p[d]
+                      << endl;
+                return -1;
+              }
+              expectedWord++;
+            }
+          }
+          
+          break;
+        }
+        
+      }
+    }
+    
+    return 0;
+  }
+  
+  int runTest(SignalSender* ss)
+  {
+    for (Uint32 iter=0; 
+         iter < getNumIterationsRequired(); 
+         iter++)
+    {
+      int rc;
+      Uint32 sizes[3];
+      sizes[0] = getSecSz(0, iter);
+      sizes[1] = getSecSz(1, iter);
+      sizes[2] = getSecSz(2, iter);
+      
+      /* Build request, including sections */
+      rc = sendRequest(ss, sizes);
+      if (rc != 0)
+      {
+        g_err << "Failed sending request on iteration " << iter 
+              << " with rc " << rc << endl;
+        return NDBT_FAILED;
+      }
+      
+      /* Wait for response */
+      rc = waitResponse(ss, sizes);
+      if (rc != 0)
+      {
+        g_err << "Failed waiting for response on iteration " << iter
+              << " with rc " << rc << endl;
+        return NDBT_FAILED;
+      }
+    }
+    
+    return NDBT_OK;
+  }
+};
+
+
+int testFragmentedSend(NDBT_Context* ctx, NDBT_Step* step){
+  Ndb* pNdb= GETNDB(step);
+  Ndb_cluster_connection* conn = &pNdb->get_ndb_cluster_connection();
+  SignalSender ss(conn);
+  FragSignalChecker fsc;
+  
+  return fsc.runTest(&ss);
+}
+
+
 
 NDBT_TESTSUITE(testNdbApi);
 TESTCASE("MaxNdb", 
@@ -5245,6 +5876,10 @@ TESTCASE("NdbClusterConnectSR",
   STEPS(runNdbClusterConnect, MAX_NODES);
   STEP(runRestarts); // Note after runNdbClusterConnect or else counting wrong
 }
+TESTCASE("TestFragmentedSend",
+         "Test fragmented send behaviour"){
+  INITIALIZER(testFragmentedSend);
+}
 NDBT_TESTSUITE_END(testNdbApi);
 
 int main(int argc, const char** argv){

=== modified file 'storage/ndb/test/ndbapi/testNodeRestart.cpp'
--- a/storage/ndb/test/ndbapi/testNodeRestart.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp	2011-10-28 14:17:25 +0000
@@ -4726,17 +4726,23 @@ int runSplitLatency25PctFail(NDBT_Contex
   /**
    * Now wait for half of cluster to die...
    */
-  ndbout_c("Waiting for half of cluster to die");
-  int not_started = 0;
   const int node_count = restarter.getNumDbNodes();
+  ndbout_c("Waiting for half of cluster (%u/%u) to die", node_count/2, node_count);
+  int not_started = 0;
   do
   {
     not_started = 0;
     for (int i = 0; i < node_count; i++)
     {
-      if (restarter.getNodeStatus(restarter.getDbNodeId(i)) == NDB_MGM_NODE_STATUS_NOT_STARTED)
+      int nodeId = restarter.getDbNodeId(i);
+      int status = restarter.getNodeStatus(nodeId);
+      ndbout_c("Node %u status %u", nodeId, status);
+      if (status == NDB_MGM_NODE_STATUS_NOT_STARTED)
         not_started++;
     }
+    NdbSleep_MilliSleep(2000);
+    ndbout_c("%u / %u in state NDB_MGM_NODE_STATUS_NOT_STARTED(%u)",
+             not_started, node_count, NDB_MGM_NODE_STATUS_NOT_STARTED);
   } while (2 * not_started != node_count);
 
   ndbout_c("Restarting cluster");
@@ -4751,6 +4757,125 @@ int runSplitLatency25PctFail(NDBT_Contex
   return NDBT_OK;
 }
 
+int
+runMasterFailSlowLCP(NDBT_Context* ctx, NDBT_Step* step)
+{
+  /* Motivated by bug# 13323589 */
+  NdbRestarter res;
+
+  if (res.getNumDbNodes() < 4)
+  {
+    return NDBT_OK;
+  }
+
+  int master = res.getMasterNodeId();
+  int otherVictim = res.getRandomNodeOtherNodeGroup(master, rand());
+  int nextMaster = res.getNextMasterNodeId(master);
+  nextMaster = (nextMaster == otherVictim) ? res.getNextMasterNodeId(otherVictim) :
+    nextMaster;
+  assert(nextMaster != master);
+  assert(nextMaster != otherVictim);
+
+  /* Get a node which is not current or next master */
+  int slowNode= nextMaster;
+  while ((slowNode == nextMaster) ||
+         (slowNode == otherVictim) ||
+         (slowNode == master))
+  {
+    slowNode = res.getRandomNotMasterNodeId(rand());
+  }
+
+  ndbout_c("master: %d otherVictim : %d nextMaster: %d slowNode: %d",
+           master,
+           otherVictim,
+           nextMaster,
+           slowNode);
+
+  /* Steps :
+   * 1. Insert slow LCP frag error in slowNode
+   * 2. Start LCP
+   * 3. Wait for LCP to start
+   * 4. Kill at least two nodes including Master
+   * 5. Wait for killed nodes to attempt to rejoin
+   * 6. Remove slow LCP error
+   * 7. Allow system to stabilise + check no errors
+   */
+  // 5073 = Delay on handling BACKUP_FRAGMENT_CONF in LQH
+  if (res.insertErrorInNode(slowNode, 5073))
+  {
+    return NDBT_FAILED;
+  }
+
+  {
+    int req[1] = {DumpStateOrd::DihStartLcpImmediately};
+    if (res.dumpStateOneNode(master, req, 1))
+    {
+      return NDBT_FAILED;
+    }
+  }
+
+  ndbout_c("Giving LCP time to start...");
+
+  NdbSleep_SecSleep(10);
+
+  ndbout_c("Killing other victim node (%u)...", otherVictim);
+
+  if (res.restartOneDbNode(otherVictim, false, false, true))
+  {
+    return NDBT_FAILED;
+  }
+
+  ndbout_c("Killing Master node (%u)...", master);
+
+  if (res.restartOneDbNode(master, false, false, true))
+  {
+    return NDBT_FAILED;
+  }
+
+  /*
+     ndbout_c("Waiting for old Master node to enter NoStart state...");
+     if (res.waitNodesNoStart(&master, 1, 10))
+     return NDBT_FAILED;
+
+     ndbout_c("Starting old Master...");
+     if (res.startNodes(&master, 1))
+     return NDBT_FAILED;
+
+  */
+  ndbout_c("Waiting for some progress on old Master and other victim restart");
+  NdbSleep_SecSleep(15);
+
+  ndbout_c("Now removing error insert on slow node (%u)", slowNode);
+
+  if (res.insertErrorInNode(slowNode, 0))
+  {
+    return NDBT_FAILED;
+  }
+
+  ndbout_c("Now wait a while to check stability...");
+  NdbSleep_SecSleep(30);
+
+  if (res.getNodeStatus(master) == NDB_MGM_NODE_STATUS_NOT_STARTED)
+  {
+    ndbout_c("Old Master needs kick to restart");
+    if (res.startNodes(&master, 1))
+    {
+      return NDBT_FAILED;
+    }
+  }
+
+  ndbout_c("Wait for cluster recovery...");
+  if (res.waitClusterStarted())
+  {
+    return NDBT_FAILED;
+  }
+
+
+  ndbout_c("Done");
+  return NDBT_OK;
+}
+
+
 NDBT_TESTSUITE(testNodeRestart);
 TESTCASE("NoLoad", 
 	 "Test that one node at a time can be stopped and then restarted "\
@@ -5282,6 +5407,11 @@ TESTCASE("Bug57522", "")
 {
   INITIALIZER(runBug57522);
 }
+TESTCASE("MasterFailSlowLCP",
+         "DIH Master failure during a slow LCP can cause a crash.")
+{
+  INITIALIZER(runMasterFailSlowLCP);
+}
 TESTCASE("ForceStopAndRestart", "Test restart and stop -with force flag")
 {
   STEP(runForceStopAndRestart);

=== modified file 'storage/ndb/test/run-test/atrt.hpp'
--- a/storage/ndb/test/run-test/atrt.hpp	2011-10-03 11:06:06 +0000
+++ b/storage/ndb/test/run-test/atrt.hpp	2011-10-20 11:43:11 +0000
@@ -201,6 +201,7 @@ extern const char * g_ndbd_bin_path;
 extern const char * g_ndbmtd_bin_path;
 extern const char * g_mysqld_bin_path;
 extern const char * g_mysql_install_db_bin_path;
+extern const char * g_libmysqlclient_so_path;
 
 extern const char * g_search_path[];
 

=== added file 'storage/ndb/test/run-test/conf-daily-perf.cnf'
--- a/storage/ndb/test/run-test/conf-daily-perf.cnf	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/test/run-test/conf-daily-perf.cnf	2011-10-16 18:36:16 +0000
@@ -0,0 +1,64 @@
+[atrt]
+basedir = CHOOSE_dir
+baseport = 14000
+clusters = .4node
+fix-nodeid=1
+mt = 2
+
+[ndb_mgmd]
+
+[mysqld]
+skip-innodb
+loose-skip-bdb
+skip-grant-tables
+socket=mysql.sock
+
+ndbcluster=1
+ndb-force-send=1
+ndb-use-exact-count=0
+ndb-extra-logging=1
+ndb-autoincrement-prefetch-sz=256
+engine-condition-pushdown=1
+ndb-cluster-connection-pool=4
+
+key_buffer = 256M
+max_allowed_packet = 16M
+sort_buffer_size = 512K
+read_buffer_size = 256K
+read_rnd_buffer_size = 512K
+myisam_sort_buffer_size = 8M
+max-connections=200
+thread-cache-size=128
+
+query_cache_type = 0
+query_cache_size = 0
+table_open_cache=1024
+table_definition_cache=256
+
+[client]
+protocol=tcp
+
+[cluster_config.4node]
+ndb_mgmd = CHOOSE_host1
+ndbd = CHOOSE_host5,CHOOSE_host6,CHOOSE_host7,CHOOSE_host8
+ndbapi= CHOOSE_host2,CHOOSE_host3,CHOOSE_host4
+mysqld = CHOOSE_host1
+
+NoOfReplicas = 2
+IndexMemory = 250M
+DataMemory = 1500M
+BackupMemory = 64M
+MaxNoOfConcurrentScans = 100
+MaxNoOfSavedMessages= 5
+NoOfFragmentLogFiles = 8
+FragmentLogFileSize = 64M
+ODirect=1
+MaxNoOfExecutionThreads=8
+
+SharedGlobalMemory=256M
+DiskPageBufferMemory=256M
+FileSystemPath=/data0/autotest
+FileSystemPathDataFiles=/data1/autotest
+FileSystemPathUndoFiles=/data2/autotest
+InitialLogfileGroup=undo_buffer_size=64M;undofile01.dat:256M;undofile02.dat:256M
+InitialTablespace=datafile01.dat:256M;datafile02.dat:256M

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2011-10-05 13:18:31 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2011-10-28 14:17:25 +0000
@@ -1830,3 +1830,13 @@ max-time: 500
 cmd: testNdbApi
 args: -n NdbClusterConnectSR T1
 
+# Fragmented signal send
+max-time: 1800
+cmd: testNdbApi
+args: -n TestFragmentedSend T1
+
+max-time: 300
+cmd: testNodeRestart
+args: -n MasterFailSlowLCP T1
+
+

=== added file 'storage/ndb/test/run-test/daily-perf-tests.txt'
--- a/storage/ndb/test/run-test/daily-perf-tests.txt	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/test/run-test/daily-perf-tests.txt	2011-10-16 18:36:16 +0000
@@ -0,0 +1,140 @@
+# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+max-time: 300
+cmd: DbCreate
+args:
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 1
+type: bench
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 25
+type: bench
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 100
+type: bench
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 200
+type: bench
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 1 -proc 25
+type: bench
+
+# baseline
+max-time: 600
+cmd: flexAsynch
+args: -temp -con 2 -t 8 -r 2 -p 64 -ndbrecord
+type: bench
+
+# minimal record
+max-time: 600
+cmd: flexAsynch
+args: -temp -con 2 -t 8 -r 2 -p 64 -ndbrecord -a 2
+type: bench
+
+# 4k record
+max-time: 600
+cmd: flexAsynch
+args: -temp -con 2 -t 8 -r 2 -p 64 -ndbrecord -a 25 -s 40
+type: bench
+
+# baseline DD
+max-time: 600
+cmd: flexAsynch
+args: -dd -temp -con 2 -t 8 -r 2 -p 64 -ndbrecord
+type: bench
+
+# minimal record DD
+max-time: 600
+cmd: flexAsynch
+args: -dd -temp -con 2 -t 8 -r 2 -p 64 -ndbrecord -a 2
+type: bench
+
+# 4k record DD
+max-time: 600
+cmd: flexAsynch
+args: -dd -temp -con 2 -t 8 -r 2 -p 64 -ndbrecord -a 25 -s 40
+type: bench
+
+# sql
+max-time: 600
+client: ndb-sql-perf-create-table.sh
+args: t1
+
+max-time: 600
+client: ndb-sql-perf-select.sh
+args: t1 1 64
+mysqld: --ndb-cluster-connection-pool=1
+type: bench
+
+max-time: 600
+client: ndb-sql-perf-select.sh
+args: t1 1 64
+mysqld: --ndb-cluster-connection-pool=4
+type: bench
+
+max-time: 600
+client: ndb-sql-perf-update.sh
+args: t1 1 64
+mysqld: --ndb-cluster-connection-pool=1
+type: bench
+
+max-time: 600
+client: ndb-sql-perf-update.sh
+args: t1 1 64
+mysqld: --ndb-cluster-connection-pool=4
+type: bench
+
+max-time: 600
+client: ndb-sql-perf-drop-table.sh
+args: t1
+mysqld:
+
+# sql join
+max-time: 600
+client: ndb-sql-perf-load-tpcw.sh
+args:
+
+max-time: 600
+client: ndb-sql-perf-tpcw-getBestSeller.sh
+args:
+type: bench
+
+max-time: 600
+client: ndb-sql-perf-drop-tpcw.sh
+args:
+
+max-time: 600
+client: ndb-sql-perf-load-music-store.sh
+args:
+
+max-time: 600
+client: ndb-sql-perf-select-music-store.sh
+args:
+type: bench
+
+max-time: 600
+client: ndb-sql-perf-drop-music-store.sh
+args:
+

=== modified file 'storage/ndb/test/run-test/files.cpp'
--- a/storage/ndb/test/run-test/files.cpp	2011-10-03 08:46:52 +0000
+++ b/storage/ndb/test/run-test/files.cpp	2011-10-20 11:43:11 +0000
@@ -122,6 +122,24 @@ printfile(FILE* out, Properties& props,
   fflush(out);
 }
 
+static
+char *
+dirname(const char * path)
+{
+  char * s = strdup(path);
+  size_t len = strlen(s);
+  for (size_t i = 1; i<len; i++)
+  {
+    if (s[len - i] == '/')
+    {
+      s[len - i] = 0;
+      return s;
+    }
+  }
+  free(s);
+  return 0;
+}
+
 bool
 setup_files(atrt_config& config, int setup, int sshx)
 {
@@ -313,8 +331,23 @@ setup_files(atrt_config& config, int set
         }
         fprintf(fenv, "$PATH\n");
 	keys.push_back("PATH");
+
+        {
+          /**
+           * In 5.5...binaries aren't compiled with rpath
+           * So we need an explicit LD_LIBRARY_PATH
+           *
+           * Use path from libmysqlclient.so
+           */
+          char * dir = dirname(g_libmysqlclient_so_path);
+          fprintf(fenv, "LD_LIBRARY_PATH=%s:$LD_LIBRARY_PATH\n", dir);
+          free(dir);
+          keys.push_back("LD_LIBRARY_PATH");
+        }
+
 	for (size_t k = 0; k<keys.size(); k++)
 	  fprintf(fenv, "export %s\n", keys[k].c_str());
+
 	fflush(fenv);
 	fclose(fenv);
       }

=== modified file 'storage/ndb/test/run-test/main.cpp'
--- a/storage/ndb/test/run-test/main.cpp	2011-10-03 14:59:24 +0000
+++ b/storage/ndb/test/run-test/main.cpp	2011-10-20 11:43:11 +0000
@@ -86,6 +86,7 @@ const char * g_ndbd_bin_path = 0;
 const char * g_ndbmtd_bin_path = 0;
 const char * g_mysqld_bin_path = 0;
 const char * g_mysql_install_db_bin_path = 0;
+const char * g_libmysqlclient_so_path = 0;
 
 static struct
 {
@@ -93,11 +94,12 @@ static struct
   const char * exe;
   const char ** var;
 } g_binaries[] = {
-  { true,  "ndb_mgmd",         &g_ndb_mgmd_bin_path},
-  { true,  "ndbd",             &g_ndbd_bin_path },
-  { false, "ndbmtd",           &g_ndbmtd_bin_path },
-  { true,  "mysqld",           &g_mysqld_bin_path },
-  { true,  "mysql_install_db", &g_mysql_install_db_bin_path },
+  { true,  "ndb_mgmd",          &g_ndb_mgmd_bin_path},
+  { true,  "ndbd",              &g_ndbd_bin_path },
+  { false, "ndbmtd",            &g_ndbmtd_bin_path },
+  { true,  "mysqld",            &g_mysqld_bin_path },
+  { true,  "mysql_install_db",  &g_mysql_install_db_bin_path },
+  { true,  "libmysqlclient.so", &g_libmysqlclient_so_path },
   { true, 0, 0 }
 };
 
@@ -108,6 +110,8 @@ g_search_path[] =
   "libexec",
   "sbin",
   "scripts",
+  "lib",
+  "lib/mysql",
   0
 };
 static bool find_binaries();

=== modified file 'storage/ndb/test/src/DbUtil.cpp'
--- a/storage/ndb/test/src/DbUtil.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/src/DbUtil.cpp	2011-10-21 08:59:23 +0000
@@ -215,7 +215,7 @@ DbUtil::mysqlSimplePrepare(const char *q
     printf("Inside DbUtil::mysqlSimplePrepare\n");
   #endif
   MYSQL_STMT *my_stmt= mysql_stmt_init(this->getMysql());
-  if (my_stmt && mysql_stmt_prepare(my_stmt, query, strlen(query))){
+  if (my_stmt && mysql_stmt_prepare(my_stmt, query, (unsigned long)strlen(query))){
     this->printStError(my_stmt,"Prepare Statement Failed");
     mysql_stmt_close(my_stmt);
     return NULL;
@@ -353,7 +353,7 @@ DbUtil::runQuery(const char* sql,
 
 
   MYSQL_STMT *stmt= mysql_stmt_init(m_mysql);
-  if (mysql_stmt_prepare(stmt, sql, strlen(sql)))
+  if (mysql_stmt_prepare(stmt, sql, (unsigned long)strlen(sql)))
   {
     report_error("Failed to prepare: ", m_mysql);
     return false;
@@ -390,7 +390,7 @@ DbUtil::runQuery(const char* sql,
       args.get(name.c_str(), &val_s);
       bind_param[i].buffer_type= MYSQL_TYPE_STRING;
       bind_param[i].buffer= (char*)val_s;
-      bind_param[i].buffer_length= strlen(val_s);
+      bind_param[i].buffer_length= (unsigned long)strlen(val_s);
       g_debug << " param" << name.c_str() << ": " << val_s << endl;
       break;
     default:

=== modified file 'storage/ndb/test/src/HugoOperations.cpp'
--- a/storage/ndb/test/src/HugoOperations.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/src/HugoOperations.cpp	2011-10-12 10:19:08 +0000
@@ -411,20 +411,30 @@ int
 HugoOperations::setValues(NdbOperation* pOp, int rowId, int updateId)
 {
   // Define primary keys
-  int a;
   if (equalForRow(pOp, rowId) != 0)
     return NDBT_FAILED;
-  
-  for(a = 0; a<tab.getNoOfColumns(); a++){
-    if (tab.getColumn(a)->getPrimaryKey() == false){
-      if(setValueForAttr(pOp, a, rowId, updateId ) != 0){ 
+
+  if (setNonPkValues(pOp, rowId, updateId) != 0)
+    return NDBT_FAILED;
+
+  return NDBT_OK;
+}
+
+int
+HugoOperations::setNonPkValues(NdbOperation* pOp, int rowId, int updateId)
+{
+  for(int a = 0; a<tab.getNoOfColumns(); a++)
+  {
+    if (tab.getColumn(a)->getPrimaryKey() == false)
+    {
+      if(setValueForAttr(pOp, a, rowId, updateId ) != 0)
+      {
 	ERR(pTrans->getNdbError());
         setNdbError(pTrans->getNdbError());
 	return NDBT_FAILED;
       }
     }
   }
-  
   return NDBT_OK;
 }
 

=== modified file 'storage/ndb/test/src/HugoQueries.cpp'
--- a/storage/ndb/test/src/HugoQueries.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/src/HugoQueries.cpp	2011-10-28 08:47:01 +0000
@@ -41,7 +41,7 @@ HugoQueries::HugoQueries(const NdbQueryD
 
 HugoQueries::~HugoQueries()
 {
-  for (size_t o = 0; o<m_ops.size(); o++)
+  for (unsigned o = 0; o<m_ops.size(); o++)
   {
     while (m_ops[o].m_rows.size())
     {
@@ -56,14 +56,14 @@ HugoQueries::~HugoQueries()
 void
 HugoQueries::allocRows(int batch)
 {
-  for (size_t o = 0; o<m_ops.size(); o++)
+  for (unsigned o = 0; o<m_ops.size(); o++)
   {
     const NdbQueryOperationDef * pOp =m_query_def->getQueryOperation((Uint32)o);
     const NdbDictionary::Table* tab = pOp->getTable();
 
     if (tab)
     {
-      while (m_ops[o].m_rows.size() < (size_t)batch)
+      while (m_ops[o].m_rows.size() < (unsigned)batch)
       {
         m_ops[o].m_rows.push_back(new NDBT_ResultRow(* tab));
       }
@@ -197,7 +197,7 @@ HugoQueries::runLookupQuery(Ndb* pNdb,
         return NDBT_FAILED;
       }
 
-      for (size_t o = 0; o<m_ops.size(); o++)
+      for (unsigned o = 0; o<m_ops.size(); o++)
       {
         NdbQueryOperation * pOp = query->getQueryOperation((Uint32)o);
         HugoQueries::getValueForQueryOp(pOp, m_ops[o].m_rows[b]);
@@ -219,13 +219,53 @@ HugoQueries::runLookupQuery(Ndb* pNdb,
       pTrans->close();
       return NDBT_FAILED;
     }
+#if 0
+    // Disabled, as this is incorrectly handled in SPJ API, will fix soon
+    else
+    {
+      /**
+       * If ::execute() didn't fail, there should not be an error on
+       * its NdbError object either:
+       */
+      const NdbError err = pTrans->getNdbError();
+      if (err.code)
+      {
+        ERR(err);
+        ndbout_c("API INCONSISTENCY: NdbTransaction returned NdbError even if ::execute() succeeded");
+        pTrans->close();
+        return NDBT_FAILED;
+      }
+    }
+#endif
 
+    bool retry = false;
     for (int b = 0; b<batch; b++)
     {
       NdbQuery * query = queries[b];
-      if (query->nextResult() == NdbQuery::NextResult_gotRow)
+
+      /**
+       * As NdbQuery is always 'dirty read' (impl. limitations), 'AbortOnError'
+       * is ignored and handled as 'IgnoreError'. We will therefore not get
+       * errors returned from ::execute() or set into 'pTrans->getNdbError()':
+       * Has to check for errors on the NdbQuery object instead:
+       */
+      const NdbError& err = query->getNdbError();
+      if (err.code)
+      {
+        ERR(err);
+        if (err.status == NdbError::TemporaryError){
+          pTrans->close();
+          retry = true;
+          break;
+        }
+        pTrans->close();
+        return NDBT_FAILED;
+      }
+
+      const NdbQuery::NextResultOutcome stat = query->nextResult();
+      if (stat == NdbQuery::NextResult_gotRow)
       {
-        for (size_t o = 0; o<m_ops.size(); o++)
+        for (unsigned o = 0; o<m_ops.size(); o++)
         {
           NdbQueryOperation * pOp = query->getQueryOperation((Uint32)o);
           if (!pOp->isRowNULL())
@@ -239,11 +279,30 @@ HugoQueries::runLookupQuery(Ndb* pNdb,
           }
         }
       }
+      else if (stat == NdbQuery::NextResult_error)
+      {
+        const NdbError& err = query->getNdbError();
+        ERR(err);
+        if (err.status == NdbError::TemporaryError){
+          pTrans->close();
+          retry = true;
+          break;
+        }
+        pTrans->close();
+        return NDBT_FAILED;
+      }
+    }
+    if (retry)
+    {
+      NdbSleep_MilliSleep(50);
+      retryAttempt++;
+      continue;
     }
+
     pTrans->close();
     r += batch;
 
-    for (size_t i = 0; i<batch_rows_found.size(); i++)
+    for (unsigned i = 0; i<batch_rows_found.size(); i++)
       m_rows_found[i] += batch_rows_found[i];
   }
 
@@ -292,7 +351,7 @@ HugoQueries::runScanQuery(Ndb * pNdb,
       return NDBT_FAILED;
     }
 
-    for (size_t o = 0; o<m_ops.size(); o++)
+    for (unsigned o = 0; o<m_ops.size(); o++)
     {
       NdbQueryOperation * pOp = query->getQueryOperation((Uint32)o);
       HugoQueries::getValueForQueryOp(pOp, m_ops[o].m_rows[0]);
@@ -312,6 +371,44 @@ HugoQueries::runScanQuery(Ndb * pNdb,
       pTrans->close();
       return NDBT_FAILED;
     }
+    else
+    {
+      // Disabled, as this is incorrectly handled in SPJ API, will fix soon
+#if 0
+      /**
+       * If ::execute() didn't fail, there should not be an error on
+       * its NdbError object either:
+       */
+      const NdbError err = pTrans->getNdbError();
+      if (err.code)
+      {
+        ERR(err);
+        ndbout_c("API INCONSISTENCY: NdbTransaction returned NdbError even if ::execute() succeeded");
+        pTrans->close();
+        return NDBT_FAILED;
+      }
+#endif
+
+      /**
+       * As NdbQuery is always 'dirty read' (impl. limitations), 'AbortOnError'
+       * is ignored and handled as 'IgnoreError'. We will therefore not get
+       * errors returned from ::execute() or set into 'pTrans->getNdbError()':
+       * Has to check for errors on the NdbQuery object instead:
+       */
+      NdbError err = query->getNdbError();
+      if (err.code)
+      {
+        ERR(err);
+        if (err.status == NdbError::TemporaryError){
+          pTrans->close();
+          NdbSleep_MilliSleep(50);
+          retryAttempt++;
+          continue;
+        }
+        pTrans->close();
+        return NDBT_FAILED;
+      }
+    }
 
     int r = rand() % 100;
     if (r < abort && ((r & 1) == 0))
@@ -335,7 +432,7 @@ HugoQueries::runScanQuery(Ndb * pNdb,
       return NDBT_OK;
       }
 
-      for (size_t o = 0; o<m_ops.size(); o++)
+      for (unsigned o = 0; o<m_ops.size(); o++)
       {
         NdbQueryOperation * pOp = query->getQueryOperation((Uint32)o);
         if (!pOp->isRowNULL())

=== modified file 'storage/ndb/test/src/HugoQueryBuilder.cpp'
--- a/storage/ndb/test/src/HugoQueryBuilder.cpp	2011-07-04 13:37:56 +0000
+++ b/storage/ndb/test/src/HugoQueryBuilder.cpp	2011-10-21 08:59:23 +0000
@@ -55,7 +55,7 @@ HugoQueryBuilder::init()
 
 HugoQueryBuilder::~HugoQueryBuilder()
 {
-  for (size_t i = 0; i<m_queries.size(); i++)
+  for (unsigned i = 0; i<m_queries.size(); i++)
     m_queries[i]->destroy();
 }
 
@@ -76,7 +76,7 @@ HugoQueryBuilder::fixOptions()
 void
 HugoQueryBuilder::addTable(Ndb* ndb, const NdbDictionary::Table* tab)
 {
-  for (size_t i = 0; i<m_tables.size(); i++)
+  for (unsigned i = 0; i<m_tables.size(); i++)
   {
     if (m_tables[i].m_table == tab)
       return;
@@ -133,7 +133,7 @@ HugoQueryBuilder::getJoinLevel() const
 void
 HugoQueryBuilder::removeTable(const NdbDictionary::Table* tab)
 {
-  for (size_t i = 0; i<m_tables.size(); i++)
+  for (unsigned i = 0; i<m_tables.size(); i++)
   {
     if (m_tables[i].m_table == tab)
     {
@@ -215,11 +215,11 @@ HugoQueryBuilder::checkBindable(Vector<c
                                 Vector<Op> ops,
                                 bool allow_bind_nullable)
 {
-  for (size_t c = 0; c < cols.size(); c++)
+  for (unsigned c = 0; c < cols.size(); c++)
   {
     const NdbDictionary::Column * col = cols[c];
     bool found = false;
-    for (size_t t = 0; !found && t<ops.size(); t++)
+    for (unsigned t = 0; !found && t<ops.size(); t++)
     {
       const NdbDictionary::Table * tab = ops[t].m_op->getTable();
       if (tab)
@@ -274,7 +274,7 @@ HugoQueryBuilder::checkBusyScan(Op op) c
     op = m_query[op.m_parent];
   }
 
-  for (size_t i = op.m_idx + 1; i < m_query.size(); i++)
+  for (unsigned i = op.m_idx + 1; i < m_query.size(); i++)
     if (isAncestor(op, m_query[i]) && isScan(m_query[i].m_op))
       return true;
 
@@ -537,11 +537,11 @@ loop:
       if (op.m_op == 0)
       {
         ndbout << "Failed to add to " << endl;
-        for (size_t i = 0; i<m_query.size(); i++)
+        for (unsigned i = 0; i<m_query.size(); i++)
           ndbout << m_query[i] << endl;
 
         ndbout << "Parents: " << endl;
-        for (size_t i = 0; i<parents.size(); i++)
+        for (unsigned i = 0; i<parents.size(); i++)
           ndbout << parents[i].m_idx << " ";
         ndbout << endl;
       }

=== modified file 'storage/ndb/test/src/NDBT_Test.cpp'
--- a/storage/ndb/test/src/NDBT_Test.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/src/NDBT_Test.cpp	2011-10-21 08:59:23 +0000
@@ -414,7 +414,7 @@ NDBT_TestCaseImpl1::NDBT_TestCaseImpl1(N
 NDBT_TestCaseImpl1::~NDBT_TestCaseImpl1(){
   NdbCondition_Destroy(waitThreadsCondPtr);
   NdbMutex_Destroy(waitThreadsMutexPtr);
-  size_t i;
+  unsigned i;
   for(i = 0; i < initializers.size();  i++)
     delete initializers[i];
   initializers.clear();

=== modified file 'storage/ndb/test/src/NdbBackup.cpp'
--- a/storage/ndb/test/src/NdbBackup.cpp	2011-07-04 13:37:56 +0000
+++ b/storage/ndb/test/src/NdbBackup.cpp	2011-10-21 08:59:23 +0000
@@ -308,7 +308,7 @@ NdbBackup::restore(unsigned _backup_id){
   res = execRestore(true, true, ndbNodes[0].node_id, _backup_id);
 
   // Restore data once for each node
-  for(size_t i = 1; i < ndbNodes.size(); i++){
+  for(unsigned i = 1; i < ndbNodes.size(); i++){
     res = execRestore(true, false, ndbNodes[i].node_id, _backup_id);
   }
   

=== modified file 'storage/ndb/test/src/NdbRestarter.cpp'
--- a/storage/ndb/test/src/NdbRestarter.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/src/NdbRestarter.cpp	2011-10-21 08:59:23 +0000
@@ -59,7 +59,7 @@ int NdbRestarter::getDbNodeId(int _i){
   if (getStatus() != 0)
     return -1;
 
-  for(size_t i = 0; i < ndbNodes.size(); i++){     
+  for(unsigned i = 0; i < ndbNodes.size(); i++){     
     if (i == (unsigned)_i){
       return ndbNodes[i].node_id;
     }
@@ -114,7 +114,7 @@ NdbRestarter::restartNodes(int * nodes,
     for (int j = 0; j<cnt; j++)
     {
       int _nodeId = nodes[j];
-      for(size_t i = 0; i < ndbNodes.size(); i++)
+      for(unsigned i = 0; i < ndbNodes.size(); i++)
       {
         if(ndbNodes[i].node_id == _nodeId)
         {
@@ -147,7 +147,7 @@ NdbRestarter::getMasterNodeId(){
   
   int min = 0;
   int node = -1;
-  for(size_t i = 0; i < ndbNodes.size(); i++){
+  for(unsigned i = 0; i < ndbNodes.size(); i++){
     if(min == 0 || ndbNodes[i].dynamic_id < min){
       min = ndbNodes[i].dynamic_id;
       node = ndbNodes[i].node_id;
@@ -165,7 +165,7 @@ NdbRestarter::getNodeGroup(int nodeId){
   if (getStatus() != 0)
     return -1;
   
-  for(size_t i = 0; i < ndbNodes.size(); i++)
+  for(unsigned i = 0; i < ndbNodes.size(); i++)
   {
     if(ndbNodes[i].node_id == nodeId)
     {
@@ -184,7 +184,7 @@ NdbRestarter::getNextMasterNodeId(int no
   if (getStatus() != 0)
     return -1;
   
-  size_t i;
+  unsigned i;
   for(i = 0; i < ndbNodes.size(); i++)
   {
     if(ndbNodes[i].node_id == nodeId)
@@ -244,7 +244,7 @@ NdbRestarter::getRandomNodeOtherNodeGrou
     return -1;
   
   int node_group = -1;
-  for(size_t i = 0; i < ndbNodes.size(); i++){
+  for(unsigned i = 0; i < ndbNodes.size(); i++){
     if(ndbNodes[i].node_id == nodeId){
       node_group = ndbNodes[i].node_group;
       break;
@@ -274,7 +274,7 @@ NdbRestarter::getRandomNodeSameNodeGroup
     return -1;
   
   int node_group = -1;
-  for(size_t i = 0; i < ndbNodes.size(); i++){
+  for(unsigned i = 0; i < ndbNodes.size(); i++){
     if(ndbNodes[i].node_id == nodeId){
       node_group = ndbNodes[i].node_group;
       break;
@@ -347,7 +347,7 @@ NdbRestarter::waitClusterState(ndb_mgm_n
   }
   
   // Collect all nodes into nodes
-  for (size_t i = 0; i < ndbNodes.size(); i++){
+  for (unsigned i = 0; i < ndbNodes.size(); i++){
     nodes[i] = ndbNodes[i].node_id;
     numNodes++;
   }
@@ -388,7 +388,7 @@ NdbRestarter::waitNodesState(const int *
 	 * First check if any node is not starting
 	 * then it's no idea to wait anymore
 	 */
-	for (size_t n = 0; n < ndbNodes.size(); n++){
+	for (unsigned n = 0; n < ndbNodes.size(); n++){
 	  if (ndbNodes[n].node_status != NDB_MGM_NODE_STATUS_STARTED &&
 	      ndbNodes[n].node_status != NDB_MGM_NODE_STATUS_STARTING)
 	    waitMore = false;
@@ -426,7 +426,7 @@ NdbRestarter::waitNodesState(const int *
 
     for (int i = 0; i < _num_nodes; i++){
       ndb_mgm_node_state* ndbNode = NULL;
-      for (size_t n = 0; n < ndbNodes.size(); n++){
+      for (unsigned n = 0; n < ndbNodes.size(); n++){
 	if (ndbNodes[n].node_id == _nodes[i])
 	  ndbNode = &ndbNodes[n];
       }
@@ -713,7 +713,7 @@ int NdbRestarter::insertErrorInAllNodes(
 
   int result = 0;
  
-  for(size_t i = 0; i < ndbNodes.size(); i++){     
+  for(unsigned i = 0; i < ndbNodes.size(); i++){     
     g_debug << "inserting error in node " << ndbNodes[i].node_id << endl;
     if (insertErrorInNode(ndbNodes[i].node_id, _error) == -1)
       result = -1;
@@ -751,7 +751,7 @@ int NdbRestarter::dumpStateAllNodes(cons
 
  int result = 0;
  
- for(size_t i = 0; i < ndbNodes.size(); i++){     
+ for(unsigned i = 0; i < ndbNodes.size(); i++){     
    g_debug << "dumping state in node " << ndbNodes[i].node_id << endl;
    if (dumpStateOneNode(ndbNodes[i].node_id, _args, _num_args) == -1)
      result = -1;
@@ -841,7 +841,7 @@ NdbRestarter::checkClusterAlive(const in
   for (int i = 0; i<num_nodes; i++)
     mask.set(deadnodes[i]);
   
-  for (size_t n = 0; n < ndbNodes.size(); n++)
+  for (unsigned n = 0; n < ndbNodes.size(); n++)
   {
     if (mask.get(ndbNodes[n].node_id))
       continue;
@@ -862,7 +862,7 @@ NdbRestarter::rollingRestart(Uint32 flag
   NdbNodeBitmask ng_mask;
   NdbNodeBitmask restart_nodes;
   Vector<int> nodes;
-  for(size_t i = 0; i < ndbNodes.size(); i++)
+  for(unsigned i = 0; i < ndbNodes.size(); i++)
   { 
     if (ng_mask.get(ndbNodes[i].node_group) == false)
     {
@@ -911,7 +911,7 @@ NdbRestarter::getMasterNodeVersion(int&
   int masterNodeId = getMasterNodeId();
   if (masterNodeId != -1)
   {
-    for(size_t i = 0; i < ndbNodes.size(); i++)
+    for(unsigned i = 0; i < ndbNodes.size(); i++)
     {
       if (ndbNodes[i].node_id == masterNodeId)
       {
@@ -964,7 +964,7 @@ NdbRestarter::getNodeTypeVersionRange(nd
   minVer = 0;
   maxVer = 0;
   
-  for(size_t i = 0; i < nodeVec->size(); i++)
+  for(unsigned i = 0; i < nodeVec->size(); i++)
   {
     int nodeVer = (*nodeVec)[i].version;
     if ((minVer == 0) ||
@@ -984,7 +984,7 @@ NdbRestarter::getNodeStatus(int nodeid)
   if (getStatus() != 0)
     return -1;
 
-  for (size_t n = 0; n < ndbNodes.size(); n++)
+  for (unsigned n = 0; n < ndbNodes.size(); n++)
   {
     if (ndbNodes[n].node_id == nodeid)
       return ndbNodes[n].node_status;

=== modified file 'storage/ndb/test/src/getarg.c'
--- a/storage/ndb/test/src/getarg.c	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/test/src/getarg.c	2011-10-21 08:59:23 +0000
@@ -290,7 +290,7 @@ arg_printusage (struct getargs *args,
 	    strlcat(buf, "]", sizeof(buf));
 	    if(args[i].type == arg_strings)
 		strlcat(buf, "...", sizeof(buf));
-	    col = check_column(stderr, col, strlen(buf) + 1, columns);
+	    col = check_column(stderr, col, (int)strlen(buf) + 1, columns);
 	    col += fprintf(stderr, " %s", buf);
 	}
 	if (args[i].short_name) {
@@ -301,7 +301,7 @@ arg_printusage (struct getargs *args,
 	    strlcat(buf, "]", sizeof(buf));
 	    if(args[i].type == arg_strings)
 		strlcat(buf, "...", sizeof(buf));
-	    col = check_column(stderr, col, strlen(buf) + 1, columns);
+	    col = check_column(stderr, col, (int)strlen(buf) + 1, columns);
 	    col += fprintf(stderr, " %s", buf);
 	}
 	if (args[i].long_name && args[i].short_name)
@@ -309,7 +309,7 @@ arg_printusage (struct getargs *args,
 	max_len = max(max_len, len);
     }
     if (extra_string) {
-	col = check_column(stderr, col, strlen(extra_string) + 1, columns);
+	col = check_column(stderr, col, (int)strlen(extra_string) + 1, columns);
 	fprintf (stderr, " %s\n", extra_string);
     } else
 	fprintf (stderr, "\n");
@@ -360,14 +360,14 @@ arg_match_long(struct getargs *args, siz
     int argv_len;
     char *p;
 
-    argv_len = strlen(argv);
+    argv_len = (int)strlen(argv);
     p = strchr (argv, '=');
     if (p != NULL)
-	argv_len = p - argv;
+	argv_len = (int)(p - argv);
 
     for (i = 0; i < num_args; ++i) {
 	if(args[i].long_name) {
-	    int len = strlen(args[i].long_name);
+	    int len = (int)strlen(args[i].long_name);
 	    char *p = argv;
 	    int p_len = argv_len;
 	    negate = 0;
@@ -467,7 +467,7 @@ arg_match_long(struct getargs *args, siz
     }
     case arg_collect:{
 	struct getarg_collect_info *c = current->value;
-	int o = argv - rargv[*optind];
+	int o = (int)(argv - rargv[*optind]);
 	return (*c->func)(FALSE, argc, rargv, optind, &o, c->data);
     }
 

=== modified file 'storage/ndb/test/tools/cpcc.cpp'
--- a/storage/ndb/test/tools/cpcc.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/tools/cpcc.cpp	2011-10-21 08:59:23 +0000
@@ -140,7 +140,7 @@ public:
 
   virtual bool evaluate(SimpleCpcClient* c, const SimpleCpcClient::Process & p){
     bool run = on_empty;
-    for(size_t i = 0; i<m_cond.size(); i++){
+    for(unsigned i = 0; i<m_cond.size(); i++){
       if(m_cond[i]->evaluate(c, p)){
 	run = true;
 	break;
@@ -172,7 +172,7 @@ void
 add_hosts(Vector<SimpleCpcClient*> & hosts, BaseString list){
   Vector<BaseString> split;
   list.split(split);
-  for(size_t i = 0; i<split.size(); i++){
+  for(unsigned i = 0; i<split.size(); i++){
     add_host(hosts, split[i]);
   }
 }
@@ -273,7 +273,7 @@ main(int argc, const char** argv){
 
 int
 connect(Vector<SimpleCpcClient*>& list){
-  for(size_t i = 0; i<list.size(); i++){
+  for(unsigned i = 0; i<list.size(); i++){
     if(list[i]->connect() != 0){
       ndbout_c("Failed to connect to %s:%d", 
 	       list[i]->getHost(), list[i]->getPort());
@@ -285,7 +285,7 @@ connect(Vector<SimpleCpcClient*>& list){
 
 int
 for_each(Vector<SimpleCpcClient*>& list, Expression & expr){
-  for(size_t i = 0; i<list.size(); i++){
+  for(unsigned i = 0; i<list.size(); i++){
     if(list[i] == 0)
       continue;
     Properties p;
@@ -294,7 +294,7 @@ for_each(Vector<SimpleCpcClient*>& list,
       ndbout << "Failed to list processes on " 
 	     << list[i]->getHost() << ":" << list[i]->getPort() << endl;
     }
-    for(size_t j = 0; j<procs.size(); j++)
+    for(unsigned j = 0; j<procs.size(); j++)
       expr.evaluate(list[i], procs[j]);
   }
   return 0;

=== modified file 'storage/ndb/tools/CMakeLists.txt'
--- a/storage/ndb/tools/CMakeLists.txt	2011-10-05 11:21:23 +0000
+++ b/storage/ndb/tools/CMakeLists.txt	2011-10-28 09:56:57 +0000
@@ -73,7 +73,11 @@ MYSQL_ADD_EXECUTABLE(ndb_config
 TARGET_LINK_LIBRARIES(ndb_config ndbmgmclient ndbconf)
 
 # Build ndbinfo_sql and run it to create ndbinfo.sql
-ADD_EXECUTABLE(ndbinfo_sql ndbinfo_sql.cpp)
+ADD_EXECUTABLE(ndbinfo_sql
+  ndbinfo_sql.cpp
+  ${CMAKE_SOURCE_DIR}/storage/ndb/src/kernel/blocks/dbtc/DbtcStateDesc.cpp
+  ${CMAKE_SOURCE_DIR}/storage/ndb/src/kernel/blocks/dblqh/DblqhStateDesc.cpp
+)
 TARGET_LINK_LIBRARIES(ndbinfo_sql ndbclient)
 GET_TARGET_PROPERTY(NDBINFO_SQL_EXE ndbinfo_sql LOCATION)
 ADD_CUSTOM_COMMAND(OUTPUT ${PROJECT_SOURCE_DIR}/storage/ndb/tools/ndbinfo.sql

=== modified file 'storage/ndb/tools/Makefile.am'
--- a/storage/ndb/tools/Makefile.am	2011-10-05 11:21:23 +0000
+++ b/storage/ndb/tools/Makefile.am	2011-10-28 09:56:57 +0000
@@ -70,7 +70,9 @@ ndb_restore_LDADD = $(top_builddir)/stor
 ndbinfo_sql_SOURCES = ndbinfo_sql.cpp \
 	../src/mgmsrv/Config.cpp \
 	../src/mgmsrv/InitConfigFileParser.cpp \
-        $(top_srcdir)/storage/ndb/src/kernel/vm/mt_thr_config.cpp
+        $(top_srcdir)/storage/ndb/src/kernel/vm/mt_thr_config.cpp \
+        $(top_srcdir)/storage/ndb/src/kernel/blocks/dbtc/DbtcStateDesc.cpp \
+        $(top_srcdir)/storage/ndb/src/kernel/blocks/dblqh/DblqhStateDesc.cpp
 
 ndbinfo_sql_CXXFLAGS = -I$(top_srcdir)/storage/ndb/src/mgmapi
 

=== modified file 'storage/ndb/tools/ndb_dump_frm_data.cpp'
--- a/storage/ndb/tools/ndb_dump_frm_data.cpp	2011-07-04 13:37:56 +0000
+++ b/storage/ndb/tools/ndb_dump_frm_data.cpp	2011-10-20 18:36:21 +0000
@@ -104,7 +104,7 @@ dofile(const char* file)
       break;
     }
     ssize_t size2;
-    if ((size2 = read(fd, data, size)) == -1)
+    if ((size2 = read(fd, data, (unsigned)size)) == -1)
     {
       fprintf(stderr, "%s: read: %s\n", file, strerror(errno));
       break;
@@ -137,7 +137,7 @@ dofile(const char* file)
     printf("  orig: %u\n", (uint)size);
     printf("  pack: %u\n", (uint)pack_len);
     printf("*/\n\n");
-    dodump(name, pack_data, pack_len);
+    dodump(name, pack_data, (uint)pack_len);
     ret = 0;
   }
   while (0);

=== modified file 'storage/ndb/tools/ndbinfo_sql.cpp'
--- a/storage/ndb/tools/ndbinfo_sql.cpp	2011-10-07 13:15:08 +0000
+++ b/storage/ndb/tools/ndbinfo_sql.cpp	2011-10-28 09:56:57 +0000
@@ -50,12 +50,12 @@ struct view {
     "used, total, high, entry_size, cp1.param_name AS param_name1, "
     "cp2.param_name AS param_name2, cp3.param_name AS param_name3, "
     "cp4.param_name AS param_name4 "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>pools p "
-    "LEFT JOIN <NDBINFO_DB>.blocks b ON p.block_number = b.block_number "
-    "LEFT JOIN <NDBINFO_DB>.config_params cp1 ON p.config_param1 = cp1.param_number "
-    "LEFT JOIN <NDBINFO_DB>.config_params cp2 ON p.config_param2 = cp2.param_number "
-    "LEFT JOIN <NDBINFO_DB>.config_params cp3 ON p.config_param3 = cp3.param_number "
-    "LEFT JOIN <NDBINFO_DB>.config_params cp4 ON p.config_param4 = cp4.param_number"
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>pools` p "
+    "LEFT JOIN `<NDBINFO_DB>`.blocks b ON p.block_number = b.block_number "
+    "LEFT JOIN `<NDBINFO_DB>`.config_params cp1 ON p.config_param1 = cp1.param_number "
+    "LEFT JOIN `<NDBINFO_DB>`.config_params cp2 ON p.config_param2 = cp2.param_number "
+    "LEFT JOIN `<NDBINFO_DB>`.config_params cp3 ON p.config_param3 = cp3.param_number "
+    "LEFT JOIN `<NDBINFO_DB>`.config_params cp4 ON p.config_param4 = cp4.param_number"
   },
 #endif
   { "transporters",
@@ -67,7 +67,7 @@ struct view {
     "  WHEN 3 THEN \"DISCONNECTING\""
     "  ELSE NULL "
     " END AS status "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>transporters"
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>transporters`"
   },
   { "logspaces",
     "SELECT node_id, "
@@ -77,7 +77,7 @@ struct view {
     "  ELSE NULL "
     " END AS log_type, "
     "log_id, log_part, total, used "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>logspaces"
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>logspaces`"
   },
   { "logbuffers",
     "SELECT node_id, "
@@ -87,7 +87,7 @@ struct view {
     "  ELSE \"<unknown>\" "
     " END AS log_type, "
     "log_id, log_part, total, used "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>logbuffers"
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>logbuffers`"
   },
   { "resources",
     "SELECT node_id, "
@@ -105,9 +105,9 @@ struct view {
     "  ELSE \"<unknown>\" "
     " END AS resource_name, "
     "reserved, used, max "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>resources"
-   },
-   { "counters",
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>resources`"
+  },
+  { "counters",
     "SELECT node_id, b.block_name, block_instance, "
     "counter_id, "
     "CASE counter_id"
@@ -137,11 +137,11 @@ struct view {
     "  ELSE \"<unknown>\" "
     " END AS counter_name, "
     "val "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>counters c "
-    "LEFT JOIN <NDBINFO_DB>.blocks b "
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>counters` c "
+    "LEFT JOIN `<NDBINFO_DB>`.blocks b "
     "ON c.block_number = b.block_number"
-   },
-   { "nodes",
+  },
+  { "nodes",
     "SELECT node_id, "
     "uptime, "
     "CASE status"
@@ -158,8 +158,8 @@ struct view {
     " END AS status, "
     "start_phase, "
     "config_generation "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>nodes"
-   },
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>nodes`"
+  },
   { "memoryusage",
     "SELECT node_id,"
     "  pool_name AS memory_type,"
@@ -167,17 +167,91 @@ struct view {
     "  SUM(used) AS used_pages,"
     "  SUM(total*entry_size) AS total,"
     "  SUM(total) AS total_pages "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>pools "
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>pools` "
     "WHERE block_number IN (248, 254) AND "
     "  (pool_name = \"Index memory\" OR pool_name = \"Data memory\") "
     "GROUP BY node_id, memory_type"
   },
-   { "diskpagebuffer",
+  { "diskpagebuffer",
      "SELECT node_id, block_instance, "
      "pages_written, pages_written_lcp, pages_read, log_waits, "
      "page_requests_direct_return, page_requests_wait_queue, page_requests_wait_io "
-     "FROM <NDBINFO_DB>.<TABLE_PREFIX>diskpagebuffer"
-   }
+     "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>diskpagebuffer`"
+  },
+  { "diskpagebuffer",
+     "SELECT node_id, block_instance, "
+     "pages_written, pages_written_lcp, pages_read, log_waits, "
+     "page_requests_direct_return, page_requests_wait_queue, page_requests_wait_io "
+     "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>diskpagebuffer`"
+  },
+  { "threadblocks",
+    "SELECT t.node_id, t.thr_no, b.block_name, t.block_instance "
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>threadblocks` t "
+    "LEFT JOIN `<NDBINFO_DB>`.blocks b "
+    "ON t.block_number = b.block_number"
+  },
+  { "threadstat",
+    "SELECT * from `<NDBINFO_DB>`.`<TABLE_PREFIX>threadstat`"
+  },
+  { "cluster_transactions",
+    "SELECT"
+    " t.node_id,"
+    " t.block_instance,"
+    " t.transid0 + (t.transid1 << 32) as transid,"
+    " s.state_friendly_name as state, "
+    " t.c_ops as count_operations, "
+    " t.outstanding as outstanding_operations, "
+    " t.timer as inactive_seconds, "
+    " (t.apiref & 65535) as client_node_id, "
+    " (t.apiref >> 16) as client_block_ref "
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>transactions` t"
+    " LEFT JOIN `<NDBINFO_DB>`.`<TABLE_PREFIX>dbtc_apiconnect_state` s"
+    "        ON s.state_int_value = t.state"
+  },
+  { "server_transactions",
+    "SELECT map.mysql_connection_id, t.*"
+    "FROM information_schema.ndb_transid_mysql_connection_map map "
+    "JOIN `<NDBINFO_DB>`.cluster_transactions t "
+    "  ON (map.ndb_transid >> 32) = (t.transid >> 32)"
+  },
+  { "cluster_operations",
+    "SELECT"
+    " o.node_id,"
+    " o.block_instance,"
+    " o.transid0 + (o.transid1 << 32) as transid,"
+    " case o.op "
+    " when 1 then \"READ\""
+    " when 2 then \"READ-SH\""
+    " when 3 then \"READ-EX\""
+    " when 4 then \"INSERT\""
+    " when 5 then \"UPDATE\""
+    " when 6 then \"DELETE\""
+    " when 7 then \"WRITE\""
+    " when 8 then \"UNLOCK\""
+    " when 9 then \"REFRESH\""
+    " when 257 then \"SCAN\""
+    " when 258 then \"SCAN-SH\""
+    " when 259 then \"SCAN-EX\""
+    " ELSE \"<unknown>\""
+    " END as operation_type, "
+    " s.state_friendly_name as state, "
+    " o.tableid, "
+    " o.fragmentid, "
+    " (o.apiref & 65535) as client_node_id, "
+    " (o.apiref >> 16) as client_block_ref, "
+    " (o.tcref & 65535) as tc_node_id, "
+    " ((o.tcref >> 16) & 511) as tc_block_no, "
+    " ((o.tcref >> (16 + 9)) & 127) as tc_block_instance "
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>operations` o"
+    " LEFT JOIN `<NDBINFO_DB>`.`<TABLE_PREFIX>dblqh_tcconnect_state` s"
+    "        ON s.state_int_value = o.state"
+  },
+  { "server_operations",
+    "SELECT map.mysql_connection_id, o.* "
+    "FROM `<NDBINFO_DB>`.cluster_operations o "
+    "JOIN information_schema.ndb_transid_mysql_connection_map map"
+    "  ON (map.ndb_transid >> 32) = (o.transid >> 32)"
+  }
 };
 
 size_t num_views = sizeof(views)/sizeof(views[0]);
@@ -214,6 +288,38 @@ static void fill_blocks(BaseString& sql)
   }
 }
 
+#include "kernel/statedesc.hpp"
+
+static void fill_dbtc_apiconnect_state(BaseString& sql)
+{
+  const char* separator = "";
+  for (unsigned i = 0; g_dbtc_apiconnect_state_desc[i].name != 0; i++)
+  {
+    sql.appfmt("%s(%u, \"%s\", \"%s\", \"%s\")",
+               separator,
+               g_dbtc_apiconnect_state_desc[i].value,
+               g_dbtc_apiconnect_state_desc[i].name,
+               g_dbtc_apiconnect_state_desc[i].friendly_name,
+               g_dbtc_apiconnect_state_desc[i].description);
+    separator = ", ";
+  }
+}
+
+static void fill_dblqh_tcconnect_state(BaseString& sql)
+{
+  const char* separator = "";
+  for (unsigned i = 0; g_dblqh_tcconnect_state_desc[i].name != 0; i++)
+  {
+    sql.appfmt("%s(%u, \"%s\", \"%s\", \"%s\")",
+               separator,
+               g_dblqh_tcconnect_state_desc[i].value,
+               g_dblqh_tcconnect_state_desc[i].name,
+               g_dblqh_tcconnect_state_desc[i].friendly_name,
+               g_dblqh_tcconnect_state_desc[i].description);
+    separator = ", ";
+  }
+}
+
 struct lookup {
   const char* name;
   const char* columns;
@@ -224,12 +330,28 @@ struct lookup {
     "block_number INT UNSIGNED PRIMARY KEY, "
     "block_name VARCHAR(512)",
     &fill_blocks
-   },
+  },
   { "config_params",
     "param_number INT UNSIGNED PRIMARY KEY, "
     "param_name VARCHAR(512)",
     &fill_config_params
-   }
+  },
+  {
+    "<TABLE_PREFIX>dbtc_apiconnect_state",
+    "state_int_value  INT UNSIGNED PRIMARY KEY, "
+    "state_name VARCHAR(256), "
+    "state_friendly_name VARCHAR(256), "
+    "state_description VARCHAR(256)",
+    &fill_dbtc_apiconnect_state
+  },
+  {
+    "<TABLE_PREFIX>dblqh_tcconnect_state",
+    "state_int_value  INT UNSIGNED PRIMARY KEY, "
+    "state_name VARCHAR(256), "
+    "state_friendly_name VARCHAR(256), "
+    "state_description VARCHAR(256)",
+    &fill_dblqh_tcconnect_state
+  }
 };
 
 size_t num_lookups = sizeof(lookups)/sizeof(lookups[0]);
@@ -339,7 +461,7 @@ int main(int argc, char** argv){
   printf("# Drop any old views in %s\n", opt_ndbinfo_db);
   for (size_t i = 0; i < num_views; i++)
   {
-    sql.assfmt("DROP VIEW IF EXISTS %s.%s",
+    sql.assfmt("DROP VIEW IF EXISTS `%s`.`%s`",
                opt_ndbinfo_db, views[i].name);
     print_conditional_sql(sql);
   }
@@ -347,8 +469,10 @@ int main(int argc, char** argv){
   printf("# Drop any old lookup tables in %s\n", opt_ndbinfo_db);
   for (size_t i = 0; i < num_lookups; i++)
   {
-    sql.assfmt("DROP TABLE IF EXISTS %s.%s",
-               opt_ndbinfo_db, lookups[i].name);
+    BaseString table_name = replace_tags(lookups[i].name);
+
+    sql.assfmt("DROP TABLE IF EXISTS `%s`.`%s`",
+               opt_ndbinfo_db, table_name.c_str());
     print_conditional_sql(sql);
   }
 
@@ -409,16 +533,17 @@ int main(int argc, char** argv){
   for (size_t i = 0; i < num_lookups; i++)
   {
     lookup l = lookups[i];
-    printf("# %s.%s\n", opt_ndbinfo_db, l.name);
+    BaseString table_name = replace_tags(l.name);
+    printf("# %s.%s\n", opt_ndbinfo_db, table_name.c_str());
 
     /* Create lookup table */
     sql.assfmt("CREATE TABLE `%s`.`%s` (%s)",
-               opt_ndbinfo_db, l.name, l.columns);
+               opt_ndbinfo_db, table_name.c_str(), l.columns);
     print_conditional_sql(sql);
 
     /* Insert data */
     sql.assfmt("INSERT INTO `%s`.`%s` VALUES ",
-               opt_ndbinfo_db, l.name);
+               opt_ndbinfo_db, table_name.c_str());
     l.fill(sql);
     print_conditional_sql(sql);
   }

=== modified file 'storage/ndb/tools/restore/consumer_restore.cpp'
--- a/storage/ndb/tools/restore/consumer_restore.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/tools/restore/consumer_restore.cpp	2011-10-21 08:59:23 +0000
@@ -679,7 +679,7 @@ BackupRestore::rebuild_indexes(const Tab
   NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
 
   Vector<NdbDictionary::Index*> & indexes = m_index_per_table[id];
-  for(size_t i = 0; i<indexes.size(); i++)
+  for(unsigned i = 0; i<indexes.size(); i++)
   {
     const NdbDictionary::Index * const idx = indexes[i];
     const char * const idx_name = idx->getName();
@@ -818,7 +818,7 @@ bool BackupRestore::search_replace(char
                                    const char **data, const char *end_data,
                                    uint *new_data_len)
 {
-  uint search_str_len = strlen(search_str);
+  uint search_str_len = (uint)strlen(search_str);
   uint inx = 0;
   bool in_delimiters = FALSE;
   bool escape_char = FALSE;
@@ -969,7 +969,7 @@ bool BackupRestore::translate_frm(NdbDic
   {
     DBUG_RETURN(TRUE);
   }
-  if (map_in_frm(new_data, (const char*)data, data_len, &new_data_len))
+  if (map_in_frm(new_data, (const char*)data, (uint)data_len, &new_data_len))
   {
     free(new_data);
     DBUG_RETURN(TRUE);
@@ -1997,7 +1997,7 @@ BackupRestore::endOfTables(){
     return true;
 
   NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
-  for(size_t i = 0; i<m_indexes.size(); i++){
+  for(unsigned i = 0; i<m_indexes.size(); i++){
     NdbTableImpl & indtab = NdbTableImpl::getImpl(* m_indexes[i]);
 
     BaseString db_name, schema_name, table_name;

=== modified file 'storage/ndb/tools/waiter.cpp'
--- a/storage/ndb/tools/waiter.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/tools/waiter.cpp	2011-10-21 08:59:23 +0000
@@ -322,7 +322,7 @@ waitClusterStatus(const char* _addr,
 	 * First check if any node is not starting
 	 * then it's no idea to wait anymore
 	 */
-	for (size_t n = 0; n < ndbNodes.size(); n++){
+	for (unsigned n = 0; n < ndbNodes.size(); n++){
 	  if (ndbNodes[n].node_status != NDB_MGM_NODE_STATUS_STARTED &&
 	      ndbNodes[n].node_status != NDB_MGM_NODE_STATUS_STARTING)
 	    waitMore = false;
@@ -359,7 +359,7 @@ waitClusterStatus(const char* _addr,
     allInState = (ndbNodes.size() > 0);
 
     /* Loop through all nodes and check their state */
-    for (size_t n = 0; n < ndbNodes.size(); n++) {
+    for (unsigned n = 0; n < ndbNodes.size(); n++) {
       ndb_mgm_node_state* ndbNode = &ndbNodes[n];
 
       assert(ndbNode != NULL);

=== modified file 'support-files/compiler_warnings.supp'
--- a/support-files/compiler_warnings.supp	2011-06-30 15:59:25 +0000
+++ b/support-files/compiler_warnings.supp	2011-10-20 11:45:13 +0000
@@ -59,23 +59,23 @@ db_vrfy.c : .*comparison is always false
 # Ignore all conversion warnings on windows 64
 # (Is safe as we are not yet supporting strings >= 2G)
 #
-.* : conversion from '__int64' to .*int'.*
-.* : conversion from '__int64' to 'uint8'.*
-.* : conversion from '__int64' to 'uint32'.*
-.* : conversion from '__int64' to 'u.*long'.*
-.* : conversion from '__int64' to 'long'.*
-.* : conversion from '__int64' to 'off_t'.*
-.* : conversion from '.*size_t' to .*int'.*
-.* : conversion from '.*size_t' to 'TaoCrypt::word32'.*
-.* : conversion from '.*size_t' to 'u.*long'.*
-.* : conversion from '.*size_t' to 'uint32'.*
-.* : conversion from '.*size_t' to 'off_t'.*
-.* : conversion from '.*size_t' to 'size_s'.*
-.* : conversion from '.*size_t' to 'DWORD'.*
-.* : conversion from '.*size_t' to 'uLongf'.*
-.* : conversion from '.*size_t' to 'UINT'.*
-.* : conversion from '.*size_t' to 'uInt'.*
-.* : conversion from '.*size_t' to 'uint16'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to .*int'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'uint8'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'uint32'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'u.*long'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'long'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'off_t'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to .*int'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'TaoCrypt::word32'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'u.*long'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'uint32'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'off_t'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'size_s'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'DWORD'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'uLongf'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'UINT'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'uInt'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'uint16'.*
 
 #
 # Ignore a few specific warnings in ndb

=== modified file 'tests/mysql_client_test.c'
--- a/tests/mysql_client_test.c	2011-06-30 15:55:35 +0000
+++ b/tests/mysql_client_test.c	2011-10-17 11:35:32 +0000
@@ -18399,6 +18399,87 @@ static void test_bug47485()
 }
 
 
+#ifndef MCP_BUG13001491
+/*
+  Bug#13001491: MYSQL_REFRESH CRASHES WHEN STORED ROUTINES ARE RUN CONCURRENTLY.
+*/
+static void test_bug13001491()
+{
+  int rc;
+  char query[MAX_TEST_QUERY_LENGTH];
+  MYSQL *c;
+
+  myheader("test_bug13001491");
+
+  my_snprintf(query, MAX_TEST_QUERY_LENGTH,
+           "GRANT ALL PRIVILEGES ON *.* TO mysqltest_u1@%s",
+           opt_host ? opt_host : "'localhost'");
+           
+  rc= mysql_query(mysql, query);
+  myquery(rc);
+
+  my_snprintf(query, MAX_TEST_QUERY_LENGTH,
+           "GRANT RELOAD ON *.* TO mysqltest_u1@%s",
+           opt_host ? opt_host : "'localhost'");
+           
+  rc= mysql_query(mysql, query);
+  myquery(rc);
+
+  c= mysql_client_init(NULL);
+
+  DIE_UNLESS(mysql_real_connect(c, opt_host, "mysqltest_u1", NULL,
+                                current_db, opt_port, opt_unix_socket,
+                                CLIENT_MULTI_STATEMENTS |
+                                CLIENT_MULTI_RESULTS));
+
+  rc= mysql_query(c, "DROP PROCEDURE IF EXISTS p1");
+  myquery(rc);
+
+  rc= mysql_query(c,
+    "CREATE PROCEDURE p1() "
+    "BEGIN "
+    " DECLARE CONTINUE HANDLER FOR SQLEXCEPTION BEGIN END; "
+    " SELECT COUNT(*) "
+    " FROM INFORMATION_SCHEMA.PROCESSLIST "
+    " GROUP BY user "
+    " ORDER BY NULL "
+    " INTO @a; "
+    "END");
+  myquery(rc);
+
+  rc= mysql_query(c, "CALL p1()");
+  myquery(rc);
+
+  mysql_free_result(mysql_store_result(c));
+
+  /* Check that mysql_refresh() succeeds without REFRESH_LOG. */
+  rc= mysql_refresh(c, REFRESH_GRANT |
+                       REFRESH_TABLES | REFRESH_HOSTS |
+                       REFRESH_STATUS | REFRESH_THREADS);
+  myquery(rc);
+
+  /*
+    Check that mysql_refresh(REFRESH_LOG) does not crash the server even if it
+    fails. mysql_refresh(REFRESH_LOG) fails when error log points to unavailable
+    location.
+  */
+  mysql_refresh(c, REFRESH_LOG);
+
+  rc= mysql_query(c, "DROP PROCEDURE p1");
+  myquery(rc);
+
+  mysql_close(c);
+  c= NULL;
+
+  my_snprintf(query, MAX_TEST_QUERY_LENGTH,
+           "DROP USER mysqltest_u1@%s",
+           opt_host ? opt_host : "'localhost'");
+           
+  rc= mysql_query(mysql, query);
+  myquery(rc);
+}
+
+#endif
 /*
   Read and parse arguments and MySQL options from my.cnf
 */
@@ -18725,6 +18806,9 @@ static struct my_tests_st my_tests[]= {
   { "test_bug42373", test_bug42373 },
   { "test_bug54041", test_bug54041 },
   { "test_bug47485", test_bug47485 },
+#ifndef MCP_BUG13001491
+  { "test_bug13001491", test_bug13001491 },
+#endif
   { 0, 0 }
 };
 

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-5.1-telco-7.0 branch (pekka.nousiainen:4590 to 4629) Pekka Nousiainen1 Nov