List: Commits
From: Martin Zaun
Date: October 31 2011 7:40am
Subject: bzr push into mysql-5.1-telco-7.1 branch (martin.zaun:4316 to 4325)
 4325 Martin Zaun	2011-10-31
      ndbjtie - test patch for CluB X.X - fix for jtie id member cache bug

    modified:
      storage/ndb/src/ndbjtie/jtie/jtie_tconv_idcache_impl.hpp
      storage/ndb/src/ndbjtie/jtie/jtie_tconv_object.hpp
      storage/ndb/src/ndbjtie/jtie/jtie_tconv_object_impl.hpp
 4324 Craig L Russell	2011-10-28
      update mysql-test/suite/ndb/r/ndb_config.result to match configuration

    modified:
      mysql-test/suite/ndb/r/ndb_config.result
 4323 jonas oreland	2011-10-29
      ndb increase acc memory to 5m

    modified:
      mysql-test/include/default_ndbd.cnf
 4322 Craig L Russell	2011-10-28
      Improve error reporting for schema change test

    modified:
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractClusterJTest.java
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/SchemaChangeTest.java
 4321 jonas oreland	2011-10-28 [merge]
      ndb - merge 70 to 71

    modified:
      storage/ndb/src/kernel/blocks/ERROR_codes.txt
      storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
      storage/ndb/test/ndbapi/testNodeRestart.cpp
      storage/ndb/test/run-test/daily-basic-tests.txt
 4320 Magnus Blåudd	2011-10-28
      ndb
       - add MCP patch for bug 51828 also to 7.1 which we hit when
         adding SQL to create ndbinfo tables to mysql_system_tables.sql
       - choose only the significant parts of the patch, i.e. make comp_sql
         print the SQL as an array of strings and then make mysql_upgrade
         concatenate the strings in dynamic memory before running the query

    modified:
      client/mysql_upgrade.c
      scripts/comp_sql.c
 4319 jonas oreland	2011-10-28
      ndb - update result file

    modified:
      mysql-test/suite/ndb/r/ndbinfo_dump.result
 4318 jonas oreland	2011-10-28 [merge]
      ndb - merge 70 to 71

    added:
      storage/ndb/include/kernel/statedesc.hpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhStateDesc.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcStateDesc.cpp
    modified:
      mysql-test/suite/ndb/r/ndbinfo.result
      mysql-test/suite/ndb/t/ndbinfo.test
      scripts/mysql_system_tables.sql
      sql/ha_ndbcluster_connection.cc
      storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
      storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
      storage/ndb/test/src/HugoQueries.cpp
      storage/ndb/tools/CMakeLists.txt
      storage/ndb/tools/Makefile.am
      storage/ndb/tools/ndbinfo_sql.cpp
 4317 Craig L Russell	2011-10-27
      Add a new method to clusterj Session to unload cached schema information
      after the schema has changed (due to alter table). This allows for recovery
      from error code 284 without restarting the VM. If there is an error while
      creating the domain type handler also unload the schema.
      A test case changes schema for a table and verifies recovery of the schema.
      This change was requested by a community customer using clusterj with Tomcat.

    added:
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/SchemaChangeTest.java
      storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/SchemaChangeTest.java
    modified:
      storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/Session.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionFactoryImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainTypeHandlerFactoryImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainTypeHandlerImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/Dictionary.java
      storage/ndb/clusterj/clusterj-core/src/main/resources/com/mysql/clusterj/core/Bundle.properties
      storage/ndb/clusterj/clusterj-test/Makefile.am
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DictionaryImpl.java
 4316 Jonas Oreland	2011-10-24 [merge]
      ndb - merge 71 to 72

    modified:
      storage/ndb/include/util/OutputStream.hpp
      storage/ndb/src/common/util/OutputStream.cpp
      storage/ndb/src/mgmapi/mgmapi.cpp
      storage/ndb/src/mgmsrv/Services.cpp
      storage/ndb/test/include/NdbMgmd.hpp
      storage/ndb/test/ndbapi/testMgm.cpp
=== modified file 'client/mysql_upgrade.c'
--- a/client/mysql_upgrade.c	2011-06-30 15:55:35 +0000
+++ b/client/mysql_upgrade.c	2011-10-28 14:46:05 +0000
@@ -763,10 +763,42 @@ static int run_sql_fix_privilege_tables(
   if (init_dynamic_string(&ds_result, "", 512, 512))
     die("Out of memory");
 
+#ifndef MCP_BUG51828
+  /*
+    The SQL to run are kept in a big array of string in order
+    to avoid hitting compiler limits for max string length,
+    concatenate the strings into dynamic memory before
+    running the SQL. Significant parts of patch for bug#51828
+    backported from trunk
+  */
+  {
+    const char **query_ptr;
+    DYNAMIC_STRING ds_script;
+
+    if (init_dynamic_string(&ds_script, "", 65536, 1024))
+      die("Out of memory");
+
+    for ( query_ptr= &mysql_fix_privilege_tables[0];
+          *query_ptr != NULL;
+          query_ptr++
+          )
+    {
+      dynstr_append(&ds_script, *query_ptr);
+    }
+
+    verbose("Running 'mysql_fix_privilege_tables'...");
+    run_query(ds_script.str,
+              &ds_result, /* Collect result */
+              TRUE);
+
+    dynstr_free(&ds_script);
+  }
+#else
   verbose("Running 'mysql_fix_privilege_tables'...");
   run_query(mysql_fix_privilege_tables,
             &ds_result, /* Collect result */
             TRUE);
+#endif
 
   {
     /*

=== modified file 'mysql-test/include/default_ndbd.cnf'
--- a/mysql-test/include/default_ndbd.cnf	2011-03-31 20:19:39 +0000
+++ b/mysql-test/include/default_ndbd.cnf	2011-10-29 03:19:28 +0000
@@ -4,7 +4,7 @@ MaxNoOfSavedMessages=          1000
 MaxNoOfConcurrentTransactions= 2048
 MaxNoOfConcurrentOperations=   10000
 DataMemory=                    20M
-IndexMemory=                   3M
+IndexMemory=                   5M
 Diskless=                      0
 TimeBetweenWatchDogCheck=      30000
 MaxNoOfOrderedIndexes=         128

=== modified file 'mysql-test/suite/ndb/r/ndb_config.result'
--- a/mysql-test/suite/ndb/r/ndb_config.result	2011-09-04 17:04:25 +0000
+++ b/mysql-test/suite/ndb/r/ndb_config.result	2011-10-29 05:39:18 +0000
@@ -1,10 +1,10 @@
 == 1 ==
 ndb_mgmd,3,localhost mysqld,49,localhost mysqld,16,localhost mysqld,32,localhost mysqld,48,localhost mysqld,63,localhost mysqld,127,localhost mysqld,192,localhost mysqld,228,localhost mysqld,229,localhost mysqld,230,localhost mysqld,231,localhost mysqld,232,localhost mysqld,233,localhost mysqld,255,localhost ndbd,1,localhost ndbd,2,localhost
 == 2 ==
-1,localhost,20971520,3145728 2,localhost,20971520,3145728
+1,localhost,20971520,5242880 2,localhost,20971520,5242880
 == 3 ==
-1 localhost 20971520 3145728
-2 localhost 20971520 3145728
+1 localhost 20971520 5242880
+2 localhost 20971520 5242880
 == 4 ==
 1 2
 == 5 ==

=== modified file 'mysql-test/suite/ndb/r/ndbinfo.result'
--- a/mysql-test/suite/ndb/r/ndbinfo.result	2011-10-13 16:58:56 +0000
+++ b/mysql-test/suite/ndb/r/ndbinfo.result	2011-10-28 11:52:35 +0000
@@ -317,6 +317,147 @@ node_id
 1
 2
 
+desc threadblocks;
+Field	Type	Null	Key	Default	Extra
+node_id	int(10) unsigned	YES		NULL	
+thr_no	int(10) unsigned	YES		NULL	
+block_name	varchar(512)	YES		NULL	
+block_instance	int(10) unsigned	YES		NULL	
+select distinct block_name from threadblocks order by 1;
+block_name
+BACKUP
+CMVMI
+DBACC
+DBDICT
+DBDIH
+DBINFO
+DBLQH
+DBSPJ
+DBTC
+DBTUP
+DBTUX
+DBUTIL
+LGMAN
+NDBCNTR
+NDBFS
+PGMAN
+QMGR
+RESTORE
+SUMA
+THRMAN
+TRIX
+TSMAN
+desc threadstat;
+Field	Type	Null	Key	Default	Extra
+node_id	int(10) unsigned	YES		NULL	
+thr_no	int(10) unsigned	YES		NULL	
+thr_nm	varchar(512)	YES		NULL	
+c_loop	bigint(20) unsigned	YES		NULL	
+c_exec	bigint(20) unsigned	YES		NULL	
+c_wait	bigint(20) unsigned	YES		NULL	
+c_l_sent_prioa	bigint(20) unsigned	YES		NULL	
+c_l_sent_priob	bigint(20) unsigned	YES		NULL	
+c_r_sent_prioa	bigint(20) unsigned	YES		NULL	
+c_r_sent_priob	bigint(20) unsigned	YES		NULL	
+os_tid	bigint(20) unsigned	YES		NULL	
+os_now	bigint(20) unsigned	YES		NULL	
+os_ru_utime	bigint(20) unsigned	YES		NULL	
+os_ru_stime	bigint(20) unsigned	YES		NULL	
+os_ru_minflt	bigint(20) unsigned	YES		NULL	
+os_ru_majflt	bigint(20) unsigned	YES		NULL	
+os_ru_nvcsw	bigint(20) unsigned	YES		NULL	
+os_ru_nivcsw	bigint(20) unsigned	YES		NULL	
+select count(*) > 0 block_name from threadstat;
+block_name
+1
+
+desc cluster_transactions;
+Field	Type	Null	Key	Default	Extra
+node_id	int(10) unsigned	YES		NULL	
+block_instance	int(10) unsigned	YES		NULL	
+transid	bigint(22) unsigned	YES		NULL	
+state	varchar(256)	YES		NULL	
+count_operations	int(10) unsigned	YES		NULL	
+outstanding_operations	int(10) unsigned	YES		NULL	
+inactive_seconds	int(10) unsigned	YES		NULL	
+client_node_id	bigint(21) unsigned	YES		NULL	
+client_block_ref	bigint(21) unsigned	YES		NULL	
+desc server_transactions;
+Field	Type	Null	Key	Default	Extra
+mysql_connection_id	bigint(21) unsigned	NO		0	
+node_id	int(10) unsigned	YES		NULL	
+block_instance	int(10) unsigned	YES		NULL	
+transid	bigint(22) unsigned	YES		NULL	
+state	varchar(256)	YES		NULL	
+count_operations	int(10) unsigned	YES		NULL	
+outstanding_operations	int(10) unsigned	YES		NULL	
+inactive_seconds	int(10) unsigned	YES		NULL	
+client_node_id	bigint(21) unsigned	YES		NULL	
+client_block_ref	bigint(21) unsigned	YES		NULL	
+desc cluster_operations;
+Field	Type	Null	Key	Default	Extra
+node_id	int(10) unsigned	YES		NULL	
+block_instance	int(10) unsigned	YES		NULL	
+transid	bigint(22) unsigned	YES		NULL	
+operation_type	varchar(9)	YES		NULL	
+state	varchar(256)	YES		NULL	
+tableid	int(10) unsigned	YES		NULL	
+fragmentid	int(10) unsigned	YES		NULL	
+client_node_id	bigint(21) unsigned	YES		NULL	
+client_block_ref	bigint(21) unsigned	YES		NULL	
+tc_node_id	bigint(21) unsigned	YES		NULL	
+tc_block_no	bigint(21) unsigned	YES		NULL	
+tc_block_instance	bigint(21) unsigned	YES		NULL	
+desc server_operations;
+Field	Type	Null	Key	Default	Extra
+mysql_connection_id	bigint(21) unsigned	NO		0	
+node_id	int(10) unsigned	YES		NULL	
+block_instance	int(10) unsigned	YES		NULL	
+transid	bigint(22) unsigned	YES		NULL	
+operation_type	varchar(9)	YES		NULL	
+state	varchar(256)	YES		NULL	
+tableid	int(10) unsigned	YES		NULL	
+fragmentid	int(10) unsigned	YES		NULL	
+client_node_id	bigint(21) unsigned	YES		NULL	
+client_block_ref	bigint(21) unsigned	YES		NULL	
+tc_node_id	bigint(21) unsigned	YES		NULL	
+tc_block_no	bigint(21) unsigned	YES		NULL	
+tc_block_instance	bigint(21) unsigned	YES		NULL	
+
+create table t1 (a int primary key) engine = ndb;
+begin;
+insert into t1 values (1);
+select state, count_operations, outstanding_operations,
+IF(client_node_id <= 255, "<client_node_id>", "<incorrect node id>") 
+  client_node_id
+from server_transactions;
+state	count_operations	outstanding_operations	client_node_id
+Started	1	0	<client_node_id>
+select node_id, operation_type, state,
+IF(tc_node_id <= 48, "<tc_node_id>", "<incorrect nodeid>") tc_node_id,
+IF(client_node_id <= 255, "<client_node_id>", "<incorrect node id>") 
+  client_node_id
+from server_operations
+order by 1;
+node_id	operation_type	state	tc_node_id	client_node_id
+1	INSERT	Prepared	<tc_node_id>	<client_node_id>
+2	INSERT	Prepared	<tc_node_id>	<client_node_id>
+
+select st.state, st.count_operations, st.outstanding_operations,
+       so.node_id, so.state, so.operation_type
+from server_transactions st,
+     server_operations so
+where st.transid = so.transid
+  and so.tc_node_id = st.node_id
+  and so.tc_block_instance = st.block_instance
+  and so.client_node_id = st.client_node_id
+  and so.client_block_ref = st.client_block_ref;
+state	count_operations	outstanding_operations	node_id	state	operation_type
+Started	1	0	1	Prepared	INSERT
+Started	1	0	2	Prepared	INSERT
+rollback;
+drop table t1;
+
 set @@global.ndbinfo_offline=TRUE;
 select @@ndbinfo_offline;
 @@ndbinfo_offline

=== modified file 'mysql-test/suite/ndb/r/ndbinfo_dump.result'
--- a/mysql-test/suite/ndb/r/ndbinfo_dump.result	2011-10-07 17:15:53 +0000
+++ b/mysql-test/suite/ndb/r/ndbinfo_dump.result	2011-10-28 13:01:42 +0000
@@ -1,7 +1,7 @@
 USE ndbinfo;
 select count(*) from blocks;
 count(*)
-21
+22
 select count(*) from blocks;
 count(*)
-21
+22

=== modified file 'mysql-test/suite/ndb/t/ndbinfo.test'
--- a/mysql-test/suite/ndb/t/ndbinfo.test	2011-05-23 14:50:45 +0000
+++ b/mysql-test/suite/ndb/t/ndbinfo.test	2011-10-28 11:52:35 +0000
@@ -201,6 +201,44 @@ set @@ndbinfo_offline=1;
 let $q1 = SELECT DISTINCT(node_id) FROM ndbinfo.counters ORDER BY node_id;
 eval $q1;
 
+# new views
+desc threadblocks;
+select distinct block_name from threadblocks order by 1;
+desc threadstat;
+select count(*) > 0 block_name from threadstat;
+
+desc cluster_transactions;
+desc server_transactions;
+desc cluster_operations;
+desc server_operations;
+
+create table t1 (a int primary key) engine = ndb;
+begin;
+insert into t1 values (1);
+select state, count_operations, outstanding_operations,
+IF(client_node_id <= 255, "<client_node_id>", "<incorrect node id>") 
+  client_node_id
+from server_transactions;
+select node_id, operation_type, state,
+IF(tc_node_id <= 48, "<tc_node_id>", "<incorrect nodeid>") tc_node_id,
+IF(client_node_id <= 255, "<client_node_id>", "<incorrect node id>") 
+  client_node_id
+from server_operations
+order by 1;
+
+--sorted_result
+select st.state, st.count_operations, st.outstanding_operations,
+       so.node_id, so.state, so.operation_type
+from server_transactions st,
+     server_operations so
+where st.transid = so.transid
+  and so.tc_node_id = st.node_id
+  and so.tc_block_instance = st.block_instance
+  and so.client_node_id = st.client_node_id
+  and so.client_block_ref = st.client_block_ref; 
+rollback;
+drop table t1;
+
 # Turn on ndbinfo_offline
 set @@global.ndbinfo_offline=TRUE;
 select @@ndbinfo_offline;

=== modified file 'scripts/comp_sql.c'
--- a/scripts/comp_sql.c	2009-05-26 18:53:34 +0000
+++ b/scripts/comp_sql.c	2011-10-28 14:46:05 +0000
@@ -74,7 +74,16 @@ int main(int argc, char *argv[])
   if (!(out= fopen(outfile_name, "w")))
     die("Failed to open output file '%s'", outfile_name);
 
+#ifndef MCP_BUG51828
+  /*
+    Print the SQL as an array of strings instead of one
+    large string in order to avoid compiler limit on max string length.
+    Significant parts of patch for bug#51828 backported from trunk.
+  */
+  fprintf(out, "const char* %s[]={\n\"", struct_name);
+#else
   fprintf(out, "const char* %s={\n\"", struct_name);
+#endif
 
   while (fgets(buff, sizeof(buff), in))
   {
@@ -115,9 +124,17 @@ int main(int argc, char *argv[])
       */
       fprintf(out, "\"\n\"");
     }
+#ifndef MCP_BUG51828
+    /* new line -> convert to new entry in array */
+    fprintf(out, "\",\n\"");
+#endif
   }
 
+#ifndef MCP_BUG51828
+  fprintf(out, "\",\nNULL\n};\n");
+#else
   fprintf(out, "\\\n\"};\n");
+#endif
 
   fclose(in);
   fclose(out);

=== modified file 'scripts/mysql_system_tables.sql'
--- a/scripts/mysql_system_tables.sql	2011-06-30 16:04:23 +0000
+++ b/scripts/mysql_system_tables.sql	2011-10-28 11:52:35 +0000
@@ -144,53 +144,98 @@ EXECUTE stmt;
 DROP PREPARE stmt;
 
 # Drop any old views in ndbinfo
-SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS ndbinfo.transporters','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`transporters`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS ndbinfo.logspaces','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`logspaces`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS ndbinfo.logbuffers','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`logbuffers`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS ndbinfo.resources','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`resources`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS ndbinfo.counters','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`counters`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS ndbinfo.nodes','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`nodes`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS ndbinfo.memoryusage','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`memoryusage`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS ndbinfo.diskpagebuffer','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`diskpagebuffer`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`diskpagebuffer`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`threadblocks`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`threadstat`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`cluster_transactions`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`server_transactions`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`cluster_operations`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`server_operations`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
 # Drop any old lookup tables in ndbinfo
-SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS ndbinfo.blocks','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS `ndbinfo`.`blocks`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS ndbinfo.config_params','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS `ndbinfo`.`config_params`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS `ndbinfo`.`ndb$dbtc_apiconnect_state`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS `ndbinfo`.`ndb$dblqh_tcconnect_state`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
@@ -316,13 +361,57 @@ PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
+# ndbinfo.ndb$threadblocks
+SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS `ndbinfo`.`ndb$threadblocks`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'CREATE TABLE `ndbinfo`.`ndb$threadblocks` (`node_id` INT UNSIGNED COMMENT "node id",`thr_no` INT UNSIGNED COMMENT "thread number",`block_number` INT UNSIGNED COMMENT "block number",`block_instance` INT UNSIGNED COMMENT "block instance") COMMENT="which blocks are run in which threads" ENGINE=NDBINFO','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.ndb$threadstat
+SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS `ndbinfo`.`ndb$threadstat`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'CREATE TABLE `ndbinfo`.`ndb$threadstat` (`node_id` INT UNSIGNED COMMENT "node id",`thr_no` INT UNSIGNED COMMENT "thread number",`thr_nm` VARCHAR(512) COMMENT "thread name",`c_loop` BIGINT UNSIGNED COMMENT "No of loops in main loop",`c_exec` BIGINT UNSIGNED COMMENT "No of signals executed",`c_wait` BIGINT UNSIGNED COMMENT "No of times waited for more input",`c_l_sent_prioa` BIGINT UNSIGNED COMMENT "No of prio A signals sent to own node",`c_l_sent_priob` BIGINT UNSIGNED COMMENT "No of prio B signals sent to own node",`c_r_sent_prioa` BIGINT UNSIGNED COMMENT "No of prio A signals sent to remote node",`c_r_sent_priob` BIGINT UNSIGNED COMMENT "No of prio B signals sent to remote node",`os_tid` BIGINT UNSIGNED COMMENT "OS thread id",`os_now` BIGINT UNSIGNED COMMENT "OS gettimeofday (millis)",`os_ru_utime` BIGINT UNSIGNED COMMENT "OS user CPU time (micros)",`os_ru_stime` BIGINT UNSIGNED COMMENT "OS system CPU time (micros)",`os_ru_minflt` BIGINT UNSIGNED COMMENT "OS page reclaims (soft page faults",`os_ru_majflt` BIGINT UNSIGNED COMMENT "OS page faults (hard page faults)",`os_ru_nvcsw` BIGINT UNSIGNED COMMENT "OS voluntary context switches",`os_ru_nivcsw` BIGINT UNSIGNED COMMENT "OS involuntary context switches") COMMENT="Statistics on execution threads" ENGINE=NDBINFO','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.ndb$transactions
+SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS `ndbinfo`.`ndb$transactions`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'CREATE TABLE `ndbinfo`.`ndb$transactions` (`node_id` INT UNSIGNED COMMENT "node id",`block_instance` INT UNSIGNED COMMENT "TC instance no",`objid` INT UNSIGNED COMMENT "Object id of transaction object",`apiref` INT UNSIGNED COMMENT "API reference",`transid0` INT UNSIGNED COMMENT "Transaction id",`transid1` INT UNSIGNED COMMENT "Transaction id",`state` INT UNSIGNED COMMENT "Transaction state",`flags` INT UNSIGNED COMMENT "Transaction flags",`c_ops` INT UNSIGNED COMMENT "No of operations in transaction",`outstanding` INT UNSIGNED COMMENT "Currently outstanding request",`timer` INT UNSIGNED COMMENT "Timer (seconds)") COMMENT="transactions" ENGINE=NDBINFO','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.ndb$operations
+SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS `ndbinfo`.`ndb$operations`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'CREATE TABLE `ndbinfo`.`ndb$operations` (`node_id` INT UNSIGNED COMMENT "node id",`block_instance` INT UNSIGNED COMMENT "LQH instance no",`objid` INT UNSIGNED COMMENT "Object id of operation object",`tcref` INT UNSIGNED COMMENT "TC reference",`apiref` INT UNSIGNED COMMENT "API reference",`transid0` INT UNSIGNED COMMENT "Transaction id",`transid1` INT UNSIGNED COMMENT "Transaction id",`tableid` INT UNSIGNED COMMENT "Table id",`fragmentid` INT UNSIGNED COMMENT "Fragment id",`op` INT UNSIGNED COMMENT "Operation type",`state` INT UNSIGNED COMMENT "Operation state",`flags` INT UNSIGNED COMMENT "Operation flags") COMMENT="operations" ENGINE=NDBINFO','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
 # ndbinfo.blocks
 SET @str=IF(@have_ndbinfo,'CREATE TABLE `ndbinfo`.`blocks` (block_number INT UNSIGNED PRIMARY KEY, block_name VARCHAR(512))','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'INSERT INTO `ndbinfo`.`blocks` VALUES (254, "CMVMI"), (248, "DBACC"), (250, "DBDICT"), (246, "DBDIH"), (247, "DBLQH"), (245, "DBTC"), (249, "DBTUP"), (253, "NDBFS"), (251, "NDBCNTR"), (252, "QMGR"), (255, "TRIX"), (244, "BACKUP"), (256, "DBUTIL"), (257, "SUMA"), (258, "DBTUX"), (259, "TSMAN"), (260, "LGMAN"), (261, "PGMAN"), (262, "RESTORE"), (263, "DBINFO"), (264, "DBSPJ")','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'INSERT INTO `ndbinfo`.`blocks` VALUES (254, "CMVMI"), (248, "DBACC"), (250, "DBDICT"), (246, "DBDIH"), (247, "DBLQH"), (245, "DBTC"), (249, "DBTUP"), (253, "NDBFS"), (251, "NDBCNTR"), (252, "QMGR"), (255, "TRIX"), (244, "BACKUP"), (256, "DBUTIL"), (257, "SUMA"), (258, "DBTUX"), (259, "TSMAN"), (260, "LGMAN"), (261, "PGMAN"), (262, "RESTORE"), (263, "DBINFO"), (264, "DBSPJ"), (265, "THRMAN")','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
@@ -333,55 +422,119 @@ PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'INSERT INTO `ndbinfo`.`config_params` VALUES (179, "MaxNoOfSubscriptions"), (180, "MaxNoOfSubscribers"), (181, "MaxNoOfConcurrentSubOperations"), (5, "HostName"), (3, "NodeId"), (101, "NoOfReplicas"), (103, "MaxNoOfAttributes"), (102, "MaxNoOfTables"), (149, "MaxNoOfOrderedIndexes"), (150, "MaxNoOfUniqueHashIndexes"), (110, "MaxNoOfConcurrentIndexOperations"), (105, "MaxNoOfTriggers"), (109, "MaxNoOfFiredTriggers"), (100, "MaxNoOfSavedMessages"), (177, "LockExecuteThreadToCPU"), (178, "LockMaintThreadsToCPU"), (176, "RealtimeScheduler"), (114, "LockPagesInMainMemory"), (123, "TimeBetweenWatchDogCheck"), (174, "SchedulerExecutionTimer"), (175, "SchedulerSpinTimer"), (141, "TimeBetweenWatchDogCheckInitial"), (124, "StopOnError"), (107, "MaxNoOfConcurrentOperations"), (151, "MaxNoOfLocalOperations"), (152, "MaxNoOfLocalScans"), (153, "BatchSizePerLocalScan"), (106, "MaxNoOfConcurrentTransactions"), (108, "MaxNoOfConcurrentScans"), (111, "TransactionBufferMemory"), (113, "IndexMemory"), (112, "DataMemory"), (154, "UndoIndexBuffer"), (155, "UndoDataBuffer"), (156, "RedoBuffer"), (157, "LongMessageBuffer"), (160, "DiskPageBufferMemory"), (198, "SharedGlobalMemory"), (115, "StartPartialTimeout"), (116, "StartPartitionedTimeout"), (117, "StartFailureTimeout"), (118, "HeartbeatIntervalDbDb"), (119, "HeartbeatIntervalDbApi"), (120, "TimeBetweenLocalCheckpoints"), (121, "TimeBetweenGlobalCheckpoints"), (170, "TimeBetweenEpochs"), (171, "TimeBetweenEpochsTimeout"), (182, "MaxBufferedEpochs"), (126, "NoOfFragmentLogFiles"), (140, "FragmentLogFileSize"), (189, "InitFragmentLogFiles"), (190, "DiskIOThreadPool"), (159, "MaxNoOfOpenFiles"), (162, "InitialNoOfOpenFiles"), (129, "TimeBetweenInactiveTransactionAbortCheck"), (130, "TransactionInactiveTimeout"), (131, "TransactionDeadlockDetectionTimeout"), (148, "Diskless"), (122, "ArbitrationTimeout"), (142, "Arbitration"), (7, "DataDir"), (125, "FileSystemPath"), (250, "LogLevelStartup"), (251, "LogLevelShutdown"), (252, "LogLevelStatistic"), (253, "LogLevelCheckpoint"), (254, "LogLevelNodeRestart"), (255, "LogLevelConnection"), (259, "LogLevelCongestion"), (258, "LogLevelError"), (256, "LogLevelInfo"), (158, "BackupDataDir"), (163, "DiskSyncSize"), (164, "DiskCheckpointSpeed"), (165, "DiskCheckpointSpeedInRestart"), (133, "BackupMemory"), (134, "BackupDataBufferSize"), (135, "BackupLogBufferSize"), (136, "BackupWriteSize"), (139, "BackupMaxWriteSize"), (161, "StringMemory"), (169, "MaxAllocate"), (166, "MemReportFrequency"), (167, "BackupReportFrequency"), (184, "StartupStatusReportFrequency"), (168, "ODirect"), (172, "CompressedBackup"), (173, "CompressedLCP"), (9, "TotalSendBufferMemory"), (202, "ReservedSendBufferMemory"), (185, "Nodegroup"), (186, "MaxNoOfExecutionThreads"), (188, "__ndbmt_lqh_workers"), (187, "__ndbmt_lqh_threads"), (191, "__ndbmt_classic"), (193, "FileSystemPathDD"), (194, "FileSystemPathDataFiles"), (195, "FileSystemPathUndoFiles"), (196, "InitialLogfileGroup"), (197, "InitialTablespace"), (605, "MaxLCPStartDelay"), (606, "BuildIndexThreads"), (607, "HeartbeatOrder"), (608, "DictTrace"), (609, "MaxStartFailRetries"), (610, "StartFailRetryDelay")','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'INSERT INTO `ndbinfo`.`config_params` VALUES (179, "MaxNoOfSubscriptions"), (180, "MaxNoOfSubscribers"), (181, "MaxNoOfConcurrentSubOperations"), (5, "HostName"), (3, "NodeId"), (101, "NoOfReplicas"), (103, "MaxNoOfAttributes"), (102, "MaxNoOfTables"), (149, "MaxNoOfOrderedIndexes"), (150, "MaxNoOfUniqueHashIndexes"), (110, "MaxNoOfConcurrentIndexOperations"), (105, "MaxNoOfTriggers"), (109, "MaxNoOfFiredTriggers"), (100, "MaxNoOfSavedMessages"), (177, "LockExecuteThreadToCPU"), (178, "LockMaintThreadsToCPU"), (176, "RealtimeScheduler"), (114, "LockPagesInMainMemory"), (123, "TimeBetweenWatchDogCheck"), (174, "SchedulerExecutionTimer"), (175, "SchedulerSpinTimer"), (141, "TimeBetweenWatchDogCheckInitial"), (124, "StopOnError"), (107, "MaxNoOfConcurrentOperations"), (627, "MaxDMLOperationsPerTransaction"), (151, "MaxNoOfLocalOperations"), (152, "MaxNoOfLocalScans"), (153, "BatchSizePerLocalScan"), (106, "MaxNoOfConcurrentTransactions"), (108, "MaxNoOfConcurrentScans"), (111, "TransactionBufferMemory"), (113, "IndexMemory"), (112, "DataMemory"), (154, "UndoIndexBuffer"), (155, "UndoDataBuffer"), (156, "RedoBuffer"), (157, "LongMessageBuffer"), (160, "DiskPageBufferMemory"), (198, "SharedGlobalMemory"), (115, "StartPartialTimeout"), (116, "StartPartitionedTimeout"), (117, "StartFailureTimeout"), (619, "StartNoNodegroupTimeout"), (118, "HeartbeatIntervalDbDb"), (618, "ConnectCheckIntervalDelay"), (119, "HeartbeatIntervalDbApi"), (120, "TimeBetweenLocalCheckpoints"), (121, "TimeBetweenGlobalCheckpoints"), (170, "TimeBetweenEpochs"), (171, "TimeBetweenEpochsTimeout"), (182, "MaxBufferedEpochs"), (126, "NoOfFragmentLogFiles"), (140, "FragmentLogFileSize"), (189, "InitFragmentLogFiles"), (190, "DiskIOThreadPool"), (159, "MaxNoOfOpenFiles"), (162, "InitialNoOfOpenFiles"), (129, "TimeBetweenInactiveTransactionAbortCheck"), (130, "TransactionInactiveTimeout"), (131, "TransactionDeadlockDetectionTimeout"), (148, "Diskless"), (122, "ArbitrationTimeout"), (142, "Arbitration"), (7, "DataDir"), (125, "FileSystemPath"), (250, "LogLevelStartup"), (251, "LogLevelShutdown"), (252, "LogLevelStatistic"), (253, "LogLevelCheckpoint"), (254, "LogLevelNodeRestart"), (255, "LogLevelConnection"), (259, "LogLevelCongestion"), (258, "LogLevelError"), (256, "LogLevelInfo"), (158, "BackupDataDir"), (163, "DiskSyncSize"), (164, "DiskCheckpointSpeed"), (165, "DiskCheckpointSpeedInRestart"), (133, "BackupMemory"), (134, "BackupDataBufferSize"), (135, "BackupLogBufferSize"), (136, "BackupWriteSize"), (139, "BackupMaxWriteSize"), (161, "StringMemory"), (169, "MaxAllocate"), (166, "MemReportFrequency"), (167, "BackupReportFrequency"), (184, "StartupStatusReportFrequency"), (168, "ODirect"), (172, "CompressedBackup"), (173, "CompressedLCP"), (9, "TotalSendBufferMemory"), (202, "ReservedSendBufferMemory"), (185, "Nodegroup"), (186, "MaxNoOfExecutionThreads"), (188, "__ndbmt_lqh_workers"), (187, "__ndbmt_lqh_threads"), (191, "__ndbmt_classic"), (628, "ThreadConfig"), (193, "FileSystemPathDD"), (194, "FileSystemPathDataFiles"), (195, "FileSystemPathUndoFiles"), (196, "InitialLogfileGroup"), (197, "InitialTablespace"), (605, "MaxLCPStartDelay"), (606, "BuildIndexThreads"), (607, "HeartbeatOrder"), (608, "DictTrace"), (609, "MaxStartFailRetries"), (610, "StartFailRetryDelay"), (613, "EventLogBufferSize"), (614, "Numa"), (611, "RedoOverCommitLimit"), (612, "RedoOverCommitCounter"), (615, "LateAlloc"), (616, "TwoPassInitialNodeRestartCopy"), (617, "MaxParallelScansPerFragment"), (620, 
"IndexStatAutoCreate"), (621, "IndexStatAutoUpdate"), (622, "IndexStatSaveSize"), (623, "IndexStatSaveScale"), (624, "IndexStatTriggerPct"), (625, "IndexStatTriggerScale"), (626, "IndexStatUpdateDelay"), (629, "CrashOnCorruptedTuple")','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.ndb$dbtc_apiconnect_state
+SET @str=IF(@have_ndbinfo,'CREATE TABLE `ndbinfo`.`ndb$dbtc_apiconnect_state` (state_int_value  INT UNSIGNED PRIMARY KEY, state_name VARCHAR(256), state_friendly_name VARCHAR(256), state_description VARCHAR(256))','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'INSERT INTO `ndbinfo`.`ndb$dbtc_apiconnect_state` VALUES (0, "CS_CONNECTED", "Connected", "An allocated idle transaction object"), (1, "CS_DISCONNECTED", "Disconnected", "An unallocated connection object"), (2, "CS_STARTED", "Started", "A started transaction"), (3, "CS_RECEIVING", "Receiving", "A transaction receiving operations"), (7, "CS_RESTART", "", ""), (8, "CS_ABORTING", "Aborting", "A transaction aborting"), (9, "CS_COMPLETING", "Completing", "A transaction completing"), (10, "CS_COMPLETE_SENT", "Completing", "A transaction completing"), (11, "CS_PREPARE_TO_COMMIT", "", ""), (12, "CS_COMMIT_SENT", "Committing", "A transaction committing"), (13, "CS_START_COMMITTING", "", ""), (14, "CS_COMMITTING", "Committing", "A transaction committing"), (15, "CS_REC_COMMITTING", "", ""), (16, "CS_WAIT_ABORT_CONF", "Aborting", ""), (17, "CS_WAIT_COMPLETE_CONF", "Completing", ""), (18, "CS_WAIT_COMMIT_CONF", "Committing", ""), (19, "CS_FAIL_ABORTING", "TakeOverAborting", ""), (20, "CS_FAIL_ABORTED", "TakeOverAborting", ""), (21, "CS_FAIL_PREPARED", "", ""), (22, "CS_FAIL_COMMITTING", "TakeOverCommitting", ""), (23, "CS_FAIL_COMMITTED", "TakeOverCommitting", ""), (24, "CS_FAIL_COMPLETED", "TakeOverCompleting", ""), (25, "CS_START_SCAN", "Scanning", ""), (26, "CS_SEND_FIRE_TRIG_REQ", "Precomitting", ""), (27, "CS_WAIT_FIRE_TRIG_REQ", "Precomitting", "")','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.ndb$dblqh_tcconnect_state
+SET @str=IF(@have_ndbinfo,'CREATE TABLE `ndbinfo`.`ndb$dblqh_tcconnect_state` (state_int_value  INT UNSIGNED PRIMARY KEY, state_name VARCHAR(256), state_friendly_name VARCHAR(256), state_description VARCHAR(256))','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'INSERT INTO `ndbinfo`.`ndb$dblqh_tcconnect_state` VALUES (0, "IDLE", "Idle", ""), (1, "WAIT_ACC", "WaitLock", ""), (2, "WAIT_TUPKEYINFO", "", ""), (3, "WAIT_ATTR", "WaitData", ""), (4, "WAIT_TUP", "WaitTup", ""), (5, "STOPPED", "Stopped", ""), (6, "LOG_QUEUED", "LogPrepare", ""), (7, "PREPARED", "Prepared", ""), (8, "LOG_COMMIT_WRITTEN_WAIT_SIGNAL", "", ""), (9, "LOG_COMMIT_QUEUED_WAIT_SIGNAL", "", ""), (10, "COMMIT_STOPPED", "CommittingStopped", ""), (11, "LOG_COMMIT_QUEUED", "Committing", ""), (12, "COMMIT_QUEUED", "Committing", ""), (13, "COMMITTED", "Committed", ""), (35, "WAIT_TUP_COMMIT", "Committing", ""), (14, "WAIT_ACC_ABORT", "Aborting", ""), (15, "ABORT_QUEUED", "Aborting", ""), (16, "ABORT_STOPPED", "AbortingStopped", ""), (17, "WAIT_AI_AFTER_ABORT", "Aborting", ""), (18, "LOG_ABORT_QUEUED", "Aborting", ""), (19, "WAIT_TUP_TO_ABORT", "Aborting", ""), (20, "WAIT_SCAN_AI", "Scanning", ""), (21, "SCAN_STATE_USED", "Scanning", ""), (22, "SCAN_FIRST_STOPPED", "Scanning", ""), (23, "SCAN_CHECK_STOPPED", "Scanning", ""), (24, "SCAN_STOPPED", "ScanningStopped", ""), (25, "SCAN_RELEASE_STOPPED", "ScanningStopped", ""), (26, "SCAN_CLOSE_STOPPED", "ScanningStopped", ""), (27, "COPY_CLOSE_STOPPED", "ScanningStopped", ""), (28, "COPY_FIRST_STOPPED", "ScanningStopped", ""), (29, "COPY_STOPPED", "ScanningStopped", ""), (30, "SCAN_TUPKEY", "Scanning", ""), (31, "COPY_TUPKEY", "NodeRecoveryScanning", ""), (32, "TC_NOT_CONNECTED", "Idle", ""), (33, "PREPARED_RECEIVED_COMMIT", "Committing", ""), (34, "LOG_COMMIT_WRITTEN", "Committing", "")','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
 # ndbinfo.transporters
-SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`transporters` AS SELECT node_id, remote_node_id,  CASE connection_status  WHEN 0 THEN "CONNECTED"  WHEN 1 THEN "CONNECTING"  WHEN 2 THEN "DISCONNECTED"  WHEN 3 THEN "DISCONNECTING"  ELSE NULL  END AS status FROM ndbinfo.ndb$transporters','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`transporters` AS SELECT node_id, remote_node_id,  CASE connection_status  WHEN 0 THEN "CONNECTED"  WHEN 1 THEN "CONNECTING"  WHEN 2 THEN "DISCONNECTED"  WHEN 3 THEN "DISCONNECTING"  ELSE NULL  END AS status FROM `ndbinfo`.`ndb$transporters`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
 # ndbinfo.logspaces
-SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`logspaces` AS SELECT node_id,  CASE log_type  WHEN 0 THEN "REDO"  WHEN 1 THEN "DD-UNDO"  ELSE NULL  END AS log_type, log_id, log_part, total, used FROM ndbinfo.ndb$logspaces','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`logspaces` AS SELECT node_id,  CASE log_type  WHEN 0 THEN "REDO"  WHEN 1 THEN "DD-UNDO"  ELSE NULL  END AS log_type, log_id, log_part, total, used FROM `ndbinfo`.`ndb$logspaces`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
 # ndbinfo.logbuffers
-SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`logbuffers` AS SELECT node_id,  CASE log_type  WHEN 0 THEN "REDO"  WHEN 1 THEN "DD-UNDO"  ELSE "<unknown>"  END AS log_type, log_id, log_part, total, used FROM ndbinfo.ndb$logbuffers','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`logbuffers` AS SELECT node_id,  CASE log_type  WHEN 0 THEN "REDO"  WHEN 1 THEN "DD-UNDO"  ELSE "<unknown>"  END AS log_type, log_id, log_part, total, used FROM `ndbinfo`.`ndb$logbuffers`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
 # ndbinfo.resources
-SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`resources` AS SELECT node_id,  CASE resource_id  WHEN 0 THEN "RESERVED"  WHEN 1 THEN "DISK_OPERATIONS"  WHEN 2 THEN "DISK_RECORDS"  WHEN 3 THEN "DATA_MEMORY"  WHEN 4 THEN "JOBBUFFER"  WHEN 5 THEN "FILE_BUFFERS"  WHEN 6 THEN "TRANSPORTER_BUFFERS"  ELSE "<unknown>"  END AS resource_name, reserved, used, max FROM ndbinfo.ndb$resources','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`resources` AS SELECT node_id,  CASE resource_id  WHEN 0 THEN "RESERVED"  WHEN 1 THEN "DISK_OPERATIONS"  WHEN 2 THEN "DISK_RECORDS"  WHEN 3 THEN "DATA_MEMORY"  WHEN 4 THEN "JOBBUFFER"  WHEN 5 THEN "FILE_BUFFERS"  WHEN 6 THEN "TRANSPORTER_BUFFERS"  WHEN 7 THEN "DISK_PAGE_BUFFER"  WHEN 8 THEN "QUERY_MEMORY"  WHEN 9 THEN "SCHEMA_TRANS_MEMORY"  ELSE "<unknown>"  END AS resource_name, reserved, used, max FROM `ndbinfo`.`ndb$resources`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
 # ndbinfo.counters
-SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`counters` AS SELECT node_id, b.block_name, block_instance, counter_id, CASE counter_id  WHEN 1 THEN "ATTRINFO"  WHEN 2 THEN "TRANSACTIONS"  WHEN 3 THEN "COMMITS"  WHEN 4 THEN "READS"  WHEN 5 THEN "SIMPLE_READS"  WHEN 6 THEN "WRITES"  WHEN 7 THEN "ABORTS"  WHEN 8 THEN "TABLE_SCANS"  WHEN 9 THEN "RANGE_SCANS"  WHEN 10 THEN "OPERATIONS"  ELSE "<unknown>"  END AS counter_name, val FROM ndbinfo.ndb$counters c LEFT JOIN ndbinfo.blocks b ON c.block_number = b.block_number','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`counters` AS SELECT node_id, b.block_name, block_instance, counter_id, CASE counter_id  WHEN 1 THEN "ATTRINFO"  WHEN 2 THEN "TRANSACTIONS"  WHEN 3 THEN "COMMITS"  WHEN 4 THEN "READS"  WHEN 5 THEN "SIMPLE_READS"  WHEN 6 THEN "WRITES"  WHEN 7 THEN "ABORTS"  WHEN 8 THEN "TABLE_SCANS"  WHEN 9 THEN "RANGE_SCANS"  WHEN 10 THEN "OPERATIONS"  WHEN 11 THEN "READS_RECEIVED"  WHEN 12 THEN "LOCAL_READS_SENT"  WHEN 13 THEN "REMOTE_READS_SENT"  WHEN 14 THEN "READS_NOT_FOUND"  WHEN 15 THEN "TABLE_SCANS_RECEIVED"  WHEN 16 THEN "LOCAL_TABLE_SCANS_SENT"  WHEN 17 THEN "RANGE_SCANS_RECEIVED"  WHEN 18 THEN "LOCAL_RANGE_SCANS_SENT"  WHEN 19 THEN "REMOTE_RANGE_SCANS_SENT"  WHEN 20 THEN "SCAN_BATCHES_RETURNED"  WHEN 21 THEN "SCAN_ROWS_RETURNED"  WHEN 22 THEN "PRUNED_RANGE_SCANS_RECEIVED"  WHEN 23 THEN "CONST_PRUNED_RANGE_SCANS_RECEIVED"  ELSE "<unknown>"  END AS counter_name, val FROM `ndbinfo`.`ndb$counters` c LEFT JOIN `ndbinfo`.blocks b ON c.block_number = b.block_number','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
 # ndbinfo.nodes
-SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`nodes` AS SELECT node_id, uptime, CASE status  WHEN 0 THEN "NOTHING"  WHEN 1 THEN "CMVMI"  WHEN 2 THEN "STARTING"  WHEN 3 THEN "STARTED"  WHEN 4 THEN "SINGLEUSER"  WHEN 5 THEN "STOPPING_1"  WHEN 6 THEN "STOPPING_2"  WHEN 7 THEN "STOPPING_3"  WHEN 8 THEN "STOPPING_4"  ELSE "<unknown>"  END AS status, start_phase, config_generation FROM ndbinfo.ndb$nodes','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`nodes` AS SELECT node_id, uptime, CASE status  WHEN 0 THEN "NOTHING"  WHEN 1 THEN "CMVMI"  WHEN 2 THEN "STARTING"  WHEN 3 THEN "STARTED"  WHEN 4 THEN "SINGLEUSER"  WHEN 5 THEN "STOPPING_1"  WHEN 6 THEN "STOPPING_2"  WHEN 7 THEN "STOPPING_3"  WHEN 8 THEN "STOPPING_4"  ELSE "<unknown>"  END AS status, start_phase, config_generation FROM `ndbinfo`.`ndb$nodes`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
 # ndbinfo.memoryusage
-SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`memoryusage` AS SELECT node_id,  pool_name AS memory_type,  SUM(used*entry_size) AS used,  SUM(used) AS used_pages,  SUM(total*entry_size) AS total,  SUM(total) AS total_pages FROM ndbinfo.ndb$pools WHERE block_number IN (248, 254) AND   (pool_name = "Index memory" OR pool_name = "Data memory") GROUP BY node_id, memory_type','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`memoryusage` AS SELECT node_id,  pool_name AS memory_type,  SUM(used*entry_size) AS used,  SUM(used) AS used_pages,  SUM(total*entry_size) AS total,  SUM(total) AS total_pages FROM `ndbinfo`.`ndb$pools` WHERE block_number IN (248, 254) AND   (pool_name = "Index memory" OR pool_name = "Data memory") GROUP BY node_id, memory_type','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
 # ndbinfo.diskpagebuffer
-SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE SQL SECURITY INVOKER VIEW `ndbinfo`.`diskpagebuffer` AS SELECT node_id, block_instance, pages_written, pages_written_lcp, pages_read, log_waits, page_requests_direct_return, page_requests_wait_queue, page_requests_wait_io FROM ndbinfo.ndb$diskpagebuffer','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`diskpagebuffer` AS SELECT node_id, block_instance, pages_written, pages_written_lcp, pages_read, log_waits, page_requests_direct_return, page_requests_wait_queue, page_requests_wait_io FROM `ndbinfo`.`ndb$diskpagebuffer`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.diskpagebuffer
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`diskpagebuffer` AS SELECT node_id, block_instance, pages_written, pages_written_lcp, pages_read, log_waits, page_requests_direct_return, page_requests_wait_queue, page_requests_wait_io FROM `ndbinfo`.`ndb$diskpagebuffer`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.threadblocks
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`threadblocks` AS SELECT t.node_id, t.thr_no, b.block_name, t.block_instance FROM `ndbinfo`.`ndb$threadblocks` t LEFT JOIN `ndbinfo`.blocks b ON t.block_number = b.block_number','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.threadstat
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`threadstat` AS SELECT * from `ndbinfo`.`ndb$threadstat`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.cluster_transactions
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`cluster_transactions` AS SELECT t.node_id, t.block_instance, t.transid0 + (t.transid1 << 32) as transid, s.state_friendly_name as state,  t.c_ops as count_operations,  t.outstanding as outstanding_operations,  t.timer as inactive_seconds,  (t.apiref & 65535) as client_node_id,  (t.apiref >> 16) as client_block_ref FROM `ndbinfo`.`ndb$transactions` t LEFT JOIN `ndbinfo`.`ndb$dbtc_apiconnect_state` s        ON s.state_int_value = t.state','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.server_transactions
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`server_transactions` AS SELECT map.mysql_connection_id, t.*FROM information_schema.ndb_transid_mysql_connection_map map JOIN `ndbinfo`.cluster_transactions t   ON (map.ndb_transid >> 32) = (t.transid >> 32)','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.cluster_operations
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`cluster_operations` AS SELECT o.node_id, o.block_instance, o.transid0 + (o.transid1 << 32) as transid, case o.op  when 1 then "READ" when 2 then "READ-SH" when 3 then "READ-EX" when 4 then "INSERT" when 5 then "UPDATE" when 6 then "DELETE" when 7 then "WRITE" when 8 then "UNLOCK" when 9 then "REFRESH" when 257 then "SCAN" when 258 then "SCAN-SH" when 259 then "SCAN-EX" ELSE "<unknown>" END as operation_type,  s.state_friendly_name as state,  o.tableid,  o.fragmentid,  (o.apiref & 65535) as client_node_id,  (o.apiref >> 16) as client_block_ref,  (o.tcref & 65535) as tc_node_id,  ((o.tcref >> 16) & 511) as tc_block_no,  ((o.tcref >> (16 + 9)) & 127) as tc_block_instance FROM `ndbinfo`.`ndb$operations` o LEFT JOIN `ndbinfo`.`ndb$dblqh_tcconnect_state` s        ON s.state_int_value = o.state','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.server_operations
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`server_operations` AS SELECT map.mysql_connection_id, o.* FROM `ndbinfo`.cluster_operations o JOIN information_schema.ndb_transid_mysql_connection_map map  ON (map.ndb_transid >> 32) = (o.transid >> 32)','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
@@ -391,3 +544,4 @@ SET @str=IF(@have_ndbinfo,'SET @@global.
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
+

=== modified file 'sql/ha_ndbcluster_connection.cc'
--- a/sql/ha_ndbcluster_connection.cc	2011-10-22 07:56:33 +0000
+++ b/sql/ha_ndbcluster_connection.cc	2011-10-28 09:32:10 +0000
@@ -346,7 +346,7 @@ ndb_transid_mysql_connection_map_fill_ta
 {
   DBUG_ENTER("ndb_transid_mysql_connection_map_fill_table");
 
-  const bool all = check_global_access(thd, PROCESS_ACL);
+  const bool all = (check_global_access(thd, PROCESS_ACL) == 0);
   const ulonglong self = thd_get_thread_id(thd);
 
   TABLE* table= tables->table;

=== modified file 'storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/Session.java'
--- a/storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/Session.java	2011-01-31 09:07:01 +0000
+++ b/storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/Session.java	2011-10-27 23:43:25 +0000
@@ -229,4 +229,12 @@ public interface Session {
      */
     void markModified(Object instance, String fieldName);
 
+    /** Unload the schema definition for a class. This must be done after the schema
+     * definition has changed in the database due to an alter table command.
+     * The next time the class is used the schema will be reloaded.
+     * @param cls the class for which the schema is unloaded
+     * @return the name of the schema that was unloaded
+     */
+    String unloadSchema(Class<?> cls);
+
 }
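
The new Session.unloadSchema() method added above in revision 4317 is meant to let an application recover from a stale cached table definition (NDB error 284) after an ALTER TABLE, without restarting the VM. The following is only a minimal usage sketch: the Employee domain interface, its table mapping, the connect string, and the single-retry policy are assumptions made for this illustration and are not part of the changeset.

import java.util.Properties;

import com.mysql.clusterj.ClusterJDatastoreException;
import com.mysql.clusterj.ClusterJHelper;
import com.mysql.clusterj.Session;
import com.mysql.clusterj.SessionFactory;
import com.mysql.clusterj.annotation.PersistenceCapable;
import com.mysql.clusterj.annotation.PrimaryKey;

public class SchemaReloadSketch {

    /* Hypothetical domain interface mapped to an existing "employee" table. */
    @PersistenceCapable(table = "employee")
    public interface Employee {
        @PrimaryKey
        int getId();
        void setId(int id);
        String getName();
        void setName(String name);
    }

    /* Find a row; if the lookup fails because the cached table definition is
       stale after an ALTER TABLE (e.g. NDB error 284), unload the cached
       schema for the class and retry once. The schema is reloaded on next use. */
    public static Employee findWithSchemaRetry(SessionFactory factory, int id) {
        Session session = factory.getSession();
        try {
            try {
                return session.find(Employee.class, id);
            } catch (ClusterJDatastoreException e) {
                session.unloadSchema(Employee.class);
                return session.find(Employee.class, id);
            }
        } finally {
            session.close();
        }
    }

    public static void main(String[] args) {
        Properties props = new Properties();
        /* Assumed local test cluster; adjust connect string and database. */
        props.setProperty("com.mysql.clusterj.connectstring", "localhost:1186");
        props.setProperty("com.mysql.clusterj.database", "test");
        SessionFactory factory = ClusterJHelper.getSessionFactory(props);
        System.out.println(findWithSchemaRetry(factory, 1));
        factory.close();
    }
}
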

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionFactoryImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionFactoryImpl.java	2011-08-03 01:00:56 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionFactoryImpl.java	2011-10-27 23:43:25 +0000
@@ -489,4 +489,19 @@ public class SessionFactoryImpl implemen
         return result;
     }
 
+    public String unloadSchema(Class<?> cls, Dictionary dictionary) {
+        synchronized(typeToHandlerMap) {
+            String tableName = null;
+            DomainTypeHandler<?> domainTypeHandler = typeToHandlerMap.remove(cls);
+            if (domainTypeHandler != null) {
+                // remove the ndb dictionary cached table definition
+                tableName = domainTypeHandler.getTableName();
+                if (tableName != null) {
+                    dictionary.removeCachedTable(tableName);
+                }
+            }
+            return tableName;
+        }
+    }
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java	2011-10-22 00:40:34 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java	2011-10-27 23:43:25 +0000
@@ -1384,4 +1384,8 @@ public class SessionImpl implements Sess
         }
     }
 
+    public String unloadSchema(Class<?> cls) {
+        return factory.unloadSchema(cls, dictionary);
+    }
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainTypeHandlerFactoryImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainTypeHandlerFactoryImpl.java	2011-02-02 09:52:33 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainTypeHandlerFactoryImpl.java	2011-10-27 23:43:25 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -53,7 +53,7 @@ public class DomainTypeHandlerFactoryImp
     }
 
     public <T> DomainTypeHandler<T> createDomainTypeHandler(Class<T> domainClass, Dictionary dictionary) {
-        DomainTypeHandler<T> handler;
+        DomainTypeHandler<T> handler = null;
         StringBuffer errorMessages = new StringBuffer();
         for (DomainTypeHandlerFactory factory: domainTypeHandlerFactories) {
             try {
@@ -82,6 +82,15 @@ public class DomainTypeHandlerFactoryImp
         } catch (Exception e) {
             errorMessages.append(e.toString());
             throw new ClusterJUserException(errorMessages.toString(), e);
+        } finally {
+            // if handler is null, there may be a problem with the schema, so remove it from the local dictionary
+            if (handler == null) {
+                String tableName = DomainTypeHandlerImpl.getTableName(domainClass);
+                if (tableName != null) {
+                    logger.info(local.message("MSG_Removing_Schema", tableName, domainClass.getName()));
+                    dictionary.removeCachedTable(tableName);                    
+                }
+            }
         }
     }
 

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainTypeHandlerImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainTypeHandlerImpl.java	2011-02-02 09:52:33 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainTypeHandlerImpl.java	2011-10-27 23:43:25 +0000
@@ -244,7 +244,29 @@ public class DomainTypeHandlerImpl<T> ex
         }
     }
 
-    protected <O extends DynamicObject> String getTableNameForDynamicObject(Class<O> cls) {
+    /** Get the table name mapped to the domain class.
+     * @param cls the domain class
+     * @return the table name for the domain class
+     */
+    protected static String getTableName(Class<?> cls) {
+        String tableName = null;
+        if (DynamicObject.class.isAssignableFrom(cls)) {
+            tableName = getTableNameForDynamicObject((Class<DynamicObject>)cls);
+        } else {
+            PersistenceCapable persistenceCapable = cls.getAnnotation(PersistenceCapable.class);
+            if (persistenceCapable != null) {
+                tableName = persistenceCapable.table();            
+            }
+        }
+        return tableName;
+    }
+
+    /** Get the table name for a dynamic object. The table name is available either from
+     * the PersistenceCapable annotation or via the table() method.
+     * @param cls the dynamic object class
+     * @return the table name for the dynamic object class
+     */
+    protected static <O extends DynamicObject> String getTableNameForDynamicObject(Class<O> cls) {
         DynamicObject dynamicObject;
         PersistenceCapable persistenceCapable = cls.getAnnotation(PersistenceCapable.class);
         String tableName = null;

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/Dictionary.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/Dictionary.java	2011-06-30 16:04:23 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/Dictionary.java	2011-10-27 23:43:25 +0000
@@ -1,6 +1,5 @@
 /*
-   Copyright (c) 2010 Sun Microsystems, Inc.
-   Use is subject to license terms.
+   Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -27,4 +26,6 @@ public interface Dictionary {
 
     public Table getTable(String tableName);
 
+    public void removeCachedTable(String tableName);
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/resources/com/mysql/clusterj/core/Bundle.properties'
--- a/storage/ndb/clusterj/clusterj-core/src/main/resources/com/mysql/clusterj/core/Bundle.properties	2011-08-29 08:17:26 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/resources/com/mysql/clusterj/core/Bundle.properties	2011-10-27 23:43:25 +0000
@@ -133,3 +133,4 @@ ERR_Wrong_Parameter_Type_For_In:For fiel
 either an array of Object types or a List.
 ERR_Parameter_Too_Big_For_In:For field ''{0}'', the parameter of length {1} for query operator ''in'' \
 is too big; it must contain fewer than 4097 items.
+MSG_Removing_Schema:Removing schema {0} after failure to initialize domain type handler for class {1}.

=== modified file 'storage/ndb/clusterj/clusterj-test/Makefile.am'
--- a/storage/ndb/clusterj/clusterj-test/Makefile.am	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-test/Makefile.am	2011-10-27 23:43:25 +0000
@@ -107,6 +107,7 @@ clusterj_test_java = \
   $(clusterj_test_src)/testsuite/clusterj/QueryUniqueKeyTest.java \
   $(clusterj_test_src)/testsuite/clusterj/QueryYearTypesTest.java \
   $(clusterj_test_src)/testsuite/clusterj/SaveTest.java \
+  $(clusterj_test_src)/testsuite/clusterj/SchemaChangeTest.java \
   $(clusterj_test_src)/testsuite/clusterj/SerialTransactionsTest.java \
   $(clusterj_test_src)/testsuite/clusterj/TimeAsSqlTimeTypesTest.java \
   $(clusterj_test_src)/testsuite/clusterj/TimeAsUtilDateTypesTest.java \

=== modified file 'storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractClusterJTest.java'
--- a/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractClusterJTest.java	2011-05-10 01:19:27 +0000
+++ b/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractClusterJTest.java	2011-10-28 23:29:26 +0000
@@ -37,6 +37,7 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
+import java.sql.SQLWarning;
 import java.sql.Statement;
 
 import java.util.ArrayList;
@@ -319,6 +320,77 @@ public abstract class AbstractClusterJTe
         }
     }
 
+    /** Execute the sql in its own statement. If the connection is not
+     * currently autocommit, set autocommit to true and restore it after
+     * the statement is executed.
+     * @param sql the sql to execute
+     */
+    protected void executeSQL(String sql) {
+        Statement statement = null;
+        try {
+            boolean autoCommit = connection.getAutoCommit();
+            if (!autoCommit) {
+                connection.setAutoCommit(true);
+            }
+            statement = connection.createStatement();
+            statement.execute(sql);
+            if (!autoCommit) {
+                connection.setAutoCommit(autoCommit);
+            }
+        } catch (SQLException e) {
+            error("Caught " + e.getClass() + " trying: " + sql);
+            if (statement == null) {
+                error(analyzeWarnings(connection));
+            } else {
+                error(analyzeWarnings(statement));
+            }
+        } finally {
+            if (statement != null) {
+                try {
+                    statement.close();
+                } catch (SQLException e) {
+                    // nothing can be done here
+                    error("Error closing statement " + sql);
+                }
+            }
+        }
+    }
+
+    protected String analyzeWarnings(Connection connection) {
+        SQLWarning warning = null;
+        StringBuilder builder = new StringBuilder();
+        try {
+            warning = connection.getWarnings();
+            analyzeWarnings(warning, builder);
+        } catch (SQLException e) {
+            builder.append("Error getting warnings from connection:\n");
+            builder.append(e.getMessage());
+        }
+        return builder.toString();
+    }
+
+    protected String analyzeWarnings(Statement statement) {
+        SQLWarning warning = null;
+        StringBuilder builder = new StringBuilder();
+        try {
+            warning = statement.getWarnings();
+            analyzeWarnings(warning, builder);
+        } catch (SQLException e) {
+            builder.append("Error getting warnings from statement:\n");
+            builder.append(e.getMessage());
+        }
+        return builder.toString();
+    }
+
+    protected StringBuilder analyzeWarnings(SQLWarning warning, StringBuilder builder) {
+        if (warning != null) {
+            builder.append(warning.getMessage());
+            builder.append("\n");
+            analyzeWarnings(warning.getNextWarning(), builder);
+        }
+        return builder;
+    }
+
     Properties getProperties(String fileName) {
         Properties result = null;
         try {

=== added file 'storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/SchemaChangeTest.java'
--- a/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/SchemaChangeTest.java	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/SchemaChangeTest.java	2011-10-28 23:29:26 +0000
@@ -0,0 +1,92 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+package testsuite.clusterj;
+
+import com.mysql.clusterj.ClusterJDatastoreException;
+import com.mysql.clusterj.ClusterJUserException;
+import com.mysql.clusterj.ColumnMetadata;
+import com.mysql.clusterj.DynamicObject;
+import com.mysql.clusterj.annotation.PersistenceCapable;
+
+import testsuite.clusterj.model.StringTypes;
+
+public class SchemaChangeTest extends AbstractClusterJModelTest {
+
+    private static final String modifyTableStatement = 
+        "alter table stringtypes drop column string_not_null_none";
+
+    private static final String restoreTableStatement = 
+        "alter table stringtypes add string_not_null_none varchar(20) DEFAULT NULL";
+
+    @Override
+    public void localSetUp() {
+        createSessionFactory();
+        session = sessionFactory.getSession();
+        // create will cache the schema
+        session.deletePersistentAll(StringTypes.class);
+        session.makePersistent(session.newInstance(StringTypes.class, 0));
+        addTearDownClasses(StringTypes.class);
+    }
+
+    public void testFind() {
+        // change the schema (drop a column)
+        executeSQL(modifyTableStatement);
+        try {
+            // find the row (with a different schema) which will fail
+            session.find(StringTypes.class, 0);
+        } catch (ClusterJDatastoreException dex) {
+            // make sure it's the right exception
+            if (!dex.getMessage().contains("code 284")) {
+                error("ClusterJDatastoreException must contain code 284 but contains only " + dex.getMessage());
+            }
+            // unload the schema for StringTypes which also clears the cached dictionary table
+            String tableName = session.unloadSchema(StringTypes.class);
+            // make sure we unloaded the right table
+            errorIfNotEqual("Table name mismatch", "stringtypes", tableName);
+            // it should work with a different schema that doesn't include the dropped column
+            StringTypes2 zero = session.find(StringTypes2.class, 0);
+            // verify that column string_not_null_none does not exist
+            ColumnMetadata[] metadatas = zero.columnMetadata();
+            for (ColumnMetadata metadata: metadatas) {
+                if ("string_not_null_none".equals(metadata.name())) {
+                    error("Column string_not_null_none should not exist after schema change.");
+                }
+            }
+            try {
+                // find the row (with a different schema) which will fail with a user exception
+                session.find(StringTypes.class, 0);
+                error("Unexpected success using StringTypes class without column string_not_null_none defined");
+            } catch (ClusterJUserException uex) {
+                // StringTypes can't be loaded because of the missing column, but
+                // the cached dictionary table was removed when the domain type handler couldn't be created
+                executeSQL(restoreTableStatement);
+                // after restoring the table definition, string_not_null_none is defined again
+                // find the row (with a different schema) which will now work
+                session.find(StringTypes.class, 0);
+            }
+        }
+        failOnError();
+    }
+
+    /** StringTypes dynamic class to map stringtypes after column string_not_null_none is removed.
+     */
+    @PersistenceCapable(table="stringtypes")
+    public static class StringTypes2 extends DynamicObject {
+        public StringTypes2() {}
+    }
+}

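In application terms, the recovery pattern exercised by this test is roughly the following
sketch (the helper name and generic signature are placeholders; the real test above uses
executeSQL and a second DynamicObject class rather than a simple retry helper):

    import com.mysql.clusterj.ClusterJDatastoreException;
    import com.mysql.clusterj.Session;

    // Recover from NDB error 284 (table definition changed) without restarting
    // the VM, as SchemaChangeTest does above.
    <T> T findWithSchemaReload(Session session, Class<T> cls, Object key) {
        try {
            return session.find(cls, key);
        } catch (ClusterJDatastoreException dex) {
            if (dex.getMessage().contains("code 284")) {
                // drop the cached schema (and the cached dictionary table)
                session.unloadSchema(cls);
                // retry; this works as long as cls still matches the new table
                // definition (the test above switches to StringTypes2 because
                // a mapped column was dropped)
                return session.find(cls, key);
            }
            throw dex;
        }
    }
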
=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DictionaryImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DictionaryImpl.java	2011-02-02 09:52:33 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DictionaryImpl.java	2011-10-27 23:43:25 +0000
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ *  Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -17,11 +17,12 @@
 
 package com.mysql.clusterj.tie;
 
+import com.mysql.ndbjtie.ndbapi.NdbDictionary.Dictionary;
 import com.mysql.ndbjtie.ndbapi.NdbDictionary.DictionaryConst;
-import com.mysql.ndbjtie.ndbapi.NdbDictionary.IndexConst;
-import com.mysql.ndbjtie.ndbapi.NdbDictionary.TableConst;
 import com.mysql.ndbjtie.ndbapi.NdbDictionary.DictionaryConst.ListConst.Element;
 import com.mysql.ndbjtie.ndbapi.NdbDictionary.DictionaryConst.ListConst.ElementArray;
+import com.mysql.ndbjtie.ndbapi.NdbDictionary.IndexConst;
+import com.mysql.ndbjtie.ndbapi.NdbDictionary.TableConst;
 
 import com.mysql.clusterj.core.store.Index;
 import com.mysql.clusterj.core.store.Table;
@@ -43,9 +44,9 @@ class DictionaryImpl implements com.mysq
     static final Logger logger = LoggerFactoryService.getFactory()
             .getInstance(DictionaryImpl.class);
 
-    private DictionaryConst ndbDictionary;
+    private Dictionary ndbDictionary;
 
-    public DictionaryImpl(DictionaryConst ndbDictionary) {
+    public DictionaryImpl(Dictionary ndbDictionary) {
         this.ndbDictionary = ndbDictionary;
     }
 
@@ -122,4 +123,8 @@ class DictionaryImpl implements com.mysq
         }
     }
 
+    public void removeCachedTable(String tableName) {
+        ndbDictionary.removeCachedTable(tableName);
+    }
+
 }

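The new store-level hook is intended to be driven from clusterj-core when a domain type
handler cannot be built after a schema change (compare MSG_Removing_Schema in
Bundle.properties above). A rough, hypothetical sketch of that call site, not the actual
core code:

    import com.mysql.clusterj.core.store.Dictionary;

    // If creating the domain type handler fails because the table definition
    // changed, evict the stale cached table so the next access reloads it from
    // the NDB dictionary. The real code also reports MSG_Removing_Schema.
    void unloadAfterHandlerFailure(Dictionary dictionary, String tableName) {
        dictionary.removeCachedTable(tableName);
    }
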
=== added file 'storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/SchemaChangeTest.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/SchemaChangeTest.java	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/SchemaChangeTest.java	2011-10-27 23:43:25 +0000
@@ -0,0 +1,22 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+package testsuite.clusterj.tie;
+
+public class SchemaChangeTest extends testsuite.clusterj.SchemaChangeTest {
+
+}

=== added file 'storage/ndb/include/kernel/statedesc.hpp'
--- a/storage/ndb/include/kernel/statedesc.hpp	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/include/kernel/statedesc.hpp	2011-10-28 10:16:23 +0000
@@ -0,0 +1,32 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#ifndef NDB_STATE_DESC_H
+#define NDB_STATE_DESC_H
+
+struct ndbkernel_state_desc
+{
+  unsigned value;
+  const char * name;
+  const char * friendly_name;
+  const char * description;
+};
+
+extern struct ndbkernel_state_desc g_dbtc_apiconnect_state_desc[];
+extern struct ndbkernel_state_desc g_dblqh_tcconnect_state_desc[];
+
+#endif

=== modified file 'storage/ndb/src/kernel/blocks/ERROR_codes.txt'
--- a/storage/ndb/src/kernel/blocks/ERROR_codes.txt	2011-05-25 15:03:11 +0000
+++ b/storage/ndb/src/kernel/blocks/ERROR_codes.txt	2011-10-28 14:17:25 +0000
@@ -18,7 +18,7 @@ Next NDBCNTR 1002
 Next NDBFS 2000
 Next DBACC 3002
 Next DBTUP 4035
-Next DBLQH 5072
+Next DBLQH 5074
 Next DBDICT 6026
 Next DBDIH 7229
 Next DBTC 8092

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-10-23 08:37:00 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-10-28 16:37:39 +0000
@@ -2372,6 +2372,17 @@ void Dbdih::execSTART_PERMREQ(Signal* si
   CRASH_INSERTION(7122);
   ndbrequire(isMaster());
   ndbrequire(refToNode(retRef) == nodeId);
+  if (c_lcpMasterTakeOverState.state != LMTOS_IDLE)
+  {
+    jam();
+    infoEvent("DIH : Denied request for start permission from %u "
+              "while LCP Master takeover in progress.",
+              nodeId);
+    signal->theData[0] = nodeId;
+    signal->theData[1] = StartPermRef::ZNODE_START_DISALLOWED_ERROR;
+    sendSignal(retRef, GSN_START_PERMREF, signal, 2, JBB);
+    return;
+  }
   if ((c_nodeStartMaster.activeState) ||
       (c_nodeStartMaster.wait != ZFALSE) ||
       ERROR_INSERTED_CLEAR(7175)) {

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2011-10-13 09:51:01 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2011-10-28 09:56:57 +0000
@@ -18,6 +18,7 @@
 #ifndef DBLQH_H
 #define DBLQH_H
 
+#ifndef DBLQH_STATE_EXTRACT
 #include <pc.hpp>
 #include <ndb_limits.h>
 #include <SimulatedBlock.hpp>
@@ -41,6 +42,7 @@
 class Dbacc;
 class Dbtup;
 class Lgman;
+#endif // DBLQH_STATE_EXTRACT
 
 #ifdef DBLQH_C
 // Constants
@@ -410,10 +412,15 @@ class Lgman;
  *  - TEST 
  *  - LOG 
  */
-class Dblqh: public SimulatedBlock {
+class Dblqh 
+#ifndef DBLQH_STATE_EXTRACT
+  : public SimulatedBlock
+#endif
+{
   friend class DblqhProxy;
 
 public:
+#ifndef DBLQH_STATE_EXTRACT
   enum LcpCloseState {
     LCP_IDLE = 0,
     LCP_RUNNING = 1,       // LCP is running
@@ -1940,7 +1947,7 @@ public:
     Uint32 usageCountW; // writers
   }; // Size 100 bytes
   typedef Ptr<Tablerec> TablerecPtr;
-
+#endif // DBLQH_STATE_EXTRACT
   struct TcConnectionrec {
     enum ListState {
       NOT_IN_LIST = 0,
@@ -2021,6 +2028,7 @@ public:
       COPY_CONNECTED = 2,
       LOG_CONNECTED = 3
     };
+#ifndef DBLQH_STATE_EXTRACT
     ConnectState connectState;
     UintR copyCountWords;
     Uint32 keyInfoIVal;
@@ -2131,8 +2139,10 @@ public:
       Uint32 m_page_id[2];
       Local_key m_disk_ref[2];
     } m_nr_delete;
+#endif // DBLQH_STATE_EXTRACT
   }; /* p2c: size = 280 bytes */
-  
+
+#ifndef DBLQH_STATE_EXTRACT
   typedef Ptr<TcConnectionrec> TcConnectionrecPtr;
 
   struct TcNodeFailRecord {
@@ -3278,8 +3288,9 @@ public:
 
   void sendFireTrigConfTc(Signal* signal, BlockReference ref, Uint32 Tdata[]);
   bool check_fire_trig_pass(Uint32 op, Uint32 pass);
+#endif
 };
-
+#ifndef DBLQH_STATE_EXTRACT
 inline
 bool
 Dblqh::ScanRecord::check_scan_batch_completed() const
@@ -3402,5 +3413,5 @@ Dblqh::TRACE_OP_CHECK(const TcConnection
 	   regTcPtr->operation == ZDELETE)) ||
     ERROR_INSERTED(5713);
 }
-
+#endif
 #endif

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-10-17 16:46:12 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-10-28 16:37:39 +0000
@@ -13788,6 +13788,15 @@ void Dblqh::execBACKUP_FRAGMENT_REF(Sign
 void Dblqh::execBACKUP_FRAGMENT_CONF(Signal* signal) 
 {
   jamEntry();
+
+  if (ERROR_INSERTED(5073))
+  {
+    ndbout_c("Delaying BACKUP_FRAGMENT_CONF");
+    sendSignalWithDelay(reference(), GSN_BACKUP_FRAGMENT_CONF, signal, 500,
+                        signal->getLength());
+    return;
+  }
+
   //BackupFragmentConf* conf= (BackupFragmentConf*)signal->getDataPtr();
 
   lcpPtr.i = 0;

=== added file 'storage/ndb/src/kernel/blocks/dblqh/DblqhStateDesc.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhStateDesc.cpp	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhStateDesc.cpp	2011-10-28 09:56:57 +0000
@@ -0,0 +1,76 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#include <kernel/statedesc.hpp>
+#define DBLQH_STATE_EXTRACT
+#include "Dblqh.hpp"
+
+#define SDESC(a,b,c) { (unsigned)Dblqh::TcConnectionrec::a, #a, b, c }
+
+struct ndbkernel_state_desc g_dblqh_tcconnect_state_desc[] =
+{
+  SDESC(IDLE, "Idle", ""),
+  SDESC(WAIT_ACC, "WaitLock", ""),
+  SDESC(WAIT_TUPKEYINFO, "", ""),
+  SDESC(WAIT_ATTR, "WaitData", ""),
+  SDESC(WAIT_TUP, "WaitTup", ""),
+  SDESC(STOPPED, "Stopped", ""),
+  SDESC(LOG_QUEUED, "LogPrepare", ""),
+  SDESC(PREPARED, "Prepared", ""),
+  SDESC(LOG_COMMIT_WRITTEN_WAIT_SIGNAL, "", ""),
+  SDESC(LOG_COMMIT_QUEUED_WAIT_SIGNAL, "", ""),
+
+  // Commit in progress states
+  /* -------------------------------------------------------------------- */
+  SDESC(COMMIT_STOPPED, "CommittingStopped", ""),
+  SDESC(LOG_COMMIT_QUEUED, "Committing", ""),
+  SDESC(COMMIT_QUEUED, "Committing", ""),
+  SDESC(COMMITTED, "Committed", ""),
+  SDESC(WAIT_TUP_COMMIT, "Committing", ""),
+
+  /* -------------------------------------------------------------------- */
+  // Abort in progress states
+  /* -------------------------------------------------------------------- */
+  SDESC(WAIT_ACC_ABORT, "Aborting", ""),
+  SDESC(ABORT_QUEUED, "Aborting", ""),
+  SDESC(ABORT_STOPPED, "AbortingStopped", ""),
+  SDESC(WAIT_AI_AFTER_ABORT, "Aborting", ""),
+  SDESC(LOG_ABORT_QUEUED, "Aborting", ""),
+  SDESC(WAIT_TUP_TO_ABORT, "Aborting", ""),
+
+  /* -------------------------------------------------------------------- */
+  // Scan in progress states
+  /* -------------------------------------------------------------------- */
+  SDESC(WAIT_SCAN_AI, "Scanning", ""),
+  SDESC(SCAN_STATE_USED, "Scanning", ""),
+  SDESC(SCAN_FIRST_STOPPED, "Scanning", ""),
+  SDESC(SCAN_CHECK_STOPPED, "Scanning", ""),
+  SDESC(SCAN_STOPPED, "ScanningStopped", ""),
+  SDESC(SCAN_RELEASE_STOPPED, "ScanningStopped", ""),
+  SDESC(SCAN_CLOSE_STOPPED, "ScanningStopped", ""),
+  SDESC(COPY_CLOSE_STOPPED, "ScanningStopped", ""),
+  SDESC(COPY_FIRST_STOPPED, "ScanningStopped", ""),
+  SDESC(COPY_STOPPED, "ScanningStopped", ""),
+  SDESC(SCAN_TUPKEY, "Scanning", ""),
+  SDESC(COPY_TUPKEY, "NodeRecoveryScanning", ""),
+
+  SDESC(TC_NOT_CONNECTED, "Idle", ""),
+  SDESC(PREPARED_RECEIVED_COMMIT, "Committing", ""),
+  SDESC(LOG_COMMIT_WRITTEN, "Committing", ""),
+
+  { 0, 0, 0, 0 }
+};

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2011-10-13 16:58:56 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2011-10-28 11:52:35 +0000
@@ -18,6 +18,7 @@
 #ifndef DBTC_H
 #define DBTC_H
 
+#ifndef DBTC_STATE_EXTRACT
 #include <ndb_limits.h>
 #include <pc.hpp>
 #include <SimulatedBlock.hpp>
@@ -37,6 +38,7 @@
 #include <signaldata/EventReport.hpp>
 #include <trigger_definitions.h>
 #include <SignalCounter.hpp>
+#endif
 
 #ifdef DBTC_C
 /*
@@ -143,14 +145,20 @@
 #define ZTRANS_TOO_BIG 261
 #endif
 
-class Dbtc: public SimulatedBlock {
+class Dbtc
+#ifndef DBTC_STATE_EXTRACT
+  : public SimulatedBlock
+#endif
+{
 public:
 
+#ifndef DBTC_STATE_EXTRACT
   /**
    * Incase of mt-TC...only one instance will perform actual take-over
    *   let this be TAKE_OVER_INSTANCE
    */
   STATIC_CONST( TAKE_OVER_INSTANCE = 1 );
+#endif
 
   enum ConnectionState {
     CS_CONNECTED = 0,
@@ -188,6 +196,7 @@ public:
     CS_WAIT_FIRE_TRIG_REQ = 27
   };
 
+#ifndef DBTC_STATE_EXTRACT
   enum OperationState {
     OS_CONNECTED = 1,
     OS_OPERATING = 2,
@@ -2103,6 +2112,7 @@ private:
 #endif
   Uint32 m_deferred_enabled;
   Uint32 m_max_writes_per_trans;
+#endif
 };
 
 #endif

=== added file 'storage/ndb/src/kernel/blocks/dbtc/DbtcStateDesc.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcStateDesc.cpp	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcStateDesc.cpp	2011-10-28 09:56:57 +0000
@@ -0,0 +1,59 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#include <kernel/statedesc.hpp>
+#define DBTC_STATE_EXTRACT
+#include "Dbtc.hpp"
+
+#define SDESC(a,b,c) { (unsigned)Dbtc::a, #a, b, c }
+
+/**
+ * Value
+ * Friendly name
+ * Description
+ */
+struct ndbkernel_state_desc g_dbtc_apiconnect_state_desc[] =
+{
+  SDESC(CS_CONNECTED, "Connected",
+        "An allocated idle transaction object"),
+  SDESC(CS_DISCONNECTED, "Disconnected",
+        "An unallocated connection object"),
+  SDESC(CS_STARTED, "Started", "A started transaction"),
+  SDESC(CS_RECEIVING, "Receiving", "A transaction receiving operations"),
+  SDESC(CS_RESTART, "", ""),
+  SDESC(CS_ABORTING, "Aborting", "A transaction aborting"),
+  SDESC(CS_COMPLETING, "Completing", "A transaction completing"),
+  SDESC(CS_COMPLETE_SENT, "Completing", "A transaction completing"),
+  SDESC(CS_PREPARE_TO_COMMIT, "", ""),
+  SDESC(CS_COMMIT_SENT, "Committing", "A transaction committing"),
+  SDESC(CS_START_COMMITTING, "", ""),
+  SDESC(CS_COMMITTING, "Committing", "A transaction committing"),
+  SDESC(CS_REC_COMMITTING, "", ""),
+  SDESC(CS_WAIT_ABORT_CONF, "Aborting", ""),
+  SDESC(CS_WAIT_COMPLETE_CONF, "Completing", ""),
+  SDESC(CS_WAIT_COMMIT_CONF, "Committing", ""),
+  SDESC(CS_FAIL_ABORTING, "TakeOverAborting", ""),
+  SDESC(CS_FAIL_ABORTED, "TakeOverAborting", ""),
+  SDESC(CS_FAIL_PREPARED, "", ""),
+  SDESC(CS_FAIL_COMMITTING, "TakeOverCommitting", ""),
+  SDESC(CS_FAIL_COMMITTED, "TakeOverCommitting", ""),
+  SDESC(CS_FAIL_COMPLETED, "TakeOverCompleting", ""),
+  SDESC(CS_START_SCAN, "Scanning", ""),
+  SDESC(CS_SEND_FIRE_TRIG_REQ, "Precomitting", ""),
+  SDESC(CS_WAIT_FIRE_TRIG_REQ, "Precomitting", ""),
+  { 0, 0, 0, 0 }
+};

=== modified file 'storage/ndb/src/ndbapi/NdbQueryOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-10-22 09:38:48 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-10-28 13:38:36 +0000
@@ -2464,6 +2464,7 @@ NdbQueryImpl::handleBatchComplete(NdbRoo
            << ", finalBatchFrags=" << m_finalBatchFrags
            <<  endl;
   }
+  assert(rootFrag.isFragBatchComplete());
 
   /* May received fragment data after a SCANREF() (timeout?) 
    * terminated the scan.  We are about to close this query, 
@@ -2471,8 +2472,6 @@ NdbQueryImpl::handleBatchComplete(NdbRoo
    */
   if (likely(m_errorReceived == 0))
   {
-    assert(rootFrag.isFragBatchComplete());
-
     assert(m_pendingFrags > 0);                // Check against underflow.
     assert(m_pendingFrags <= m_rootFragCount); // .... and overflow
     m_pendingFrags--;
@@ -2489,6 +2488,16 @@ NdbQueryImpl::handleBatchComplete(NdbRoo
     rootFrag.setReceivedMore();
     return true;
   }
+  else if (!getQueryDef().isScanQuery())  // A failed lookup query
+  {
+    /**
+     * A lookup query will retrieve the rows as part of ::execute().
+     * -> Error must be visible through API before we return control
+     *    to the application.
+     */
+    setErrorCode(m_errorReceived);
+    return true;
+  }
 
   return false;
 } // NdbQueryImpl::handleBatchComplete
@@ -4970,12 +4979,12 @@ NdbQueryOperationImpl::execTCKEYREF(cons
   if (&getRoot() == this || 
       ref->errorCode != static_cast<Uint32>(Err_TupleNotFound))
   {
-    getQuery().setErrorCode(ref->errorCode);
     if (aSignal->getLength() == TcKeyRef::SignalLength)
     {
       // Signal may contain additional error data
       getQuery().m_error.details = (char *)UintPtr(ref->errorData);
     }
+    getQuery().setFetchTerminated(ref->errorCode,false);
   }
 
   NdbRootFragment& rootFrag = getQuery().m_rootFrags[0];

=== modified file 'storage/ndb/src/ndbjtie/jtie/jtie_tconv_idcache_impl.hpp'
--- a/storage/ndb/src/ndbjtie/jtie/jtie_tconv_idcache_impl.hpp	2011-09-14 13:42:19 +0000
+++ b/storage/ndb/src/ndbjtie/jtie/jtie_tconv_idcache_impl.hpp	2011-10-31 07:34:12 +0000
@@ -1,18 +1,18 @@
 /*
- Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
 */
 /*
  * jtie_tconv_idcache_impl.hpp
@@ -79,8 +79,6 @@ jniGetMemberID< jfieldID >(JNIEnv * env,
 
 // ---------------------------------------------------------------------------
 
-// XXX document these macros...
-
 /**
  * Defines an info type describing a field member of a Java class.
  */
@@ -98,7 +96,7 @@ jniGetMemberID< jfieldID >(JNIEnv * env,
  */
 #define JTIE_DEFINE_CLASS_MEMBER_INFO( T, IDT )                 \
     struct T {                                                  \
-        static const char * const class_name;                   \
+        static const char * const jclass_name;                  \
         static const char * const member_name;                  \
         static const char * const member_descriptor;            \
         typedef IDT * memberID_t;                               \
@@ -107,16 +105,21 @@ jniGetMemberID< jfieldID >(JNIEnv * env,
 /**
  * Instantiates an info type describing a member of a Java class.
  */
-#define JTIE_INSTANTIATE_CLASS_MEMBER_INFO( T, CN, MN, MD )             \
-    const char * const T::class_name = CN;                              \
-    const char * const T::member_name = MN;                             \
-    const char * const T::member_descriptor = MD;                       \
+// XXX: may need separate macro versions if T is non-template type
+//    const char * const T::jclass_name = JCN;
+#define JTIE_INSTANTIATE_CLASS_MEMBER_INFO( T, JCN, JMN, JMD )          \
+    template<> const char * const T::jclass_name = JCN;                 \
+    template<> const char * const T::member_name = JMN;                 \
+    template<> const char * const T::member_descriptor = JMD;           \
+    template<> unsigned long MemberId< T >::nIdLookUps = 0;             \
+    template<> jclass MemberIdCache< T >::gClassRef = NULL;             \
+    template<> T::memberID_t MemberIdCache< T >::mid = NULL;            \
     template struct MemberId< T >;                                      \
     template struct MemberIdCache< T >;
 
 /**
  * Provides uniform access to the JNI Field/Method ID of a Java class member
- * as described by the member info type 'C'.
+ * specified by an info type M.
  *
  * This base class does not cache the member ID and the class object, but
  * it retrieves the member ID from JNI upon each access; different caching
@@ -151,9 +154,9 @@ jniGetMemberID< jfieldID >(JNIEnv * env,
  *
  * Derived classes implement any caching underneath this usage pattern.
  */
-template< typename C >
+template< typename M >
 struct MemberId {
-    typedef typename C::memberID_t ID_t;
+    typedef typename M::memberID_t ID_t;
 
     // number of JNI Get<Field|Method>ID() invocations for statistics
     static unsigned long nIdLookUps;
@@ -175,7 +178,7 @@ struct MemberId {
 
     /**
      * Returns a JNI Reference to the class declaring the member specified
-     * by info type 'C'.
+     * by info type M.
      *
      * Depending upon the underlying caching strategy, a returned reference
      * may be local or global, weak or strong; the scope of its use must be
@@ -194,23 +197,23 @@ struct MemberId {
      */
     static jclass getClass(JNIEnv * env) {
         assert(env->ExceptionCheck() == JNI_OK);
-        jclass cls = env->FindClass(C::class_name);
+        jclass cls = env->FindClass(M::jclass_name);
         if (cls == NULL) { // break out for better diagnostics
             assert(env->ExceptionCheck() != JNI_OK); // exception pending
+            env->ExceptionDescribe(); // print error diagnostics to stderr
 
-//#ifndef NDEBUG // XXX for debugging
-            // print error diagnostics
-            char m[256];
+#if 1
+            // XXX raise a fatal error for debugging
+            char m[1024];
 #ifndef _WIN32
-            snprintf(m, 256, "JTie: failed to find Java class '%s'\n",
+            snprintf(m, 1024, "JTie: failed to find Java class '%s'\n",
 #else
-            _snprintf(m, 256, "JTie: failed to find Java class '%s'\n",
+            _snprintf(m, 1024, "JTie: failed to find Java class '%s'\n",
+#endif
+                     (M::jclass_name == NULL ? "NULL" : M::jclass_name));
+            fprintf(stderr, m);
+            env->FatalError(m);
 #endif
-                     (C::class_name == NULL ? "NULL" : C::class_name));
-            fprintf(stderr, "%s", m);
-            env->ExceptionDescribe();
-            env->FatalError(m); // XXX for debugging
-//#endif // NDEBUG
         } else {
             assert(env->ExceptionCheck() == JNI_OK); // ok
         }
@@ -240,7 +243,7 @@ struct MemberId {
         // multithreaded access ok, inaccurate if non-atomic increment
         nIdLookUps++;
         return jniGetMemberID< ID_t >(env, cls,
-                                      C::member_name, C::member_descriptor);
+                                      M::member_name, M::member_descriptor);
     }
 
     /**
@@ -259,9 +262,9 @@ struct MemberId {
 /**
  * Base class for caching of JNI Field/Method IDs.
  */
-template< typename C >
-struct MemberIdCache : MemberId< C > {
-    typedef typename C::memberID_t ID_t;
+template< typename M >
+struct MemberIdCache : MemberId< M > {
+    typedef typename M::memberID_t ID_t;
 
     static ID_t getId(JNIEnv * env, jclass cls) {
         assert(cls != NULL);
@@ -283,10 +286,10 @@ protected:
  * Provides caching of JNI Field/Method IDs using weak class references,
  * allowing classes to be unloaded when no longer used by Java code.
  */
-template< typename C >
-struct MemberIdWeakCache : MemberIdCache< C > {
-    typedef MemberId< C > A;
-    typedef MemberIdCache< C > Base;
+template< typename M >
+struct MemberIdWeakCache : MemberIdCache< M > {
+    typedef MemberId< M > A;
+    typedef MemberIdCache< M > Base;
 
     static void setClass(JNIEnv * env, jclass cls) {
         assert(cls != NULL);
@@ -325,10 +328,10 @@ struct MemberIdWeakCache : MemberIdCache
  * Provides caching of JNI Field/Method IDs using strong class references,
  * preventing classes from being unloaded even if no longer used by Java code.
  */
-template< typename C >
-struct MemberIdStrongCache : MemberIdCache< C > {
-    typedef MemberId< C > A;
-    typedef MemberIdCache< C > Base;
+template< typename M >
+struct MemberIdStrongCache : MemberIdCache< M > {
+    typedef MemberId< M > A;
+    typedef MemberIdCache< M > Base;
 
     static void setClass(JNIEnv * env, jclass cls) {
         assert(cls != NULL);
@@ -363,9 +366,9 @@ struct MemberIdStrongCache : MemberIdCac
  * Provides caching of JNI Field/Method IDs using weak class references
  * with preloading (at class initialization) -- VERY TRICKY, NOT SUPPORTED.
  */
-template< typename C >
-struct MemberIdPreloadedWeakCache : MemberIdWeakCache< C > {
-    typedef MemberIdWeakCache< C > Base;
+template< typename M >
+struct MemberIdPreloadedWeakCache : MemberIdWeakCache< M > {
+    typedef MemberIdWeakCache< M > Base;
 
     using Base::setClass; // use as inherited (some compiler wanted this)
 
@@ -388,9 +391,9 @@ struct MemberIdPreloadedWeakCache : Memb
  * Provides caching of JNI Field/Method IDs using strong class references
  * with preloading (at class initialization) -- VERY TRICKY, NOT SUPPORTED.
  */
-template< typename C >
-struct MemberIdPreloadedStrongCache : MemberIdStrongCache< C > {
-    typedef MemberIdStrongCache< C > Base;
+template< typename M >
+struct MemberIdPreloadedStrongCache : MemberIdStrongCache< M > {
+    typedef MemberIdStrongCache< M > Base;
 
     using Base::setClass; // use as inherited (some compiler wanted this)
 
@@ -406,15 +409,18 @@ struct MemberIdPreloadedStrongCache : Me
     using Base::releaseRef; // use as inherited (some compiler wanted this)
 };
 
-// XXX static initialization <-> multiple compilation units <-> jtie_lib.hpp
-template< typename C > unsigned long MemberId< C >
+// XXX now done by JTIE_INSTANTIATE_CLASS_MEMBER_INFO()
+// XXX test with multiple compilation units <-> jtie_lib.hpp
+#if 0
+template< typename M > unsigned long MemberId< M >
     ::nIdLookUps = 0;
 
-template< typename C > jclass MemberIdCache< C >
+template< typename M > jclass MemberIdCache< M >
     ::gClassRef = NULL;
 
-template< typename C > typename C::memberID_t MemberIdCache< C >
+template< typename M > typename M::memberID_t MemberIdCache< M >
     ::mid = NULL;
+#endif
 
 // XXX document
 
@@ -434,29 +440,29 @@ enum JniMemberIdCaching {
 /**
  * Generic class for member ID access with selection of caching strategy.
  */
-template< JniMemberIdCaching M, typename C >
+template< JniMemberIdCaching CS, typename M >
 struct JniMemberId;
 
-template< typename C >
-struct JniMemberId< NO_CACHING, C >
-    : MemberId< C > {};
-
-template< typename C >
-struct JniMemberId< WEAK_CACHING, C >
-    : MemberIdWeakCache< C > {};
-
-template< typename C >
-struct JniMemberId< STRONG_CACHING, C >
-    : MemberIdStrongCache< C > {};
+template< typename M >
+struct JniMemberId< NO_CACHING, M >
+    : MemberId< M > {};
+
+template< typename M >
+struct JniMemberId< WEAK_CACHING, M >
+    : MemberIdWeakCache< M > {};
+
+template< typename M >
+struct JniMemberId< STRONG_CACHING, M >
+    : MemberIdStrongCache< M > {};
 
 #if 0 // preloaded caching very tricky, not supported at this time
-template< typename C >
-struct JniMemberId< WEAK_CACHING_PRELOAD, C >
-    : MemberIdPreloadedWeakCache< C > {};
-
-template< typename C >
-struct JniMemberId< STRONG_CACHING_PRELOAD, C >
-    : MemberIdPreloadedStrongCache< C > {};
+template< typename M >
+struct JniMemberId< WEAK_CACHING_PRELOAD, M >
+    : MemberIdPreloadedWeakCache< M > {};
+
+template< typename M >
+struct JniMemberId< STRONG_CACHING_PRELOAD, M >
+    : MemberIdPreloadedStrongCache< M > {};
 #endif // preloaded caching very tricky, not supported at this time
 
 // ---------------------------------------------------------------------------

=== modified file 'storage/ndb/src/ndbjtie/jtie/jtie_tconv_object.hpp'
--- a/storage/ndb/src/ndbjtie/jtie/jtie_tconv_object.hpp	2011-07-04 15:58:21 +0000
+++ b/storage/ndb/src/ndbjtie/jtie/jtie_tconv_object.hpp	2011-10-31 07:34:12 +0000
@@ -1,18 +1,18 @@
 /*
- Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
 */
 /*
  * jtie_tconv_object.hpp
@@ -31,7 +31,7 @@
 // ---------------------------------------------------------------------------
 
 /**
- * A root class representing Java peer classes in type mappings.
+ * Internal root class for representing Java classes in peer type mappings.
  *
  * Rationale: A dedicated type, distinct from JNI's _jobject, allows for
  * better control of template resolution (avoiding ambiguities) and
@@ -41,62 +41,21 @@
 struct _jtie_Object : _jobject {
 };
 
-// XXX, document: type specifying an Object mapping with a class name
-
-// trait type wrapping named-parametrized Object mappings for specialization
-// XXX make use of
-//   JTIE_DEFINE_METHOD_MEMBER_INFO( _jtie_ObjectMapper< T > )
-// to replace
-//   static const char * const class_name;
-//   static const char * const member_name;
-//   static const char * const member_descriptor;
-//   typedef _jmethodID * memberID_t;
+/**
+ * Internal, generic trait type mapping a Java class.
+ *
+ * Rationale: This generic class has outlived its purpose, but can be used
+ * as a container for additional, class-specific mapping information.
+ */
 template< typename J >
 struct _jtie_ObjectMapper : _jtie_Object {
-    // the name of the Java peer class in the JVM format (i.e., '/'-separated)
-    static const char * const class_name;
-
-    // the name, descriptor, and JNI type of the class's no-arg c'tor
-    static const char * const member_name;
-    static const char * const member_descriptor;
-    typedef _jmethodID * memberID_t;
+    /**
+     * Name and descriptor of this class's no-argument constructor.
+     */
+    // XXX cleanup: use a template decl instead of a macro
+    JTIE_DEFINE_METHOD_MEMBER_INFO( ctor )
 };
 
-// XXX static initialization <-> multiple compilation units <-> jtie_lib.hpp
-template< typename J >
-const char * const _jtie_ObjectMapper< J >::class_name
-    = J::class_name; // XXX static initialization order dependency?
-
-template< typename J >
-const char * const _jtie_ObjectMapper< J >::member_name
-    = "<init>";
-
-template< typename J >
-const char * const _jtie_ObjectMapper< J >::member_descriptor
-    = "()V";
-
-// Design note:
-//
-// As of pre-C++0x, string literals cannot be used as template arguments
-// which must be integral constants with external linkage.
-//
-// So, we cannot declare:
-//
-//    template< const char * >
-//    struct _jtie_ClassNamedObject : _jtie_Object {
-//        static const char * const java_internal_class_name;
-//    };
-//
-// As a feasible workaround, we require the application to provide a
-// trait type for each class, e.g.
-//
-//    struct _m_A : _jobject {
-//        static const char * const java_internal_class_name;
-//    };
-//    const char * const _m_A::java_internal_class_name = "myjapi/A";
-//
-// and we retrieve the class name from there.
-
 /**
  * Defines the trait type aliases for the mapping of a
  * user-defined Java class to a C++ class.
@@ -127,9 +86,7 @@ const char * const _jtie_ObjectMapper< J
  * to be prepended with the C++ keyword "typename".
  */
 #define JTIE_DEFINE_PEER_CLASS_MAPPING( C, T )                          \
-    struct T {                                                          \
-        static const char * const class_name;                           \
-    };                                                                  \
+    struct T {};                                                        \
     typedef ttrait< jobject, C, _jtie_ObjectMapper< T > *               \
                     > ttrait_##T##_t;                                   \
     typedef ttrait< jobject, const C, _jtie_ObjectMapper< T > *         \
@@ -220,18 +177,11 @@ const char * const _jtie_ObjectMapper< J
 #endif // XXX cleanup this unsupported mapping
 
 // XXX to document
-// XXX static initialization <-> multiple compilation units <-> jtie_lib.hpp
-// XXX replace
-//   template struct MemberId< _jtie_ObjectMapper< T > >;
-//   template struct MemberIdCache< _jtie_ObjectMapper< T > >;
-// with
-//   JTIE_INSTANTIATE_CLASS_MEMBER_INFO_X(_jtie_ObjectMapper< T >,
-//                                        JCN, "<init>", "()V")
-#define JTIE_INSTANTIATE_PEER_CLASS_MAPPING( T, JCN )           \
-    const char * const T::class_name = JCN;                     \
-    template struct _jtie_ObjectMapper< T >;                    \
-    template struct MemberId< _jtie_ObjectMapper< T > >;        \
-    template struct MemberIdCache< _jtie_ObjectMapper< T > >;
+// XXX cleanup: symmetry with JTIE_DEFINE_METHOD_MEMBER_INFO( ctor ) above
+#define JTIE_INSTANTIATE_PEER_CLASS_MAPPING( T, JCN )                   \
+    template struct _jtie_ObjectMapper< T >;                            \
+    JTIE_INSTANTIATE_CLASS_MEMBER_INFO(_jtie_ObjectMapper< T >::ctor,   \
+                                       JCN, "<init>", "()V")
 
 // ---------------------------------------------------------------------------
 

=== modified file 'storage/ndb/src/ndbjtie/jtie/jtie_tconv_object_impl.hpp'
--- a/storage/ndb/src/ndbjtie/jtie/jtie_tconv_object_impl.hpp	2011-07-04 15:58:21 +0000
+++ b/storage/ndb/src/ndbjtie/jtie/jtie_tconv_object_impl.hpp	2011-10-31 07:34:12 +0000
@@ -1,18 +1,18 @@
 /*
- Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
 */
 /*
  * jtie_tconv_object_impl.hpp
@@ -45,12 +45,14 @@ JTIE_DEFINE_FIELD_MEMBER_INFO(_Wrapper_c
 typedef JniMemberId< WEAK_CACHING, _Wrapper_cdelegate > Wrapper_cdelegate;
 //typedef JniMemberId< STRONG_CACHING, _Wrapper_cdelegate > Wrapper_cdelegate;
 
+// ---------------------------------------------------------------------------
+
 // XXX consider changing
 //template< typename C > struct ObjectParam< _jtie_Object *, C * > {
 // to
 //template< typename J, typename C > struct ObjectParam< J *, C * > {
-//
-// same for Target, Result
+// same for Target, Result; or conversely
+//template< typename J > struct ObjectResult< J *, void * > {
 
 // Implements the mapping of jtie_Objects parameters.
 template< typename J, typename C >
@@ -195,10 +197,10 @@ struct Target< _jtie_Object *, C > {
 template< typename J, typename C >
 struct ObjectResult< J *, C * > {
     // Provides a (cached) access to the method Id of the constructor of J.
-    //typedef JniMemberId< NO_CACHING, J > J_ctor;
-    typedef JniMemberId< WEAK_CACHING, J > J_ctor;
-    //typedef JniMemberId< STRONG_CACHING, J > J_ctor;
-    
+    //typedef JniMemberId< NO_CACHING, typename J::ctor > J_ctor;
+    typedef JniMemberId< WEAK_CACHING, typename J::ctor > J_ctor;
+    //typedef JniMemberId< STRONG_CACHING, typename J::ctor > J_ctor;
+
     static J *
     convert(C * c, JNIEnv * env) {
         TRACE("J * ObjectResult.convert(JNIEnv *, C *)");

=== modified file 'storage/ndb/test/ndbapi/testNodeRestart.cpp'
--- a/storage/ndb/test/ndbapi/testNodeRestart.cpp	2011-10-17 13:54:09 +0000
+++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp	2011-10-28 14:17:25 +0000
@@ -4757,6 +4757,125 @@ int runSplitLatency25PctFail(NDBT_Contex
   return NDBT_OK;
 }
 
+int
+runMasterFailSlowLCP(NDBT_Context* ctx, NDBT_Step* step)
+{
+  /* Motivated by bug# 13323589 */
+  NdbRestarter res;
+
+  if (res.getNumDbNodes() < 4)
+  {
+    return NDBT_OK;
+  }
+
+  int master = res.getMasterNodeId();
+  int otherVictim = res.getRandomNodeOtherNodeGroup(master, rand());
+  int nextMaster = res.getNextMasterNodeId(master);
+  nextMaster = (nextMaster == otherVictim) ? res.getNextMasterNodeId(otherVictim) :
+    nextMaster;
+  assert(nextMaster != master);
+  assert(nextMaster != otherVictim);
+
+  /* Get a node which is not current or next master */
+  int slowNode= nextMaster;
+  while ((slowNode == nextMaster) ||
+         (slowNode == otherVictim) ||
+         (slowNode == master))
+  {
+    slowNode = res.getRandomNotMasterNodeId(rand());
+  }
+
+  ndbout_c("master: %d otherVictim : %d nextMaster: %d slowNode: %d",
+           master,
+           otherVictim,
+           nextMaster,
+           slowNode);
+
+  /* Steps :
+   * 1. Insert slow LCP frag error in slowNode
+   * 2. Start LCP
+   * 3. Wait for LCP to start
+   * 4. Kill at least two nodes including Master
+   * 5. Wait for killed nodes to attempt to rejoin
+   * 6. Remove slow LCP error
+   * 7. Allow system to stabilise + check no errors
+   */
+  // 5073 = Delay on handling BACKUP_FRAGMENT_CONF in LQH
+  if (res.insertErrorInNode(slowNode, 5073))
+  {
+    return NDBT_FAILED;
+  }
+
+  {
+    int req[1] = {DumpStateOrd::DihStartLcpImmediately};
+    if (res.dumpStateOneNode(master, req, 1))
+    {
+      return NDBT_FAILED;
+    }
+  }
+
+  ndbout_c("Giving LCP time to start...");
+
+  NdbSleep_SecSleep(10);
+
+  ndbout_c("Killing other victim node (%u)...", otherVictim);
+
+  if (res.restartOneDbNode(otherVictim, false, false, true))
+  {
+    return NDBT_FAILED;
+  }
+
+  ndbout_c("Killing Master node (%u)...", master);
+
+  if (res.restartOneDbNode(master, false, false, true))
+  {
+    return NDBT_FAILED;
+  }
+
+  /*
+     ndbout_c("Waiting for old Master node to enter NoStart state...");
+     if (res.waitNodesNoStart(&master, 1, 10))
+     return NDBT_FAILED;
+
+     ndbout_c("Starting old Master...");
+     if (res.startNodes(&master, 1))
+     return NDBT_FAILED;
+
+  */
+  ndbout_c("Waiting for some progress on old Master and other victim restart");
+  NdbSleep_SecSleep(15);
+
+  ndbout_c("Now removing error insert on slow node (%u)", slowNode);
+
+  if (res.insertErrorInNode(slowNode, 0))
+  {
+    return NDBT_FAILED;
+  }
+
+  ndbout_c("Now wait a while to check stability...");
+  NdbSleep_SecSleep(30);
+
+  if (res.getNodeStatus(master) == NDB_MGM_NODE_STATUS_NOT_STARTED)
+  {
+    ndbout_c("Old Master needs kick to restart");
+    if (res.startNodes(&master, 1))
+    {
+      return NDBT_FAILED;
+    }
+  }
+
+  ndbout_c("Wait for cluster recovery...");
+  if (res.waitClusterStarted())
+  {
+    return NDBT_FAILED;
+  }
+
+
+  ndbout_c("Done");
+  return NDBT_OK;
+}
+
+
 NDBT_TESTSUITE(testNodeRestart);
 TESTCASE("NoLoad", 
 	 "Test that one node at a time can be stopped and then restarted "\
@@ -5288,6 +5407,11 @@ TESTCASE("Bug57522", "")
 {
   INITIALIZER(runBug57522);
 }
+TESTCASE("MasterFailSlowLCP",
+         "DIH Master failure during a slow LCP can cause a crash.")
+{
+  INITIALIZER(runMasterFailSlowLCP);
+}
 TESTCASE("ForceStopAndRestart", "Test restart and stop -with force flag")
 {
   STEP(runForceStopAndRestart);

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2011-10-14 13:24:26 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2011-10-28 14:17:25 +0000
@@ -1835,3 +1835,8 @@ max-time 1800
 cmd: testNdbApi
 args: -n TestFragmentedSend T1
 
+max-time: 300
+cmd: testNodeRestart
+args: -n MasterFailSlowLCP T1
+
+

=== modified file 'storage/ndb/test/src/HugoQueries.cpp'
--- a/storage/ndb/test/src/HugoQueries.cpp	2011-10-21 08:59:23 +0000
+++ b/storage/ndb/test/src/HugoQueries.cpp	2011-10-28 08:47:01 +0000
@@ -219,11 +219,51 @@ HugoQueries::runLookupQuery(Ndb* pNdb,
       pTrans->close();
       return NDBT_FAILED;
     }
+#if 0
+    // Disabled, as this is incorrectly handled in SPJ API, will fix soon
+    else
+    {
+      /**
+       * If ::execute() didn't fail, there should not be an error on
+       * its NdbError object either:
+       */
+      const NdbError err = pTrans->getNdbError();
+      if (err.code)
+      {
+        ERR(err);
+        ndbout_c("API INCONSISTENCY: NdbTransaction returned NdbError even if ::execute() succeeded");
+        pTrans->close();
+        return NDBT_FAILED;
+      }
+    }
+#endif
 
+    bool retry = false;
     for (int b = 0; b<batch; b++)
     {
       NdbQuery * query = queries[b];
-      if (query->nextResult() == NdbQuery::NextResult_gotRow)
+
+      /**
+       * As NdbQuery is always 'dirty read' (impl. limitations), 'AbortOnError'
+       * is ignored and handled as 'IgnoreError'. We will therefore not get
+       * errors returned from ::execute() or set into 'pTrans->getNdbError()':
+       * Has to check for errors on the NdbQuery object instead:
+       */
+      const NdbError& err = query->getNdbError();
+      if (err.code)
+      {
+        ERR(err);
+        if (err.status == NdbError::TemporaryError){
+          pTrans->close();
+          retry = true;
+          break;
+        }
+        pTrans->close();
+        return NDBT_FAILED;
+      }
+
+      const NdbQuery::NextResultOutcome stat = query->nextResult();
+      if (stat == NdbQuery::NextResult_gotRow)
       {
         for (unsigned o = 0; o<m_ops.size(); o++)
         {
@@ -239,7 +279,26 @@ HugoQueries::runLookupQuery(Ndb* pNdb,
           }
         }
       }
+      else if (stat == NdbQuery::NextResult_error)
+      {
+        const NdbError& err = query->getNdbError();
+        ERR(err);
+        if (err.status == NdbError::TemporaryError){
+          pTrans->close();
+          retry = true;
+          break;
+        }
+        pTrans->close();
+        return NDBT_FAILED;
+      }
+    }
+    if (retry)
+    {
+      NdbSleep_MilliSleep(50);
+      retryAttempt++;
+      continue;
     }
+
     pTrans->close();
     r += batch;
 
@@ -312,6 +371,44 @@ HugoQueries::runScanQuery(Ndb * pNdb,
       pTrans->close();
       return NDBT_FAILED;
     }
+    else
+    {
+      // Disabled, as this is incorrectly handled in SPJ API, will fix soon
+#if 0
+      /**
+       * If ::execute() didn't fail, there should not be an error on
+       * its NdbError object either:
+       */
+      const NdbError err = pTrans->getNdbError();
+      if (err.code)
+      {
+        ERR(err);
+        ndbout_c("API INCONSISTENCY: NdbTransaction returned NdbError even if ::execute() succeeded");
+        pTrans->close();
+        return NDBT_FAILED;
+      }
+#endif
+
+      /**
+       * As NdbQuery is always 'dirty read' (impl. limitations), 'AbortOnError'
+       * is ignored and handled as 'IgnoreError'. We will therefore not get
+       * errors returned from ::execute() or set into 'pTrans->getNdbError()':
+       * Has to check for errors on the NdbQuery object instead:
+       */
+      NdbError err = query->getNdbError();
+      if (err.code)
+      {
+        ERR(err);
+        if (err.status == NdbError::TemporaryError){
+          pTrans->close();
+          NdbSleep_MilliSleep(50);
+          retryAttempt++;
+          continue;
+        }
+        pTrans->close();
+        return NDBT_FAILED;
+      }
+    }
 
     int r = rand() % 100;
     if (r < abort && ((r & 1) == 0))

=== modified file 'storage/ndb/tools/CMakeLists.txt'
--- a/storage/ndb/tools/CMakeLists.txt	2011-10-05 13:41:47 +0000
+++ b/storage/ndb/tools/CMakeLists.txt	2011-10-28 11:52:35 +0000
@@ -73,7 +73,11 @@ MYSQL_ADD_EXECUTABLE(ndb_config
 TARGET_LINK_LIBRARIES(ndb_config ndbmgmclient ndbconf)
 
 # Build ndbinfo_sql and run it to create ndbinfo.sql
-ADD_EXECUTABLE(ndbinfo_sql ndbinfo_sql.cpp)
+ADD_EXECUTABLE(ndbinfo_sql
+  ndbinfo_sql.cpp
+  ${CMAKE_SOURCE_DIR}/storage/ndb/src/kernel/blocks/dbtc/DbtcStateDesc.cpp
+  ${CMAKE_SOURCE_DIR}/storage/ndb/src/kernel/blocks/dblqh/DblqhStateDesc.cpp
+)
 TARGET_LINK_LIBRARIES(ndbinfo_sql ndbclient)
 GET_TARGET_PROPERTY(NDBINFO_SQL_EXE ndbinfo_sql LOCATION)
 ADD_CUSTOM_COMMAND(OUTPUT ${PROJECT_SOURCE_DIR}/storage/ndb/tools/ndbinfo.sql

=== modified file 'storage/ndb/tools/Makefile.am'
--- a/storage/ndb/tools/Makefile.am	2011-10-05 13:41:47 +0000
+++ b/storage/ndb/tools/Makefile.am	2011-10-28 11:52:35 +0000
@@ -70,7 +70,9 @@ ndb_restore_LDADD = $(top_builddir)/stor
 ndbinfo_sql_SOURCES = ndbinfo_sql.cpp \
 	../src/mgmsrv/Config.cpp \
 	../src/mgmsrv/InitConfigFileParser.cpp \
-        $(top_srcdir)/storage/ndb/src/kernel/vm/mt_thr_config.cpp
+        $(top_srcdir)/storage/ndb/src/kernel/vm/mt_thr_config.cpp \
+        $(top_srcdir)/storage/ndb/src/kernel/blocks/dbtc/DbtcStateDesc.cpp \
+        $(top_srcdir)/storage/ndb/src/kernel/blocks/dblqh/DblqhStateDesc.cpp
 
 ndbinfo_sql_CXXFLAGS = -I$(top_srcdir)/storage/ndb/src/mgmapi
 

=== modified file 'storage/ndb/tools/ndbinfo_sql.cpp'
--- a/storage/ndb/tools/ndbinfo_sql.cpp	2011-10-07 17:15:53 +0000
+++ b/storage/ndb/tools/ndbinfo_sql.cpp	2011-10-28 11:52:35 +0000
@@ -50,12 +50,12 @@ struct view {
     "used, total, high, entry_size, cp1.param_name AS param_name1, "
     "cp2.param_name AS param_name2, cp3.param_name AS param_name3, "
     "cp4.param_name AS param_name4 "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>pools p "
-    "LEFT JOIN <NDBINFO_DB>.blocks b ON p.block_number = b.block_number "
-    "LEFT JOIN <NDBINFO_DB>.config_params cp1 ON p.config_param1 = cp1.param_number "
-    "LEFT JOIN <NDBINFO_DB>.config_params cp2 ON p.config_param2 = cp2.param_number "
-    "LEFT JOIN <NDBINFO_DB>.config_params cp3 ON p.config_param3 = cp3.param_number "
-    "LEFT JOIN <NDBINFO_DB>.config_params cp4 ON p.config_param4 = cp4.param_number"
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>pools` p "
+    "LEFT JOIN `<NDBINFO_DB>`.blocks b ON p.block_number = b.block_number "
+    "LEFT JOIN `<NDBINFO_DB>`.config_params cp1 ON p.config_param1 = cp1.param_number "
+    "LEFT JOIN `<NDBINFO_DB>`.config_params cp2 ON p.config_param2 = cp2.param_number "
+    "LEFT JOIN `<NDBINFO_DB>`.config_params cp3 ON p.config_param3 = cp3.param_number "
+    "LEFT JOIN `<NDBINFO_DB>`.config_params cp4 ON p.config_param4 = cp4.param_number"
   },
 #endif
   { "transporters",
@@ -67,7 +67,7 @@ struct view {
     "  WHEN 3 THEN \"DISCONNECTING\""
     "  ELSE NULL "
     " END AS status "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>transporters"
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>transporters`"
   },
   { "logspaces",
     "SELECT node_id, "
@@ -77,7 +77,7 @@ struct view {
     "  ELSE NULL "
     " END AS log_type, "
     "log_id, log_part, total, used "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>logspaces"
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>logspaces`"
   },
   { "logbuffers",
     "SELECT node_id, "
@@ -87,7 +87,7 @@ struct view {
     "  ELSE \"<unknown>\" "
     " END AS log_type, "
     "log_id, log_part, total, used "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>logbuffers"
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>logbuffers`"
   },
   { "resources",
     "SELECT node_id, "
@@ -105,9 +105,9 @@ struct view {
     "  ELSE \"<unknown>\" "
     " END AS resource_name, "
     "reserved, used, max "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>resources"
-   },
-   { "counters",
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>resources`"
+  },
+  { "counters",
     "SELECT node_id, b.block_name, block_instance, "
     "counter_id, "
     "CASE counter_id"
@@ -137,11 +137,11 @@ struct view {
     "  ELSE \"<unknown>\" "
     " END AS counter_name, "
     "val "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>counters c "
-    "LEFT JOIN <NDBINFO_DB>.blocks b "
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>counters` c "
+    "LEFT JOIN `<NDBINFO_DB>`.blocks b "
     "ON c.block_number = b.block_number"
-   },
-   { "nodes",
+  },
+  { "nodes",
     "SELECT node_id, "
     "uptime, "
     "CASE status"
@@ -158,8 +158,8 @@ struct view {
     " END AS status, "
     "start_phase, "
     "config_generation "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>nodes"
-   },
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>nodes`"
+  },
   { "memoryusage",
     "SELECT node_id,"
     "  pool_name AS memory_type,"
@@ -167,17 +167,85 @@ struct view {
     "  SUM(used) AS used_pages,"
     "  SUM(total*entry_size) AS total,"
     "  SUM(total) AS total_pages "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>pools "
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>pools` "
     "WHERE block_number IN (248, 254) AND "
     "  (pool_name = \"Index memory\" OR pool_name = \"Data memory\") "
     "GROUP BY node_id, memory_type"
   },
-   { "diskpagebuffer",
+  { "diskpagebuffer",
      "SELECT node_id, block_instance, "
      "pages_written, pages_written_lcp, pages_read, log_waits, "
      "page_requests_direct_return, page_requests_wait_queue, page_requests_wait_io "
-     "FROM <NDBINFO_DB>.<TABLE_PREFIX>diskpagebuffer"
-   }
+     "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>diskpagebuffer`"
+  },
+  { "threadblocks",
+    "SELECT t.node_id, t.thr_no, b.block_name, t.block_instance "
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>threadblocks` t "
+    "LEFT JOIN `<NDBINFO_DB>`.blocks b "
+    "ON t.block_number = b.block_number"
+  },
+  { "threadstat",
+    "SELECT * from `<NDBINFO_DB>`.`<TABLE_PREFIX>threadstat`"
+  },
+  { "cluster_transactions",
+    "SELECT"
+    " t.node_id,"
+    " t.block_instance,"
+    " t.transid0 + (t.transid1 << 32) as transid,"
+    " s.state_friendly_name as state, "
+    " t.c_ops as count_operations, "
+    " t.outstanding as outstanding_operations, "
+    " t.timer as inactive_seconds, "
+    " (t.apiref & 65535) as client_node_id, "
+    " (t.apiref >> 16) as client_block_ref "
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>transactions` t"
+    " LEFT JOIN `<NDBINFO_DB>`.`<TABLE_PREFIX>dbtc_apiconnect_state` s"
+    "        ON s.state_int_value = t.state"
+  },
+  { "server_transactions",
+    "SELECT map.mysql_connection_id, t.*"
+    "FROM information_schema.ndb_transid_mysql_connection_map map "
+    "JOIN `<NDBINFO_DB>`.cluster_transactions t "
+    "  ON (map.ndb_transid >> 32) = (t.transid >> 32)"
+  },
+  { "cluster_operations",
+    "SELECT"
+    " o.node_id,"
+    " o.block_instance,"
+    " o.transid0 + (o.transid1 << 32) as transid,"
+    " case o.op "
+    " when 1 then \"READ\""
+    " when 2 then \"READ-SH\""
+    " when 3 then \"READ-EX\""
+    " when 4 then \"INSERT\""
+    " when 5 then \"UPDATE\""
+    " when 6 then \"DELETE\""
+    " when 7 then \"WRITE\""
+    " when 8 then \"UNLOCK\""
+    " when 9 then \"REFRESH\""
+    " when 257 then \"SCAN\""
+    " when 258 then \"SCAN-SH\""
+    " when 259 then \"SCAN-EX\""
+    " ELSE \"<unknown>\""
+    " END as operation_type, "
+    " s.state_friendly_name as state, "
+    " o.tableid, "
+    " o.fragmentid, "
+    " (o.apiref & 65535) as client_node_id, "
+    " (o.apiref >> 16) as client_block_ref, "
+    " (o.tcref & 65535) as tc_node_id, "
+    " ((o.tcref >> 16) & 511) as tc_block_no, "
+    " ((o.tcref >> (16 + 9)) & 127) as tc_block_instance "
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>operations` o"
+    " LEFT JOIN `<NDBINFO_DB>`.`<TABLE_PREFIX>dblqh_tcconnect_state` s"
+    "        ON s.state_int_value = o.state"
+  },
+  { "server_operations",
+    "SELECT map.mysql_connection_id, o.* "
+    "FROM `<NDBINFO_DB>`.cluster_operations o "
+    "JOIN information_schema.ndb_transid_mysql_connection_map map"
+    "  ON (map.ndb_transid >> 32) = (o.transid >> 32)"
+  }
 };
 
 size_t num_views = sizeof(views)/sizeof(views[0]);
@@ -214,6 +288,38 @@ static void fill_blocks(BaseString& sql)
   }
 }
 
+#include "kernel/statedesc.hpp"
+
+static void fill_dbtc_apiconnect_state(BaseString& sql)
+{
+  const char* separator = "";
+  for (unsigned i = 0; g_dbtc_apiconnect_state_desc[i].name != 0; i++)
+  {
+    sql.appfmt("%s(%u, \"%s\", \"%s\", \"%s\")",
+               separator,
+               g_dbtc_apiconnect_state_desc[i].value,
+               g_dbtc_apiconnect_state_desc[i].name,
+               g_dbtc_apiconnect_state_desc[i].friendly_name,
+               g_dbtc_apiconnect_state_desc[i].description);
+    separator = ", ";
+  }
+}
+
+static void fill_dblqh_tcconnect_state(BaseString& sql)
+{
+  const char* separator = "";
+  for (unsigned i = 0; g_dblqh_tcconnect_state_desc[i].name != 0; i++)
+  {
+    sql.appfmt("%s(%u, \"%s\", \"%s\", \"%s\")",
+               separator,
+               g_dblqh_tcconnect_state_desc[i].value,
+               g_dblqh_tcconnect_state_desc[i].name,
+               g_dblqh_tcconnect_state_desc[i].friendly_name,
+               g_dblqh_tcconnect_state_desc[i].description);
+    separator = ", ";
+  }
+}
+
 struct lookup {
   const char* name;
   const char* columns;
@@ -224,12 +330,28 @@ struct lookup {
     "block_number INT UNSIGNED PRIMARY KEY, "
     "block_name VARCHAR(512)",
     &fill_blocks
-   },
+  },
   { "config_params",
     "param_number INT UNSIGNED PRIMARY KEY, "
     "param_name VARCHAR(512)",
     &fill_config_params
-   }
+  },
+  {
+    "<TABLE_PREFIX>dbtc_apiconnect_state",
+    "state_int_value  INT UNSIGNED PRIMARY KEY, "
+    "state_name VARCHAR(256), "
+    "state_friendly_name VARCHAR(256), "
+    "state_description VARCHAR(256)",
+    &fill_dbtc_apiconnect_state
+  },
+  {
+    "<TABLE_PREFIX>dblqh_tcconnect_state",
+    "state_int_value  INT UNSIGNED PRIMARY KEY, "
+    "state_name VARCHAR(256), "
+    "state_friendly_name VARCHAR(256), "
+    "state_description VARCHAR(256)",
+    &fill_dblqh_tcconnect_state
+  }
 };
 
 size_t num_lookups = sizeof(lookups)/sizeof(lookups[0]);
@@ -339,7 +461,7 @@ int main(int argc, char** argv){
   printf("# Drop any old views in %s\n", opt_ndbinfo_db);
   for (size_t i = 0; i < num_views; i++)
   {
-    sql.assfmt("DROP VIEW IF EXISTS %s.%s",
+    sql.assfmt("DROP VIEW IF EXISTS `%s`.`%s`",
                opt_ndbinfo_db, views[i].name);
     print_conditional_sql(sql);
   }
@@ -347,8 +469,10 @@ int main(int argc, char** argv){
   printf("# Drop any old lookup tables in %s\n", opt_ndbinfo_db);
   for (size_t i = 0; i < num_lookups; i++)
   {
-    sql.assfmt("DROP TABLE IF EXISTS %s.%s",
-               opt_ndbinfo_db, lookups[i].name);
+    BaseString table_name = replace_tags(lookups[i].name);
+
+    sql.assfmt("DROP TABLE IF EXISTS `%s`.`%s`",
+               opt_ndbinfo_db, table_name.c_str());
     print_conditional_sql(sql);
   }
 
@@ -409,16 +533,17 @@ int main(int argc, char** argv){
   for (size_t i = 0; i < num_lookups; i++)
   {
     lookup l = lookups[i];
-    printf("# %s.%s\n", opt_ndbinfo_db, l.name);
+    BaseString table_name = replace_tags(l.name);
+    printf("# %s.%s\n", opt_ndbinfo_db, table_name.c_str());
 
     /* Create lookup table */
     sql.assfmt("CREATE TABLE `%s`.`%s` (%s)",
-               opt_ndbinfo_db, l.name, l.columns);
+               opt_ndbinfo_db, table_name.c_str(), l.columns);
     print_conditional_sql(sql);
 
     /* Insert data */
     sql.assfmt("INSERT INTO `%s`.`%s` VALUES ",
-               opt_ndbinfo_db, l.name);
+               opt_ndbinfo_db, table_name.c_str());
     l.fill(sql);
     print_conditional_sql(sql);
   }
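
The lookup-table names now carry the <TABLE_PREFIX> tag (e.g.
"<TABLE_PREFIX>dbtc_apiconnect_state"), which is why the DROP/CREATE/INSERT
statements above run them through replace_tags() first. The actual
replace_tags() in ndbinfo_sql.cpp is not part of these hunks; the sketch below
only illustrates the intended substitution, with the substituted values
("ndbinfo", "ndb$") assumed from the generated ndbinfo.sql:

  #include <string>

  // Stand-in for the real replace_tags(); names and values are assumptions.
  static void replace_all(std::string& s,
                          const std::string& tag,
                          const std::string& value)
  {
    for (std::string::size_type pos = s.find(tag);
         pos != std::string::npos;
         pos = s.find(tag, pos + value.length()))
    {
      s.replace(pos, tag.length(), value);
    }
  }

  static std::string replace_tags_sketch(const char* name)
  {
    std::string result(name);
    replace_all(result, "<NDBINFO_DB>", "ndbinfo");   // database name option
    replace_all(result, "<TABLE_PREFIX>", "ndb$");    // table prefix option
    return result;
  }

  // "<TABLE_PREFIX>dbtc_apiconnect_state" becomes "ndb$dbtc_apiconnect_state"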

No bundle (reason: useless for push emails).