List: Commits « Previous Message | Next Message »
From: jonas oreland    Date: October 18 2011 12:47pm
Subject: bzr push into mysql-5.1-telco-7.0-llcp branch (jonas.oreland:3692 to 3693)
View as plain text  
 3693 jonas oreland	2011-10-18 [merge]
      ndb - merge 70-tip into 70-jonas (llcp)

    added:
      mysql-test/include/not_ndb_is.inc
      mysql-test/suite/ndb_big/bug37983-master.opt
      mysql-test/suite/ndb_big/disabled.def
      storage/ndb/include/portlib/NdbGetRUsage.h
      storage/ndb/src/common/portlib/NdbGetRUsage.cpp
      storage/ndb/src/kernel/blocks/thrman.cpp
      storage/ndb/src/kernel/blocks/thrman.hpp
      storage/ndb/test/run-test/conf-daily-perf.cnf
      storage/ndb/test/run-test/daily-perf-tests.txt
      storage/ndb/tools/ndbinfo_select_all.cpp
    modified:
      client/mysqlbinlog.cc
      mysql-test/mysql-test-run.pl
      mysql-test/r/information_schema.result
      mysql-test/r/information_schema_db.result
      mysql-test/suite/binlog/t/binlog_row_mysqlbinlog_db_filter.test
      mysql-test/suite/funcs_1/r/is_columns_is.result
      mysql-test/suite/funcs_1/r/is_tables_is.result
      mysql-test/suite/funcs_1/t/is_columns_is.test
      mysql-test/suite/funcs_1/t/is_tables_is.test
      mysql-test/suite/ndb/r/ndb_alter_table.result
      mysql-test/suite/ndb/r/ndb_basic.result
      mysql-test/suite/ndb/r/ndb_index_stat.result
      mysql-test/suite/ndb/r/ndbinfo.result
      mysql-test/suite/ndb/r/ndbinfo_dump.result
      mysql-test/suite/ndb/t/ndb_alter_table.test
      mysql-test/t/information_schema.test
      mysql-test/t/information_schema_db.test
      mysql-test/t/mysqlshow.test
      sql/ha_ndb_index_stat.cc
      sql/ha_ndb_index_stat.h
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster.h
      sql/ha_ndbcluster_binlog.cc
      sql/ha_ndbcluster_connection.cc
      sql/ha_ndbinfo.cc
      sql/ha_ndbinfo.h
      sql/sql_parse.cc
      storage/ndb/include/kernel/BlockNumbers.h
      storage/ndb/include/kernel/ndb_limits.h
      storage/ndb/include/kernel/signaldata/SchemaTrans.hpp
      storage/ndb/include/mgmapi/mgmapi_config_parameters.h
      storage/ndb/include/ndb_constants.h
      storage/ndb/include/ndbapi/Ndb.hpp
      storage/ndb/src/common/debugger/BlockNames.cpp
      storage/ndb/src/common/debugger/signaldata/ScanTab.cpp
      storage/ndb/src/common/portlib/CMakeLists.txt
      storage/ndb/src/common/portlib/Makefile.am
      storage/ndb/src/common/portlib/NdbThread.c
      storage/ndb/src/kernel/SimBlockList.cpp
      storage/ndb/src/kernel/blocks/CMakeLists.txt
      storage/ndb/src/kernel/blocks/LocalProxy.cpp
      storage/ndb/src/kernel/blocks/LocalProxy.hpp
      storage/ndb/src/kernel/blocks/Makefile.am
      storage/ndb/src/kernel/blocks/PgmanProxy.cpp
      storage/ndb/src/kernel/blocks/PgmanProxy.hpp
      storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
      storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
      storage/ndb/src/kernel/blocks/dbinfo/Dbinfo.cpp
      storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhProxy.cpp
      storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjProxy.hpp
      storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcProxy.hpp
      storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
      storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
      storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
      storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
      storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
      storage/ndb/src/kernel/blocks/record_types.hpp
      storage/ndb/src/kernel/blocks/tsman.cpp
      storage/ndb/src/kernel/ndbd.cpp
      storage/ndb/src/kernel/vm/DLFifoList.hpp
      storage/ndb/src/kernel/vm/DLHashTable.hpp
      storage/ndb/src/kernel/vm/DataBuffer2.hpp
      storage/ndb/src/kernel/vm/Ndbinfo.hpp
      storage/ndb/src/kernel/vm/NdbinfoTables.cpp
      storage/ndb/src/kernel/vm/Pool.hpp
      storage/ndb/src/kernel/vm/SimulatedBlock.hpp
      storage/ndb/src/kernel/vm/dummy_nonmt.cpp
      storage/ndb/src/kernel/vm/mt.cpp
      storage/ndb/src/kernel/vm/mt.hpp
      storage/ndb/src/kernel/vm/mt_thr_config.cpp
      storage/ndb/src/kernel/vm/mt_thr_config.hpp
      storage/ndb/src/kernel/vm/ndbd_malloc_impl.hpp
      storage/ndb/src/mgmsrv/ConfigInfo.cpp
      storage/ndb/src/ndbapi/Ndb.cpp
      storage/ndb/src/ndbapi/NdbImpl.hpp
      storage/ndb/src/ndbapi/NdbQueryBuilder.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp
      storage/ndb/src/ndbapi/Ndbinit.cpp
      storage/ndb/src/ndbapi/TransporterFacade.cpp
      storage/ndb/src/ndbapi/ndberror.c
      storage/ndb/test/include/HugoCalculator.hpp
      storage/ndb/test/include/HugoOperations.hpp
      storage/ndb/test/include/HugoQueryBuilder.hpp
      storage/ndb/test/ndbapi/Makefile.am
      storage/ndb/test/ndbapi/testNdbApi.cpp
      storage/ndb/test/ndbapi/testNodeRestart.cpp
      storage/ndb/test/run-test/atrt.hpp
      storage/ndb/test/run-test/daily-basic-tests.txt
      storage/ndb/test/run-test/daily-devel-tests.txt
      storage/ndb/test/run-test/files.cpp
      storage/ndb/test/run-test/main.cpp
      storage/ndb/test/run-test/setup.cpp
      storage/ndb/test/src/HugoOperations.cpp
      storage/ndb/test/src/NDBT_Find.cpp
      storage/ndb/tools/CMakeLists.txt
      storage/ndb/tools/Makefile.am
      storage/ndb/tools/ndbinfo_sql.cpp
      tests/mysql_client_test.c
 3692 Jonas Oreland	2011-09-29 [merge]
      ndb - merge 70 into 70-llcp

    added:
      mysql-test/include/have_binlog_row_v2.inc
      mysql-test/r/have_binlog_row_v2.require
      mysql-test/suite/ndb/ndb_config_threadconfig.ini
      mysql-test/suite/ndb/r/ndb_alter_table_error.result
      mysql-test/suite/ndb/r/ndb_index_stat_partitions.result
      mysql-test/suite/ndb/t/ndb_alter_table_error.test
      mysql-test/suite/ndb/t/ndb_index_stat_partitions-master.opt
      mysql-test/suite/ndb/t/ndb_index_stat_partitions.test
      mysql-test/suite/ndb_binlog/r/ndb_binlog_log_transaction_id.result
      mysql-test/suite/ndb_binlog/t/ndb_binlog_get_row_extra_data.inc
      mysql-test/suite/ndb_binlog/t/ndb_binlog_log_transaction_id-master.opt
      mysql-test/suite/ndb_binlog/t/ndb_binlog_log_transaction_id.test
      mysql-test/suite/ndb_rpl/r/ndb_rpl_conflict_epoch_trans.result
      mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_epoch_trans.cnf
      mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_epoch_trans.test
      mysql-test/suite/ndb_rpl/t/ndb_trans_conflict_info.inc
      mysql-test/suite/ndb_rpl/t/ndb_trans_conflict_info_init.inc
      mysql-test/suite/ndb_rpl/t/ndb_trans_conflict_info_stable.inc
      mysql-test/suite/rpl/r/rpl_extra_row_data.result
      mysql-test/suite/rpl/t/rpl_extra_row_data-master.opt
      mysql-test/suite/rpl/t/rpl_extra_row_data-slave.opt
      mysql-test/suite/rpl/t/rpl_extra_row_data.test
      sql/ndb_conflict_trans.cc
      sql/ndb_conflict_trans.h
      storage/ndb/include/util/HashMap2.hpp
      storage/ndb/include/util/LinkedStack.hpp
      storage/ndb/src/common/portlib/NdbMutex_DeadlockDetector.cpp
      storage/ndb/src/common/portlib/NdbMutex_DeadlockDetector.h
      storage/ndb/src/common/util/HashMap2.cpp
      storage/ndb/src/common/util/LinkedStack.cpp
      storage/ndb/src/kernel/vm/mt_thr_config.cpp
      storage/ndb/src/kernel/vm/mt_thr_config.hpp
      storage/ndb/src/ndbapi/NdbWaitGroup.cpp
      storage/ndb/src/ndbapi/NdbWaitGroup.hpp
      storage/ndb/src/ndbapi/WakeupHandler.cpp
      storage/ndb/src/ndbapi/WakeupHandler.hpp
      storage/ndb/test/ndbapi/testAsynchMultiwait.cpp
    modified:
      configure.in
      libmysqld/Makefile.am
      mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test
      mysql-test/include/show_binlog_events2.inc
      mysql-test/include/show_binlog_using_logname.inc
      mysql-test/r/ctype_cp932_binlog_stm.result
      mysql-test/r/flush2.result
      mysql-test/r/sp_trans_log.result
      mysql-test/suite/binlog/r/binlog_database.result
      mysql-test/suite/binlog/r/binlog_innodb.result
      mysql-test/suite/binlog/r/binlog_innodb_row.result
      mysql-test/suite/binlog/r/binlog_mixed_failure_mixing_engines.result
      mysql-test/suite/binlog/r/binlog_mixed_load_data.result
      mysql-test/suite/binlog/r/binlog_multi_engine.result
      mysql-test/suite/binlog/r/binlog_row_binlog.result
      mysql-test/suite/binlog/r/binlog_row_ctype_ucs.result
      mysql-test/suite/binlog/r/binlog_row_insert_select.result
      mysql-test/suite/binlog/r/binlog_row_mix_innodb_myisam.result
      mysql-test/suite/binlog/r/binlog_stm_binlog.result
      mysql-test/suite/binlog/r/binlog_stm_row.result
      mysql-test/suite/binlog/t/binlog_incident.test
      mysql-test/suite/binlog/t/binlog_killed.test
      mysql-test/suite/binlog/t/binlog_killed_simulate.test
      mysql-test/suite/ndb/my.cnf
      mysql-test/suite/ndb/r/ndb_auto_increment.result
      mysql-test/suite/ndb/r/ndb_basic.result
      mysql-test/suite/ndb/r/ndb_config.result
      mysql-test/suite/ndb/r/ndb_discover_db.result
      mysql-test/suite/ndb/r/ndb_index_stat.result
      mysql-test/suite/ndb/r/ndb_mgm.result
      mysql-test/suite/ndb/r/ndb_statistics1.result
      mysql-test/suite/ndb/t/ndb_auto_increment.test
      mysql-test/suite/ndb/t/ndb_basic.test
      mysql-test/suite/ndb/t/ndb_config.test
      mysql-test/suite/ndb/t/ndb_index_stat.test
      mysql-test/suite/ndb/t/ndb_mgm.test
      mysql-test/suite/ndb_binlog/my.cnf
      mysql-test/suite/ndb_binlog/r/ndb_binlog_ddl_multi.result
      mysql-test/suite/ndb_binlog/r/ndb_binlog_discover.result
      mysql-test/suite/ndb_binlog/r/ndb_binlog_log_bin.result
      mysql-test/suite/ndb_binlog/r/ndb_binlog_multi.result
      mysql-test/suite/ndb_binlog/r/ndb_binlog_restore.result
      mysql-test/suite/ndb_rpl/my.cnf
      mysql-test/suite/ndb_rpl/r/ndb_rpl_circular.result
      mysql-test/suite/ndb_rpl/r/ndb_rpl_conflict.result
      mysql-test/suite/ndb_rpl/r/ndb_rpl_conflict_epoch.result
      mysql-test/suite/ndb_rpl/r/ndb_rpl_dd_basic.result
      mysql-test/suite/ndb_rpl/r/ndb_rpl_multi.result
      mysql-test/suite/ndb_rpl/r/ndb_rpl_slave_lsu.result
      mysql-test/suite/ndb_rpl/r/ndb_rpl_slave_lsu_anyval.result
      mysql-test/suite/ndb_rpl/r/ndb_rpl_slave_restart.result
      mysql-test/suite/ndb_rpl/r/ndb_rpl_ui.result
      mysql-test/suite/ndb_rpl/r/rpl_truncate_7ndb.result
      mysql-test/suite/ndb_rpl/r/rpl_truncate_7ndb_2.result
      mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_epoch_1.inc
      mysql-test/suite/ndb_rpl/t/ndb_rpl_gap_event.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_multi.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_skip_gap_event.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_restart.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_ui.test
      mysql-test/suite/ndb_team/my.cnf
      mysql-test/suite/rpl/r/rpl_auto_increment_update_failure.result
      mysql-test/suite/rpl/r/rpl_innodb_mixed_dml.result
      mysql-test/suite/rpl/r/rpl_rbr_to_sbr.result
      mysql-test/suite/rpl/r/rpl_row_basic_11bugs.result
      mysql-test/suite/rpl/r/rpl_row_conflicts.result
      mysql-test/suite/rpl/r/rpl_row_log.result
      mysql-test/suite/rpl/r/rpl_row_log_innodb.result
      mysql-test/suite/rpl/r/rpl_slave_skip.result
      mysql-test/suite/rpl/r/rpl_temp_table_mix_row.result
      mysql-test/suite/rpl/t/rpl_row_4_bytes-master.opt
      mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test
      mysql-test/suite/rpl/t/rpl_row_mysqlbinlog.test
      mysql-test/suite/rpl/t/rpl_stm_flsh_tbls.test
      mysql-test/suite/rpl_ndb/my.cnf
      mysql-test/t/ctype_cp932_binlog_stm.test
      mysql-test/t/mysqlbinlog2.test
      sql/Makefile.am
      sql/ha_ndb_index_stat.cc
      sql/ha_ndb_index_stat.h
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster.h
      sql/ha_ndbcluster_binlog.cc
      sql/ha_ndbcluster_binlog.h
      sql/ha_ndbinfo.cc
      sql/log_event.cc
      sql/log_event.h
      sql/log_event_old.h
      sql/mysql_priv.h
      sql/mysqld.cc
      sql/ndb_mi.cc
      sql/ndb_mi.h
      sql/rpl_constants.h
      sql/rpl_injector.cc
      sql/rpl_injector.h
      sql/set_var.cc
      sql/slave.h
      sql/sql_class.cc
      sql/sql_class.h
      storage/ndb/CMakeLists.txt
      storage/ndb/include/kernel/signaldata/QueryTree.hpp
      storage/ndb/include/kernel/signaldata/TcKeyReq.hpp
      storage/ndb/include/mgmapi/mgmapi_config_parameters.h
      storage/ndb/include/mgmcommon/ConfigRetriever.hpp
      storage/ndb/include/ndbapi/Ndb.hpp
      storage/ndb/include/ndbapi/ndb_cluster_connection.hpp
      storage/ndb/include/portlib/NdbMutex.h
      storage/ndb/include/transporter/TransporterCallback.hpp
      storage/ndb/include/util/SparseBitmask.hpp
      storage/ndb/ndb_configure.m4
      storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp
      storage/ndb/src/common/portlib/CMakeLists.txt
      storage/ndb/src/common/portlib/Makefile.am
      storage/ndb/src/common/portlib/NdbCondition.c
      storage/ndb/src/common/portlib/NdbMutex.c
      storage/ndb/src/common/portlib/NdbTCP.cpp
      storage/ndb/src/common/portlib/NdbThread.c
      storage/ndb/src/common/transporter/TransporterRegistry.cpp
      storage/ndb/src/common/util/CMakeLists.txt
      storage/ndb/src/common/util/Makefile.am
      storage/ndb/src/common/util/ndb_init.cpp
      storage/ndb/src/kernel/SimBlockList.cpp
      storage/ndb/src/kernel/angel.cpp
      storage/ndb/src/kernel/blocks/LocalProxy.cpp
      storage/ndb/src/kernel/blocks/LocalProxy.hpp
      storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
      storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
      storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjProxy.hpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcProxy.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcProxy.hpp
      storage/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp
      storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
      storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
      storage/ndb/src/kernel/ndbd.cpp
      storage/ndb/src/kernel/vm/ArrayPool.hpp
      storage/ndb/src/kernel/vm/CMakeLists.txt
      storage/ndb/src/kernel/vm/Configuration.cpp
      storage/ndb/src/kernel/vm/Configuration.hpp
      storage/ndb/src/kernel/vm/Emulator.cpp
      storage/ndb/src/kernel/vm/GlobalData.hpp
      storage/ndb/src/kernel/vm/Makefile.am
      storage/ndb/src/kernel/vm/SimulatedBlock.cpp
      storage/ndb/src/kernel/vm/SimulatedBlock.hpp
      storage/ndb/src/kernel/vm/dummy_mt.cpp
      storage/ndb/src/kernel/vm/dummy_nonmt.cpp
      storage/ndb/src/kernel/vm/mt.cpp
      storage/ndb/src/kernel/vm/mt.hpp
      storage/ndb/src/mgmapi/mgmapi.cpp
      storage/ndb/src/mgmsrv/CMakeLists.txt
      storage/ndb/src/mgmsrv/ConfigInfo.cpp
      storage/ndb/src/mgmsrv/Makefile.am
      storage/ndb/src/mgmsrv/MgmtSrvr.cpp
      storage/ndb/src/mgmsrv/MgmtSrvr.hpp
      storage/ndb/src/mgmsrv/Services.cpp
      storage/ndb/src/mgmsrv/Services.hpp
      storage/ndb/src/ndbapi/API.hpp
      storage/ndb/src/ndbapi/CMakeLists.txt
      storage/ndb/src/ndbapi/Makefile.am
      storage/ndb/src/ndbapi/Ndb.cpp
      storage/ndb/src/ndbapi/NdbImpl.hpp
      storage/ndb/src/ndbapi/NdbIndexStat.cpp
      storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp
      storage/ndb/src/ndbapi/NdbIndexStatImpl.hpp
      storage/ndb/src/ndbapi/NdbQueryBuilder.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilder.hpp
      storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
      storage/ndb/src/ndbapi/NdbQueryOperation.hpp
      storage/ndb/src/ndbapi/NdbQueryOperationImpl.hpp
      storage/ndb/src/ndbapi/NdbTransaction.cpp
      storage/ndb/src/ndbapi/Ndbif.cpp
      storage/ndb/src/ndbapi/Ndbinit.cpp
      storage/ndb/src/ndbapi/TransporterFacade.cpp
      storage/ndb/src/ndbapi/TransporterFacade.hpp
      storage/ndb/src/ndbapi/ndb_cluster_connection.cpp
      storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp
      storage/ndb/src/ndbapi/ndberror.c
      storage/ndb/src/ndbapi/trp_client.cpp
      storage/ndb/src/ndbapi/trp_client.hpp
      storage/ndb/test/include/NdbMgmd.hpp
      storage/ndb/test/ndbapi/CMakeLists.txt
      storage/ndb/test/ndbapi/Makefile.am
      storage/ndb/test/ndbapi/testBlobs.cpp
      storage/ndb/test/ndbapi/testMgm.cpp
      storage/ndb/test/ndbapi/testNdbApi.cpp
      storage/ndb/test/ndbapi/testRestartGci.cpp
      storage/ndb/test/ndbapi/testSpj.cpp
      storage/ndb/test/ndbapi/test_event.cpp
      storage/ndb/test/run-test/daily-basic-tests.txt
      storage/ndb/test/tools/CMakeLists.txt
      storage/ndb/test/tools/connect.cpp
      storage/ndb/test/tools/hugoJoin.cpp
      storage/ndb/tools/Makefile.am
      storage/ndb/tools/ndb_config.cpp
=== modified file 'client/mysqlbinlog.cc'
--- a/client/mysqlbinlog.cc	2011-06-30 15:59:25 +0000
+++ b/client/mysqlbinlog.cc	2011-10-06 14:43:17 +0000
@@ -900,6 +900,11 @@ Exit_status process_event(PRINT_EVENT_IN
     case WRITE_ROWS_EVENT:
     case DELETE_ROWS_EVENT:
     case UPDATE_ROWS_EVENT:
+#ifndef MCP_WL5353
+    case WRITE_ROWS_EVENT_V1:
+    case DELETE_ROWS_EVENT_V1:
+    case UPDATE_ROWS_EVENT_V1:
+#endif
     case PRE_GA_WRITE_ROWS_EVENT:
     case PRE_GA_DELETE_ROWS_EVENT:
     case PRE_GA_UPDATE_ROWS_EVENT:

=== added file 'mysql-test/include/not_ndb_is.inc'
--- a/mysql-test/include/not_ndb_is.inc	1970-01-01 00:00:00 +0000
+++ b/mysql-test/include/not_ndb_is.inc	2011-10-17 14:16:56 +0000
@@ -0,0 +1,27 @@
+#
+# Check if cluster is available by selecting from is.engines
+# if an error about no such table occurs bail out
+#
+
+disable_result_log;
+disable_query_log;
+
+--error 0, 1109
+select @have_ndb_is:= count(*) from information_schema.plugins
+where plugin_name like '%ndb%'
+  and PLUGIN_TYPE = 'INFORMATION SCHEMA';
+
+
+if ($mysql_errno){
+  # For backward compatibility, implement old fashioned way
+  # to check here ie. use SHOW VARIABLES LIKE "have_ndb"
+  die Can not determine if server supports ndb without is.engines table;
+}
+
+
+if (`select @have_ndb_is`){
+  skip NDB information schema table installed;
+}
+
+enable_query_log;
+enable_result_log;

=== modified file 'mysql-test/mysql-test-run.pl'
--- a/mysql-test/mysql-test-run.pl	2011-07-06 09:20:17 +0000
+++ b/mysql-test/mysql-test-run.pl	2011-10-10 14:28:05 +0000
@@ -1502,6 +1502,10 @@ sub command_line_setup {
 
     mtr_report("Running valgrind with options \"",
 	       join(" ", @valgrind_args), "\"");
+
+    # Turn off check testcases to save time 
+    mtr_report("Turning off --check-testcases to save time when valgrinding");
+    $opt_check_testcases = 0;
   }
 
   mtr_report("Checking supported features...");

=== modified file 'mysql-test/r/information_schema.result'
--- a/mysql-test/r/information_schema.result	2011-03-29 14:09:05 +0000
+++ b/mysql-test/r/information_schema.result	2011-10-17 14:16:56 +0000
@@ -39,8 +39,7 @@ insert into t5 values (10);
 create view v1 (c) as
 SELECT table_name FROM information_schema.TABLES
 WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND
-table_name<>'ndb_binlog_index' AND
-table_name<>'ndb_apply_status';
+table_name not like 'ndb%';
 select * from v1;
 c
 CHARACTER_SETS
@@ -850,7 +849,7 @@ VIEWS	TABLE_NAME	select
 delete from mysql.user where user='mysqltest_4';
 delete from mysql.db where user='mysqltest_4';
 flush privileges;
-SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND table_name<>'ndb_binlog_index' AND table_name<>'ndb_apply_status' GROUP BY TABLE_SCHEMA;
+SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND table_name not like 'ndb%' GROUP BY TABLE_SCHEMA;
 table_schema	count(*)
 information_schema	28
 mysql	22
@@ -1230,7 +1229,8 @@ INNER JOIN
 information_schema.columns c1
 ON t.table_schema = c1.table_schema AND
 t.table_name = c1.table_name
-WHERE t.table_schema = 'information_schema' AND
+WHERE t.table_name not like 'ndb%' AND
+t.table_schema = 'information_schema' AND
 c1.ordinal_position =
 ( SELECT COALESCE(MIN(c2.ordinal_position),1)
 FROM information_schema.columns c2
@@ -1273,7 +1273,8 @@ INNER JOIN
 information_schema.columns c1
 ON t.table_schema = c1.table_schema AND
 t.table_name = c1.table_name
-WHERE t.table_schema = 'information_schema' AND
+WHERE t.table_name not like 'ndb%' AND
+t.table_schema = 'information_schema' AND
 c1.ordinal_position =
 ( SELECT COALESCE(MIN(c2.ordinal_position),1)
 FROM information_schema.columns c2
@@ -1365,7 +1366,8 @@ count(*) as num1
 from information_schema.tables t
 inner join information_schema.columns c1
 on t.table_schema = c1.table_schema AND t.table_name = c1.table_name
-where t.table_schema = 'information_schema' and
+where t.table_name not like 'ndb%' and
+t.table_schema = 'information_schema' and
 c1.ordinal_position =
 (select isnull(c2.column_type) -
 isnull(group_concat(c2.table_schema, '.', c2.table_name)) +

=== modified file 'mysql-test/r/information_schema_db.result'
--- a/mysql-test/r/information_schema_db.result	2010-11-30 17:51:25 +0000
+++ b/mysql-test/r/information_schema_db.result	2011-10-17 14:16:56 +0000
@@ -3,7 +3,7 @@ drop view if exists v1,v2;
 drop function if exists f1;
 drop function if exists f2;
 use INFORMATION_SCHEMA;
-show tables;
+show tables where Tables_in_information_schema NOT LIKE 'ndb%';
 Tables_in_information_schema
 CHARACTER_SETS
 COLLATIONS

=== modified file 'mysql-test/suite/binlog/t/binlog_row_mysqlbinlog_db_filter.test'
--- a/mysql-test/suite/binlog/t/binlog_row_mysqlbinlog_db_filter.test	2009-09-30 02:31:25 +0000
+++ b/mysql-test/suite/binlog/t/binlog_row_mysqlbinlog_db_filter.test	2011-10-06 14:43:17 +0000
@@ -140,6 +140,17 @@ while($i)
     -- echo **** ERROR **** Database name 'b42941' NOT FOUND in mysqlbinlog output ($flags $outfile.2).
   }
 
+  #bug #13067813
+  if (`SELECT INSTR(@b42941_output.1, 'unknown table')`)
+  {
+    -- echo **** ERROR **** Table mapping failure detected
+  }
+
+  if (`SELECT INSTR(@b42941_output.2, 'unknown table')`)
+  {
+    -- echo **** ERROR **** Table mapping failure detected
+  }
+
   dec $i;
 }
 

=== modified file 'mysql-test/suite/funcs_1/r/is_columns_is.result'
--- a/mysql-test/suite/funcs_1/r/is_columns_is.result	2010-10-06 10:06:47 +0000
+++ b/mysql-test/suite/funcs_1/r/is_columns_is.result	2011-10-18 07:22:32 +0000
@@ -1,6 +1,6 @@
 SELECT * FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 ORDER BY table_schema, table_name, column_name;
 TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	COLUMN_NAME	ORDINAL_POSITION	COLUMN_DEFAULT	IS_NULLABLE	DATA_TYPE	CHARACTER_MAXIMUM_LENGTH	CHARACTER_OCTET_LENGTH	NUMERIC_PRECISION	NUMERIC_SCALE	CHARACTER_SET_NAME	COLLATION_NAME	COLUMN_TYPE	COLUMN_KEY	EXTRA	PRIVILEGES	COLUMN_COMMENT	STORAGE	FORMAT
 NULL	information_schema	CHARACTER_SETS	CHARACTER_SET_NAME	1		NO	varchar	32	96	NULL	NULL	utf8	utf8_general_ci	varchar(32)			select		Default	Default
@@ -312,7 +312,7 @@ CHARACTER_SET_NAME,
 COLLATION_NAME
 FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 AND CHARACTER_OCTET_LENGTH / CHARACTER_MAXIMUM_LENGTH = 1
 ORDER BY CHARACTER_SET_NAME, COLLATION_NAME, COL_CML;
 COL_CML	DATA_TYPE	CHARACTER_SET_NAME	COLLATION_NAME
@@ -324,7 +324,7 @@ CHARACTER_SET_NAME,
 COLLATION_NAME
 FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 AND CHARACTER_OCTET_LENGTH / CHARACTER_MAXIMUM_LENGTH <> 1
 ORDER BY CHARACTER_SET_NAME, COLLATION_NAME, COL_CML;
 COL_CML	DATA_TYPE	CHARACTER_SET_NAME	COLLATION_NAME
@@ -336,7 +336,7 @@ CHARACTER_SET_NAME,
 COLLATION_NAME
 FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 AND CHARACTER_OCTET_LENGTH / CHARACTER_MAXIMUM_LENGTH IS NULL
 ORDER BY CHARACTER_SET_NAME, COLLATION_NAME, COL_CML;
 COL_CML	DATA_TYPE	CHARACTER_SET_NAME	COLLATION_NAME
@@ -357,7 +357,7 @@ COLLATION_NAME,
 COLUMN_TYPE
 FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 ORDER BY TABLE_SCHEMA, TABLE_NAME, ORDINAL_POSITION;
 COL_CML	TABLE_SCHEMA	TABLE_NAME	COLUMN_NAME	DATA_TYPE	CHARACTER_MAXIMUM_LENGTH	CHARACTER_OCTET_LENGTH	CHARACTER_SET_NAME	COLLATION_NAME	COLUMN_TYPE
 3.0000	information_schema	CHARACTER_SETS	CHARACTER_SET_NAME	varchar	32	96	utf8	utf8_general_ci	varchar(32)

=== modified file 'mysql-test/suite/funcs_1/r/is_tables_is.result'
--- a/mysql-test/suite/funcs_1/r/is_tables_is.result	2008-06-18 17:23:55 +0000
+++ b/mysql-test/suite/funcs_1/r/is_tables_is.result	2011-10-18 07:22:32 +0000
@@ -11,7 +11,7 @@ AS "user_comment",
 '-----------------------------------------------------' AS "Separator"
 FROM information_schema.tables
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 ORDER BY table_schema,table_name;
 TABLE_CATALOG	NULL
 TABLE_SCHEMA	information_schema
@@ -649,7 +649,7 @@ AS "user_comment",
 '-----------------------------------------------------' AS "Separator"
 FROM information_schema.tables
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling'
+AND table_name <> 'profiling' and table_name not like 'ndb%'
 ORDER BY table_schema,table_name;
 TABLE_CATALOG	NULL
 TABLE_SCHEMA	information_schema

=== modified file 'mysql-test/suite/funcs_1/t/is_columns_is.test'
--- a/mysql-test/suite/funcs_1/t/is_columns_is.test	2008-06-16 18:39:58 +0000
+++ b/mysql-test/suite/funcs_1/t/is_columns_is.test	2011-10-18 07:22:32 +0000
@@ -18,5 +18,5 @@
 --source include/not_embedded.inc
 
 let $my_where = WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling';
+AND table_name <> 'profiling' and table_name not like 'ndb%';
 --source suite/funcs_1/datadict/columns.inc

=== modified file 'mysql-test/suite/funcs_1/t/is_tables_is.test'
--- a/mysql-test/suite/funcs_1/t/is_tables_is.test	2008-03-07 16:33:07 +0000
+++ b/mysql-test/suite/funcs_1/t/is_tables_is.test	2011-10-18 07:22:32 +0000
@@ -13,6 +13,6 @@
 #
 
 let $my_where = WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling';
+AND table_name <> 'profiling' and table_name not like 'ndb%';
 --source suite/funcs_1/datadict/tables1.inc
 

=== modified file 'mysql-test/suite/ndb/r/ndb_alter_table.result'
--- a/mysql-test/suite/ndb/r/ndb_alter_table.result	2009-07-13 10:51:28 +0000
+++ b/mysql-test/suite/ndb/r/ndb_alter_table.result	2011-10-07 14:28:37 +0000
@@ -283,7 +283,7 @@ c108 int(11) not null,
 c109 int(11) not null,
 primary key (ai),
 unique key tx1 (c002, c003, c004, c005)) engine=ndb;
-create index tx2 
+create index tx2
 on t1 (c010, c011, c012, c013);
 drop table t1;
 create table t1 (a int primary key auto_increment, b int) engine=ndb;
@@ -395,4 +395,136 @@ a	b
 2	200
 1	300
 drop table t1;
+create table t1 (
+ci001 int, ci002 int, ci003 int, ci004 int, ci005 int, ci006 int, ci007 int, ci008 int,
+ci009 int, ci010 int, ci011 int, ci012 int, ci013 int, ci014 int, ci015 int, ci016 int,
+ci017 int, ci018 int, ci019 int, ci020 int, ci021 int, ci022 int, ci023 int, ci024 int,
+ci025 int, ci026 int, ci027 int, ci028 int, ci029 int, ci030 int, ci031 int, ci032 int,
+ci033 int, ci034 int, ci035 int, ci036 int, ci037 int, ci038 int, ci039 int, ci040 int,
+ci041 int, ci042 int, ci043 int, ci044 int, ci045 int, ci046 int, ci047 int, ci048 int,
+ci049 int, ci050 int, ci051 int, ci052 int, ci053 int, ci054 int, ci055 int, ci056 int,
+ci057 int, ci058 int, ci059 int, ci060 int, ci061 int, ci062 int, ci063 int, ci064 int,
+ci065 int, ci066 int, ci067 int, ci068 int, ci069 int, ci070 int, ci071 int, ci072 int,
+ci073 int, ci074 int, ci075 int, ci076 int, ci077 int, ci078 int, ci079 int, ci080 int,
+ci081 int, ci082 int, ci083 int, ci084 int, ci085 int, ci086 int, ci087 int, ci088 int,
+ci089 int, ci090 int, ci091 int, ci092 int, ci093 int, ci094 int, ci095 int, ci096 int,
+ci097 int, ci098 int, ci099 int, ci100 int, ci101 int, ci102 int, ci103 int, ci104 int,
+ci105 int, ci106 int, ci107 int, ci108 int, ci109 int, ci110 int, ci111 int, ci112 int,
+ci113 int, ci114 int, ci115 int, ci116 int, ci117 int, ci118 int, ci119 int, ci120 int,
+ci121 int, ci122 int, ci123 int, ci124 int, ci125 int, ci126 int, ci127 int, ci128 int,
+ci129 int, ci130 int, ci131 int, ci132 int, ci133 int, ci134 int, ci135 int, ci136 int,
+ci137 int, ci138 int, ci139 int, ci140 int, ci141 int, ci142 int, ci143 int, ci144 int,
+ci145 int, ci146 int, ci147 int, ci148 int, ci149 int, ci150 int, ci151 int, ci152 int,
+ci153 int, ci154 int, ci155 int, ci156 int, ci157 int, ci158 int, ci159 int, ci160 int,
+ci161 int, ci162 int, ci163 int, ci164 int, ci165 int, ci166 int, ci167 int, ci168 int,
+ci169 int, ci170 int, ci171 int, ci172 int, ci173 int, ci174 int, ci175 int, ci176 int,
+ci177 int, ci178 int, ci179 int, ci180 int, ci181 int, ci182 int, ci183 int, ci184 int,
+ci185 int, ci186 int, ci187 int, ci188 int, ci189 int, ci190 int, ci191 int, ci192 int,
+ci193 int, ci194 int, ci195 int, ci196 int, ci197 int, ci198 int, ci199 int, ci200 int,
+ci201 int, ci202 int, ci203 int, ci204 int, ci205 int, ci206 int, ci207 int, ci208 int,
+ci209 int, ci210 int, ci211 int, ci212 int, ci213 int, ci214 int, ci215 int, ci216 int,
+ci217 int, ci218 int, ci219 int, ci220 int, ci221 int, ci222 int, ci223 int, ci224 int,
+ci225 int, ci226 int, ci227 int, ci228 int, ci229 int, ci230 int, ci231 int, ci232 int,
+ci233 int, ci234 int, ci235 int, ci236 int, ci237 int, ci238 int, ci239 int, ci240 int,
+ci241 int, ci242 int, ci243 int, ci244 int, ci245 int, ci246 int, ci247 int, ci248 int,
+ci249 int, ci250 int, ci251 int, ci252 int, ci253 int, ci254 int, ci255 int, ci256 int,
+ci257 int, ci258 int, ci259 int, ci260 int, ci261 int, ci262 int, ci263 int, ci264 int,
+ci265 int, ci266 int, ci267 int, ci268 int, ci269 int, ci270 int, ci271 int, ci272 int,
+ci273 int, ci274 int, ci275 int, ci276 int, ci277 int, ci278 int, ci279 int, ci280 int,
+ci281 int, ci282 int, ci283 int, ci284 int, ci285 int, ci286 int, ci287 int, ci288 int,
+ci289 int, ci290 int, ci291 int, ci292 int, ci293 int, ci294 int, ci295 int, ci296 int,
+ci297 int, ci298 int, ci299 int, ci300 int, ci301 int, ci302 int, ci303 int, ci304 int,
+ci305 int, ci306 int, ci307 int, ci308 int, ci309 int, ci310 int, ci311 int, ci312 int,
+ci313 int, ci314 int, ci315 int, ci316 int, ci317 int, ci318 int, ci319 int, ci320 int,
+ci321 int, ci322 int, ci323 int, ci324 int, ci325 int, ci326 int, ci327 int, ci328 int,
+ci329 int, ci330 int, ci331 int, ci332 int, ci333 int, ci334 int, ci335 int, ci336 int,
+ci337 int, ci338 int, ci339 int, ci340 int, ci341 int, ci342 int, ci343 int, ci344 int,
+ci345 int, ci346 int, ci347 int, ci348 int, ci349 int, ci350 int, ci351 int, ci352 int,
+ci353 int, ci354 int, ci355 int, ci356 int, ci357 int, ci358 int, ci359 int, ci360 int,
+ci361 int, ci362 int, ci363 int, ci364 int, ci365 int, ci366 int, ci367 int, ci368 int,
+ci369 int, ci370 int, ci371 int, ci372 int, ci373 int, ci374 int, ci375 int, ci376 int,
+ci377 int, ci378 int, ci379 int, ci380 int, ci381 int, ci382 int, ci383 int, ci384 int,
+ci385 int, ci386 int, ci387 int, ci388 int, ci389 int, ci390 int, ci391 int, ci392 int,
+ci393 int, ci394 int, ci395 int, ci396 int, ci397 int, ci398 int, ci399 int, ci400 int,
+ci401 int, ci402 int, ci403 int, ci404 int, ci405 int, ci406 int, ci407 int, ci408 int,
+ci409 int, ci410 int, ci411 int, ci412 int, ci413 int, ci414 int, ci415 int, ci416 int,
+ci417 int, ci418 int, ci419 int, ci420 int, ci421 int, ci422 int, ci423 int, ci424 int,
+ci425 int, ci426 int, ci427 int, ci428 int, ci429 int, ci430 int, ci431 int, ci432 int,
+ci433 int, ci434 int, ci435 int, ci436 int, ci437 int, ci438 int, ci439 int, ci440 int,
+ci441 int, ci442 int, ci443 int, ci444 int, ci445 int, ci446 int, ci447 int, ci448 int,
+ci449 int, ci450 int, ci451 int, ci452 int, ci453 int, ci454 int, ci455 int, ci456 int,
+ci457 int, ci458 int, ci459 int, ci460 int, ci461 int, ci462 int, ci463 int, ci464 int,
+ci465 int, ci466 int, ci467 int, ci468 int, ci469 int, ci470 int,
+cb471 blob, cb472 blob, cb473 blob, cb474 blob, cb475 blob, cb476 blob, cb477 blob, cb478 blob,
+cb479 blob, cb480 blob, cb481 blob, cb482 blob, cb483 blob, cb484 blob, cb485 blob, cb486 blob,
+cb487 blob, cb488 blob, cb489 blob, cb490 blob, cb491 blob, cb492 blob, cb493 blob, cb494 blob,
+cb495 blob, cb496 blob, cb497 blob, cb498 blob, cb499 blob, cb500 blob, cb501 blob, cb502 blob,
+cb503 blob, cb504 blob, cb505 blob, cb506 blob, cb507 blob, cb508 blob, cb509 blob, cb510 blob,
+cb511 blob, cb512 blob,
+primary key (ci001),
+unique i1 (ci002),
+unique i2 (ci003),
+unique i3 (ci004),
+unique i4 (ci005),
+unique i5 (ci006),
+unique i6 (ci007),
+unique i7 (ci008),
+unique i8 (ci009),
+unique i9 (ci010),
+unique i10 (ci011),
+unique i11 (ci012),
+unique i12 (ci013),
+unique i13 (ci014),
+unique i14 (ci015),
+unique i15 (ci016),
+unique i16 (ci017),
+unique i17 (ci018),
+unique i18 (ci019),
+unique i19 (ci020),
+unique i20 (ci021),
+unique i21 (ci022),
+unique i22 (ci023),
+unique i23 (ci024),
+unique i24 (ci025),
+unique i25 (ci026),
+unique i26 (ci027),
+unique i27 (ci028),
+unique i28 (ci029),
+unique i29 (ci030),
+unique i30 (ci031),
+unique i31 (ci032),
+unique i32 (ci033),
+unique i33 (ci034),
+unique i34 (ci035),
+unique i35 (ci036),
+unique i36 (ci037),
+unique i37 (ci038),
+unique i38 (ci039),
+unique i39 (ci040),
+unique i40 (ci041),
+unique i41 (ci042),
+unique i42 (ci043),
+unique i43 (ci044),
+unique i44 (ci045),
+unique i45 (ci046),
+unique i46 (ci047),
+unique i47 (ci048),
+unique i48 (ci049),
+unique i49 (ci050),
+unique i50 (ci051),
+unique i51 (ci052),
+unique i52 (ci053),
+unique i53 (ci054),
+unique i54 (ci055),
+unique i55 (ci056),
+unique i56 (ci057),
+unique i57 (ci058),
+unique i58 (ci059),
+unique i59 (ci060),
+unique i60 (ci061),
+unique i61 (ci062),
+unique i62 (ci063),
+unique i63 (ci064)
+) engine=ndb;
+drop table t1;
 End of 5.1 tests

=== modified file 'mysql-test/suite/ndb/r/ndb_basic.result'
--- a/mysql-test/suite/ndb/r/ndb_basic.result	2011-09-07 22:50:01 +0000
+++ b/mysql-test/suite/ndb/r/ndb_basic.result	2011-10-08 16:56:43 +0000
@@ -88,6 +88,7 @@ Ndb_connect_count	#
 Ndb_execute_count	#
 Ndb_index_stat_cache_clean	#
 Ndb_index_stat_cache_query	#
+Ndb_index_stat_status	#
 Ndb_number_of_data_nodes	#
 Ndb_number_of_ready_data_nodes	#
 Ndb_pruned_scan_count	#

=== modified file 'mysql-test/suite/ndb/r/ndb_index_stat.result'
--- a/mysql-test/suite/ndb/r/ndb_index_stat.result	2011-09-02 06:43:38 +0000
+++ b/mysql-test/suite/ndb/r/ndb_index_stat.result	2011-10-08 16:56:43 +0000
@@ -21,18 +21,18 @@ Variable_name	Value
 ndb_index_stat_enable	ON
 show global variables like 'ndb_index_stat_option';
 Variable_name	Value
-ndb_index_stat_option	loop_checkon=1000ms,loop_idle=1000ms,loop_busy=100ms,update_batch=1,read_batch=4,idle_batch=32,check_batch=32,check_delay=1m,delete_batch=8,clean_delay=0,error_batch=4,error_delay=1m,evict_batch=8,evict_delay=1m,cache_limit=32M,cache_lowpct=90
+ndb_index_stat_option	loop_enable=1000ms,loop_idle=1000ms,loop_busy=100ms,update_batch=1,read_batch=4,idle_batch=32,check_batch=8,check_delay=10m,delete_batch=8,clean_delay=1m,error_batch=4,error_delay=1m,evict_batch=8,evict_delay=1m,cache_limit=32M,cache_lowpct=90
 set @save_option = @@global.ndb_index_stat_option;
 set @@global.ndb_index_stat_option = 'loop_idle=3333,cache_limit=44M';
 set @@global.ndb_index_stat_option = 'cache_lowpct=85,evict_delay=55';
 set @@global.ndb_index_stat_option = 'check_delay=234s';
 show global variables like 'ndb_index_stat_option';
 Variable_name	Value
-ndb_index_stat_option	loop_checkon=1000ms,loop_idle=3333ms,loop_busy=100ms,update_batch=1,read_batch=4,idle_batch=32,check_batch=32,check_delay=234s,delete_batch=8,clean_delay=0,error_batch=4,error_delay=1m,evict_batch=8,evict_delay=55s,cache_limit=44M,cache_lowpct=85
+ndb_index_stat_option	loop_enable=1000ms,loop_idle=3333ms,loop_busy=100ms,update_batch=1,read_batch=4,idle_batch=32,check_batch=8,check_delay=234s,delete_batch=8,clean_delay=1m,error_batch=4,error_delay=1m,evict_batch=8,evict_delay=55s,cache_limit=44M,cache_lowpct=85
 set @@global.ndb_index_stat_option = @save_option;
 show global variables like 'ndb_index_stat_option';
 Variable_name	Value
-ndb_index_stat_option	loop_checkon=1000ms,loop_idle=1000ms,loop_busy=100ms,update_batch=1,read_batch=4,idle_batch=32,check_batch=32,check_delay=1m,delete_batch=8,clean_delay=0,error_batch=4,error_delay=1m,evict_batch=8,evict_delay=1m,cache_limit=32M,cache_lowpct=90
+ndb_index_stat_option	loop_enable=1000ms,loop_idle=1000ms,loop_busy=100ms,update_batch=1,read_batch=4,idle_batch=32,check_batch=8,check_delay=10m,delete_batch=8,clean_delay=1m,error_batch=4,error_delay=1m,evict_batch=8,evict_delay=1m,cache_limit=32M,cache_lowpct=90
 create table t1 (
 a1 int unsigned not null,
 b1 int unsigned not null,

=== modified file 'mysql-test/suite/ndb/r/ndbinfo.result'
--- a/mysql-test/suite/ndb/r/ndbinfo.result	2011-05-23 13:45:57 +0000
+++ b/mysql-test/suite/ndb/r/ndbinfo.result	2011-10-13 10:17:51 +0000
@@ -37,9 +37,13 @@ table_id	table_name	comment
 8	counters	monotonic counters
 9	nodes	node status
 10	diskpagebuffer	disk page buffer info
+11	threadblocks	which blocks are run in which threads
+12	threadstat	Statistics on execution threads
+13	transactions	transactions
+14	operations	operations
 SELECT COUNT(*) FROM ndb$tables;
 COUNT(*)
-11
+15
 SELECT * FROM ndb$tables WHERE table_id = 2;
 table_id	table_name	comment
 2	test	for testing
@@ -50,11 +54,15 @@ table_id	table_name	comment
 8	counters	monotonic counters
 9	nodes	node status
 10	diskpagebuffer	disk page buffer info
+11	threadblocks	which blocks are run in which threads
+12	threadstat	Statistics on execution threads
+13	transactions	transactions
+14	operations	operations
 SELECT * FROM ndb$tables WHERE table_name = 'LOGDESTINATION';
 table_id	table_name	comment
 SELECT COUNT(*) FROM ndb$tables t1, ndb$tables t2 WHERE t1.table_id = t1.table_id;
 COUNT(*)
-121
+225
 
 SELECT table_id, table_name, comment from ndb$tables
   WHERE table_id > 2 AND table_id <= 5 ORDER BY table_id;
@@ -73,10 +81,14 @@ table_id	table_name
 6	logbuffers
 5	logspaces
 9	nodes
+14	operations
 3	pools
 7	resources
 0	tables
 2	test
+11	threadblocks
+12	threadstat
+13	transactions
 4	transporters
 
 SELECT table_id, column_id, column_name FROM ndb$columns LIMIT 7;
@@ -124,6 +136,10 @@ table_id
 8
 9
 10
+11
+12
+13
+14
 
 TRUNCATE ndb$tables;
 ERROR HY000: Table 'ndb$tables' is read only

=== modified file 'mysql-test/suite/ndb/r/ndbinfo_dump.result'
--- a/mysql-test/suite/ndb/r/ndbinfo_dump.result	2011-02-23 22:30:50 +0000
+++ b/mysql-test/suite/ndb/r/ndbinfo_dump.result	2011-10-07 09:28:24 +0000
@@ -1,7 +1,7 @@
 USE ndbinfo;
 select count(*) from blocks;
 count(*)
-21
+22
 select count(*) from blocks;
 count(*)
-21
+22

=== modified file 'mysql-test/suite/ndb/t/ndb_alter_table.test'
--- a/mysql-test/suite/ndb/t/ndb_alter_table.test	2009-07-13 10:51:28 +0000
+++ b/mysql-test/suite/ndb/t/ndb_alter_table.test	2011-10-07 14:28:37 +0000
@@ -22,7 +22,7 @@ CREATE TABLE t1 (
 ) ENGINE=ndbcluster;
 
 INSERT INTO t1 VALUES (9410,9412);
-  
+
 ALTER TABLE t1 ADD COLUMN c int not null;
 SELECT * FROM t1;
 
@@ -144,7 +144,7 @@ drop table t1;
 #create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) )
 #engine=ndb;
 #insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
-#create index c on t1(c); 
+#create index c on t1(c);
 #connection server2;
 #select * from t1 where c = 'two';
 #connection server1;
@@ -194,7 +194,7 @@ drop table t1;
 ### This should work as transaction is ongoing...
 ##delete from t3;
 ##insert into t3 values (1);
-#commit; 
+#commit;
 
 ## This should fail as its a new transaction
 #--error 1146
@@ -318,7 +318,7 @@ c109 int(11) not null,
 primary key (ai),
 unique key tx1 (c002, c003, c004, c005)) engine=ndb;
 
-create index tx2 
+create index tx2
 on t1 (c010, c011, c012, c013);
 
 drop table t1;
@@ -384,4 +384,139 @@ alter table t1 order by b;
 select * from t1 order by b;
 drop table t1;
 
+# big table, 512 columns, 42 blob, 64x2 indices
+
+create table t1 (
+  ci001 int, ci002 int, ci003 int, ci004 int, ci005 int, ci006 int, ci007 int, ci008 int,
+  ci009 int, ci010 int, ci011 int, ci012 int, ci013 int, ci014 int, ci015 int, ci016 int,
+  ci017 int, ci018 int, ci019 int, ci020 int, ci021 int, ci022 int, ci023 int, ci024 int,
+  ci025 int, ci026 int, ci027 int, ci028 int, ci029 int, ci030 int, ci031 int, ci032 int,
+  ci033 int, ci034 int, ci035 int, ci036 int, ci037 int, ci038 int, ci039 int, ci040 int,
+  ci041 int, ci042 int, ci043 int, ci044 int, ci045 int, ci046 int, ci047 int, ci048 int,
+  ci049 int, ci050 int, ci051 int, ci052 int, ci053 int, ci054 int, ci055 int, ci056 int,
+  ci057 int, ci058 int, ci059 int, ci060 int, ci061 int, ci062 int, ci063 int, ci064 int,
+  ci065 int, ci066 int, ci067 int, ci068 int, ci069 int, ci070 int, ci071 int, ci072 int,
+  ci073 int, ci074 int, ci075 int, ci076 int, ci077 int, ci078 int, ci079 int, ci080 int,
+  ci081 int, ci082 int, ci083 int, ci084 int, ci085 int, ci086 int, ci087 int, ci088 int,
+  ci089 int, ci090 int, ci091 int, ci092 int, ci093 int, ci094 int, ci095 int, ci096 int,
+  ci097 int, ci098 int, ci099 int, ci100 int, ci101 int, ci102 int, ci103 int, ci104 int,
+  ci105 int, ci106 int, ci107 int, ci108 int, ci109 int, ci110 int, ci111 int, ci112 int,
+  ci113 int, ci114 int, ci115 int, ci116 int, ci117 int, ci118 int, ci119 int, ci120 int,
+  ci121 int, ci122 int, ci123 int, ci124 int, ci125 int, ci126 int, ci127 int, ci128 int,
+  ci129 int, ci130 int, ci131 int, ci132 int, ci133 int, ci134 int, ci135 int, ci136 int,
+  ci137 int, ci138 int, ci139 int, ci140 int, ci141 int, ci142 int, ci143 int, ci144 int,
+  ci145 int, ci146 int, ci147 int, ci148 int, ci149 int, ci150 int, ci151 int, ci152 int,
+  ci153 int, ci154 int, ci155 int, ci156 int, ci157 int, ci158 int, ci159 int, ci160 int,
+  ci161 int, ci162 int, ci163 int, ci164 int, ci165 int, ci166 int, ci167 int, ci168 int,
+  ci169 int, ci170 int, ci171 int, ci172 int, ci173 int, ci174 int, ci175 int, ci176 int,
+  ci177 int, ci178 int, ci179 int, ci180 int, ci181 int, ci182 int, ci183 int, ci184 int,
+  ci185 int, ci186 int, ci187 int, ci188 int, ci189 int, ci190 int, ci191 int, ci192 int,
+  ci193 int, ci194 int, ci195 int, ci196 int, ci197 int, ci198 int, ci199 int, ci200 int,
+  ci201 int, ci202 int, ci203 int, ci204 int, ci205 int, ci206 int, ci207 int, ci208 int,
+  ci209 int, ci210 int, ci211 int, ci212 int, ci213 int, ci214 int, ci215 int, ci216 int,
+  ci217 int, ci218 int, ci219 int, ci220 int, ci221 int, ci222 int, ci223 int, ci224 int,
+  ci225 int, ci226 int, ci227 int, ci228 int, ci229 int, ci230 int, ci231 int, ci232 int,
+  ci233 int, ci234 int, ci235 int, ci236 int, ci237 int, ci238 int, ci239 int, ci240 int,
+  ci241 int, ci242 int, ci243 int, ci244 int, ci245 int, ci246 int, ci247 int, ci248 int,
+  ci249 int, ci250 int, ci251 int, ci252 int, ci253 int, ci254 int, ci255 int, ci256 int,
+  ci257 int, ci258 int, ci259 int, ci260 int, ci261 int, ci262 int, ci263 int, ci264 int,
+  ci265 int, ci266 int, ci267 int, ci268 int, ci269 int, ci270 int, ci271 int, ci272 int,
+  ci273 int, ci274 int, ci275 int, ci276 int, ci277 int, ci278 int, ci279 int, ci280 int,
+  ci281 int, ci282 int, ci283 int, ci284 int, ci285 int, ci286 int, ci287 int, ci288 int,
+  ci289 int, ci290 int, ci291 int, ci292 int, ci293 int, ci294 int, ci295 int, ci296 int,
+  ci297 int, ci298 int, ci299 int, ci300 int, ci301 int, ci302 int, ci303 int, ci304 int,
+  ci305 int, ci306 int, ci307 int, ci308 int, ci309 int, ci310 int, ci311 int, ci312 int,
+  ci313 int, ci314 int, ci315 int, ci316 int, ci317 int, ci318 int, ci319 int, ci320 int,
+  ci321 int, ci322 int, ci323 int, ci324 int, ci325 int, ci326 int, ci327 int, ci328 int,
+  ci329 int, ci330 int, ci331 int, ci332 int, ci333 int, ci334 int, ci335 int, ci336 int,
+  ci337 int, ci338 int, ci339 int, ci340 int, ci341 int, ci342 int, ci343 int, ci344 int,
+  ci345 int, ci346 int, ci347 int, ci348 int, ci349 int, ci350 int, ci351 int, ci352 int,
+  ci353 int, ci354 int, ci355 int, ci356 int, ci357 int, ci358 int, ci359 int, ci360 int,
+  ci361 int, ci362 int, ci363 int, ci364 int, ci365 int, ci366 int, ci367 int, ci368 int,
+  ci369 int, ci370 int, ci371 int, ci372 int, ci373 int, ci374 int, ci375 int, ci376 int,
+  ci377 int, ci378 int, ci379 int, ci380 int, ci381 int, ci382 int, ci383 int, ci384 int,
+  ci385 int, ci386 int, ci387 int, ci388 int, ci389 int, ci390 int, ci391 int, ci392 int,
+  ci393 int, ci394 int, ci395 int, ci396 int, ci397 int, ci398 int, ci399 int, ci400 int,
+  ci401 int, ci402 int, ci403 int, ci404 int, ci405 int, ci406 int, ci407 int, ci408 int,
+  ci409 int, ci410 int, ci411 int, ci412 int, ci413 int, ci414 int, ci415 int, ci416 int,
+  ci417 int, ci418 int, ci419 int, ci420 int, ci421 int, ci422 int, ci423 int, ci424 int,
+  ci425 int, ci426 int, ci427 int, ci428 int, ci429 int, ci430 int, ci431 int, ci432 int,
+  ci433 int, ci434 int, ci435 int, ci436 int, ci437 int, ci438 int, ci439 int, ci440 int,
+  ci441 int, ci442 int, ci443 int, ci444 int, ci445 int, ci446 int, ci447 int, ci448 int,
+  ci449 int, ci450 int, ci451 int, ci452 int, ci453 int, ci454 int, ci455 int, ci456 int,
+  ci457 int, ci458 int, ci459 int, ci460 int, ci461 int, ci462 int, ci463 int, ci464 int,
+  ci465 int, ci466 int, ci467 int, ci468 int, ci469 int, ci470 int,
+  cb471 blob, cb472 blob, cb473 blob, cb474 blob, cb475 blob, cb476 blob, cb477 blob, cb478 blob,
+  cb479 blob, cb480 blob, cb481 blob, cb482 blob, cb483 blob, cb484 blob, cb485 blob, cb486 blob,
+  cb487 blob, cb488 blob, cb489 blob, cb490 blob, cb491 blob, cb492 blob, cb493 blob, cb494 blob,
+  cb495 blob, cb496 blob, cb497 blob, cb498 blob, cb499 blob, cb500 blob, cb501 blob, cb502 blob,
+  cb503 blob, cb504 blob, cb505 blob, cb506 blob, cb507 blob, cb508 blob, cb509 blob, cb510 blob,
+  cb511 blob, cb512 blob,
+  primary key (ci001),
+  unique i1 (ci002),
+  unique i2 (ci003),
+  unique i3 (ci004),
+  unique i4 (ci005),
+  unique i5 (ci006),
+  unique i6 (ci007),
+  unique i7 (ci008),
+  unique i8 (ci009),
+  unique i9 (ci010),
+  unique i10 (ci011),
+  unique i11 (ci012),
+  unique i12 (ci013),
+  unique i13 (ci014),
+  unique i14 (ci015),
+  unique i15 (ci016),
+  unique i16 (ci017),
+  unique i17 (ci018),
+  unique i18 (ci019),
+  unique i19 (ci020),
+  unique i20 (ci021),
+  unique i21 (ci022),
+  unique i22 (ci023),
+  unique i23 (ci024),
+  unique i24 (ci025),
+  unique i25 (ci026),
+  unique i26 (ci027),
+  unique i27 (ci028),
+  unique i28 (ci029),
+  unique i29 (ci030),
+  unique i30 (ci031),
+  unique i31 (ci032),
+  unique i32 (ci033),
+  unique i33 (ci034),
+  unique i34 (ci035),
+  unique i35 (ci036),
+  unique i36 (ci037),
+  unique i37 (ci038),
+  unique i38 (ci039),
+  unique i39 (ci040),
+  unique i40 (ci041),
+  unique i41 (ci042),
+  unique i42 (ci043),
+  unique i43 (ci044),
+  unique i44 (ci045),
+  unique i45 (ci046),
+  unique i46 (ci047),
+  unique i47 (ci048),
+  unique i48 (ci049),
+  unique i49 (ci050),
+  unique i50 (ci051),
+  unique i51 (ci052),
+  unique i52 (ci053),
+  unique i53 (ci054),
+  unique i54 (ci055),
+  unique i55 (ci056),
+  unique i56 (ci057),
+  unique i57 (ci058),
+  unique i58 (ci059),
+  unique i59 (ci060),
+  unique i60 (ci061),
+  unique i61 (ci062),
+  unique i62 (ci063),
+  unique i63 (ci064)
+) engine=ndb;
+drop table t1;
+
 --echo End of 5.1 tests

=== added file 'mysql-test/suite/ndb_big/bug37983-master.opt'
--- a/mysql-test/suite/ndb_big/bug37983-master.opt	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_big/bug37983-master.opt	2011-10-10 07:40:12 +0000
@@ -0,0 +1 @@
+--testcase-timeout=30

=== added file 'mysql-test/suite/ndb_big/disabled.def'
--- a/mysql-test/suite/ndb_big/disabled.def	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_big/disabled.def	2011-10-10 07:40:12 +0000
@@ -0,0 +1 @@
+smoke : not ready for automated testing yet

=== modified file 'mysql-test/t/information_schema.test'
--- a/mysql-test/t/information_schema.test	2010-06-23 16:25:31 +0000
+++ b/mysql-test/t/information_schema.test	2011-10-17 14:16:56 +0000
@@ -44,8 +44,7 @@ insert into t5 values (10);
 create view v1 (c) as
  SELECT table_name FROM information_schema.TABLES
   WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND
-        table_name<>'ndb_binlog_index' AND
-        table_name<>'ndb_apply_status';
+        table_name not like 'ndb%';
 select * from v1;
 
 select c,table_name from v1
@@ -539,7 +538,7 @@ flush privileges;
 # Bug#9404 information_schema: Weird error messages
 # with SELECT SUM() ... GROUP BY queries
 #
-SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND table_name<>'ndb_binlog_index' AND table_name<>'ndb_apply_status' GROUP BY TABLE_SCHEMA;
+SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND table_name not like 'ndb%' GROUP BY TABLE_SCHEMA;
 
 
 #
@@ -921,7 +920,8 @@ SELECT t.table_name, c1.column_name
        information_schema.columns c1
        ON t.table_schema = c1.table_schema AND
           t.table_name = c1.table_name
-  WHERE t.table_schema = 'information_schema' AND
+  WHERE t.table_name not like 'ndb%' AND
+        t.table_schema = 'information_schema' AND
         c1.ordinal_position =
         ( SELECT COALESCE(MIN(c2.ordinal_position),1)
             FROM information_schema.columns c2
@@ -935,7 +935,8 @@ SELECT t.table_name, c1.column_name
        information_schema.columns c1
        ON t.table_schema = c1.table_schema AND
           t.table_name = c1.table_name
-  WHERE t.table_schema = 'information_schema' AND
+  WHERE t.table_name not like 'ndb%' AND
+        t.table_schema = 'information_schema' AND
         c1.ordinal_position =
         ( SELECT COALESCE(MIN(c2.ordinal_position),1)
             FROM information_schema.columns c2
@@ -1032,7 +1033,8 @@ select t.table_name, group_concat(t.tabl
 from information_schema.tables t
 inner join information_schema.columns c1
 on t.table_schema = c1.table_schema AND t.table_name = c1.table_name
-where t.table_schema = 'information_schema' and
+where t.table_name not like 'ndb%' and
+      t.table_schema = 'information_schema' and
         c1.ordinal_position =
         (select isnull(c2.column_type) -
          isnull(group_concat(c2.table_schema, '.', c2.table_name)) +

=== modified file 'mysql-test/t/information_schema_db.test'
--- a/mysql-test/t/information_schema_db.test	2009-09-28 11:25:47 +0000
+++ b/mysql-test/t/information_schema_db.test	2011-10-17 14:16:56 +0000
@@ -13,7 +13,7 @@ drop function if exists f2;
 
 use INFORMATION_SCHEMA;
 --replace_result Tables_in_INFORMATION_SCHEMA Tables_in_information_schema
-show tables;
+show tables where Tables_in_INFORMATION_SCHEMA NOT LIKE 'ndb%';
 --replace_result 'Tables_in_INFORMATION_SCHEMA (T%)' 'Tables_in_information_schema (T%)'
 show tables from INFORMATION_SCHEMA like 'T%';
 create database `inf%`;

=== modified file 'mysql-test/t/mysqlshow.test'
--- a/mysql-test/t/mysqlshow.test	2006-07-22 03:29:25 +0000
+++ b/mysql-test/t/mysqlshow.test	2011-10-17 14:16:56 +0000
@@ -1,6 +1,9 @@
 # Can't run test of external client with embedded server
 -- source include/not_embedded.inc
 
+# Test lists tables in Information_schema, and ndb adds some
+-- source include/not_ndb_is.inc
+
 --disable_warnings
 DROP TABLE IF EXISTS t1,t2,test1,test2;
 --enable_warnings

=== modified file 'sql/ha_ndb_index_stat.cc'
--- a/sql/ha_ndb_index_stat.cc	2011-09-19 09:26:42 +0000
+++ b/sql/ha_ndb_index_stat.cc	2011-10-17 12:43:31 +0000
@@ -64,7 +64,7 @@ struct Ndb_index_stat {
   uint sample_version;  /* goes with read_time */
   time_t check_time;    /* when checked for updated stats (>= read_time) */
   bool cache_clean;     /* old caches have been deleted */
-  uint force_update;    /* one-time force update from analyze table */
+  bool force_update;    /* one-time force update from analyze table */
   bool no_stats;        /* have detected that no stats exist */
   NdbIndexStat::Error error;
   time_t error_time;
@@ -115,11 +115,8 @@ bool ndb_index_stat_allow_flag= false;
 bool
 ndb_index_stat_allow(int flag= -1)
 {
-  if (flag != -1) {
-    pthread_mutex_lock(&ndb_index_stat_list_mutex);
+  if (flag != -1)
     ndb_index_stat_allow_flag= (bool)flag;
-    pthread_mutex_unlock(&ndb_index_stat_list_mutex);
-  }
   return ndb_index_stat_allow_flag;
 }
 
@@ -148,7 +145,7 @@ struct Ndb_index_stat_opt {
     uint flag;
   };
   enum Idx {
-    Iloop_checkon = 0,
+    Iloop_enable = 0,
     Iloop_idle = 1,
     Iloop_busy = 2,
     Iupdate_batch = 3,
@@ -186,15 +183,15 @@ Ndb_index_stat_opt::Ndb_index_stat_opt(c
   val[I##aname].maxval = amaxval; \
   val[I##aname].unit = aunit; \
   val[I##aname].flag = aflag
-  ival(loop_checkon, 1000, 0, ~0, Umsec, 0);
+  ival(loop_enable, 1000, 0, ~0, Umsec, 0);
   ival(loop_idle, 1000, 0, ~0, Umsec, 0);
   ival(loop_busy, 100, 0, ~0, Umsec, 0);
   ival(update_batch, 1, 1, ~0, Usize, 0);
   ival(read_batch, 4, 1, ~0, Usize, 0);
   ival(idle_batch, 32, 1, ~0, Usize, 0);
-  ival(check_batch, 32, 1, ~0, Usize, 0);
-  ival(check_delay, 60, 0, ~0, Utime, 0);
-  ival(clean_delay, 0, 0, ~0, Utime, 0);
+  ival(check_batch, 8, 1, ~0, Usize, 0);
+  ival(check_delay, 600, 0, ~0, Utime, 0);
+  ival(clean_delay, 60, 0, ~0, Utime, 0);
   ival(delete_batch, 8, 1, ~0, Usize, 0);
   ival(error_batch, 4, 1, ~0, Usize, 0);
   ival(error_delay, 60, 0, ~0, Utime, 0);
@@ -498,40 +495,99 @@ ndb_index_stat_option_update(MYSQL_THD,
 /* Global stuff */
 
 struct Ndb_index_stat_glob {
-  uint list_count[Ndb_index_stat::LT_Count]; /* Temporary use */
-  uint total_count;
+  bool th_allow;          /* Queries allowed */
+  bool th_enable;         /* Stats thread idea of ndb_index_stat_enable */
+  bool th_busy;           /* Stats thread is busy-looping */
+  uint th_loop;           /* Stats thread current loop wait in ms */
   uint force_update;
   uint wait_update;
   uint no_stats;
+  uint wait_stats;
+  uint event_ok;          /* Events received for known index */
+  uint event_miss;        /* Events received for unknown index */
+  char status[2][512];
+  uint status_i;
   uint cache_query_bytes; /* In use */
   uint cache_clean_bytes; /* Obsolete versions not yet removed */
-  Ndb_index_stat_glob() :
-    total_count(0),
-    force_update(0),
-    wait_update(0),
-    no_stats(0),
-    cache_query_bytes(0),
-    cache_clean_bytes(0)
-  {
-  }
-  void set_list_count()
-  {
-    total_count= 0;
-    int lt;
-    for (lt= 0; lt < Ndb_index_stat::LT_Count; lt++)
-    {
-      const Ndb_index_stat_list &list= ndb_index_stat_list[lt];
-      list_count[lt]= list.count;
-      total_count++;
-    }
-  }
-  void set_status_variables()
-  {
-    g_ndb_status_index_stat_cache_query= cache_query_bytes;
-    g_ndb_status_index_stat_cache_clean= cache_clean_bytes;
-  }
+
+  Ndb_index_stat_glob();
+  void set_status();
 };
 
+Ndb_index_stat_glob::Ndb_index_stat_glob()
+{
+  th_allow= false;
+  th_enable= false;
+  th_busy= false;
+  th_loop= 0;
+  force_update= 0;
+  wait_update= 0;
+  no_stats= 0;
+  wait_stats= 0;
+  event_ok= 0;
+  event_miss= 0;
+  memset(status, 0, sizeof(status));
+  status_i= 0;
+  cache_query_bytes= 0;
+  cache_clean_bytes= 0;
+}
+
+/* Update status variable (must hold stat_mutex) */
+void
+Ndb_index_stat_glob::set_status()
+{
+  const Ndb_index_stat_opt &opt= ndb_index_stat_opt;
+  char* p= status[status_i];
+
+  // stats thread
+  th_allow= ndb_index_stat_allow();
+  sprintf(p, "allow:%d,enable:%d,busy:%d,loop:%ums",
+             th_allow, th_enable, th_busy, th_loop);
+  p+= strlen(p);
+
+  // entry lists
+  strcpy(p, ",list:(");
+  p+= strlen(p);
+  uint list_count= 0;
+  for (int lt= 1; lt < Ndb_index_stat::LT_Count; lt++)
+  {
+    const Ndb_index_stat_list &list= ndb_index_stat_list[lt];
+    sprintf(p, "%s:%u,", list.name, list.count);
+    p+= strlen(p);
+    list_count+= list.count;
+  }
+  sprintf(p, "%s:%u)", "total", list_count);
+  p+= strlen(p);
+
+  // special counters
+  sprintf(p, ",analyze:(queue:%u,wait:%u)", force_update, wait_update);
+  p+= strlen(p);
+  sprintf(p, ",stats:(none:%u,wait:%u)", no_stats, wait_stats);
+  p+= strlen(p);
+
+  // events
+  sprintf(p, ",events:(ok:%u,miss:%u)", event_ok, event_miss);
+  p+= strlen(p);
+
+  // cache size
+  const uint cache_limit= opt.get(Ndb_index_stat_opt::Icache_limit);
+  const uint cache_total= cache_query_bytes + cache_clean_bytes;
+  double cache_pct= (double)0.0;
+  if (cache_limit != 0)
+    cache_pct= (double)100.0 * (double)cache_total / (double)cache_limit;
+  sprintf(p, ",cache:(query:%u,clean:%u,total:%.2f%%)",
+             cache_query_bytes, cache_clean_bytes, cache_pct);
+  p+= strlen(p);
+
+  // alternating status buffers to keep this lock short
+  pthread_mutex_lock(&LOCK_global_system_variables);
+  g_ndb_status_index_stat_status= status[status_i];
+  status_i= (status_i + 1) % 2;
+  g_ndb_status_index_stat_cache_query= cache_query_bytes;
+  g_ndb_status_index_stat_cache_clean= cache_clean_bytes;
+  pthread_mutex_unlock(&LOCK_global_system_variables);
+}
+
 Ndb_index_stat_glob ndb_index_stat_glob;
 
 /* Shared index entries */
@@ -550,7 +606,7 @@ Ndb_index_stat::Ndb_index_stat()
   sample_version= 0;
   check_time= 0;
   cache_clean= false;
-  force_update= 0;
+  force_update= false;
   no_stats= false;
   error_time= 0;
   error_count= 0;
@@ -603,13 +659,13 @@ Ndb_index_stat_list::Ndb_index_stat_list
 
 Ndb_index_stat_list ndb_index_stat_list[Ndb_index_stat::LT_Count] = {
   Ndb_index_stat_list(0, 0),
-  Ndb_index_stat_list(Ndb_index_stat::LT_New,    "New"),
-  Ndb_index_stat_list(Ndb_index_stat::LT_Update, "Update"),
-  Ndb_index_stat_list(Ndb_index_stat::LT_Read,   "Read"),
-  Ndb_index_stat_list(Ndb_index_stat::LT_Idle,   "Idle"),
-  Ndb_index_stat_list(Ndb_index_stat::LT_Check,  "Check"),
-  Ndb_index_stat_list(Ndb_index_stat::LT_Delete, "Delete"),
-  Ndb_index_stat_list(Ndb_index_stat::LT_Error,  "Error")
+  Ndb_index_stat_list(Ndb_index_stat::LT_New,    "new"),
+  Ndb_index_stat_list(Ndb_index_stat::LT_Update, "update"),
+  Ndb_index_stat_list(Ndb_index_stat::LT_Read,   "read"),
+  Ndb_index_stat_list(Ndb_index_stat::LT_Idle,   "idle"),
+  Ndb_index_stat_list(Ndb_index_stat::LT_Check,  "check"),
+  Ndb_index_stat_list(Ndb_index_stat::LT_Delete, "delete"),
+  Ndb_index_stat_list(Ndb_index_stat::LT_Error,  "error")
 };
 
 void
@@ -687,16 +743,22 @@ ndb_index_stat_force_update(Ndb_index_st
   Ndb_index_stat_glob &glob= ndb_index_stat_glob;
   if (onoff)
   {
-    /* One more request */
-    glob.force_update++;
-    st->force_update++;
+    if (!st->force_update)
+    {
+      glob.force_update++;
+      st->force_update= true;
+      glob.set_status();
+    }
   }
   else
   {
-    /* All done */
-    assert(glob.force_update >= st->force_update);
-    glob.force_update-= st->force_update;
-    st->force_update= 0;
+    if (st->force_update)
+    {
+      assert(glob.force_update != 0);
+      glob.force_update--;
+      st->force_update= false;
+      glob.set_status();
+    }
   }
 }
 
@@ -717,6 +779,7 @@ ndb_index_stat_no_stats(Ndb_index_stat *
       glob.no_stats-= 1;
       st->no_stats= false;
     }
+    glob.set_status();
   }
 }
 
@@ -798,6 +861,8 @@ ndb_index_stat_get_share(NDB_SHARE *shar
                          bool allow_add,
                          bool force_update)
 {
+  Ndb_index_stat_glob &glob= ndb_index_stat_glob;
+
   pthread_mutex_lock(&share->mutex);
   pthread_mutex_lock(&ndb_index_stat_list_mutex);
   pthread_mutex_lock(&ndb_index_stat_stat_mutex);
@@ -829,6 +894,7 @@ ndb_index_stat_get_share(NDB_SHARE *shar
       }
       ndb_index_stat_add_share(share, st, st_last);
       ndb_index_stat_list_add(st, Ndb_index_stat::LT_New);
+      glob.set_status();
     }
     if (force_update)
       ndb_index_stat_force_update(st, true);
@@ -845,6 +911,7 @@ ndb_index_stat_get_share(NDB_SHARE *shar
 void
 ndb_index_stat_free(Ndb_index_stat *st)
 {
+  Ndb_index_stat_glob &glob= ndb_index_stat_glob;
   pthread_mutex_lock(&ndb_index_stat_list_mutex);
   NDB_SHARE *share= st->share;
   assert(share != 0);
@@ -853,8 +920,10 @@ ndb_index_stat_free(Ndb_index_stat *st)
   Ndb_index_stat *st_tail= 0;
   Ndb_index_stat *st_loop= share->index_stat_list;
   bool found= false;
-  while (st_loop != 0) {
-    if (st == st_loop) {
+  while (st_loop != 0)
+  {
+    if (st == st_loop)
+    {
       st->share= 0;
       assert(st->lt != 0);
       assert(st->lt != Ndb_index_stat::LT_Delete);
@@ -862,7 +931,9 @@ ndb_index_stat_free(Ndb_index_stat *st)
       st_loop= st_loop->share_next;
       assert(!found);
       found++;
-    } else {
+    }
+    else
+    {
       if (st_head == 0)
         st_head= st_loop;
       else
@@ -874,12 +945,17 @@ ndb_index_stat_free(Ndb_index_stat *st)
   }
   assert(found);
   share->index_stat_list= st_head;
+
+  pthread_mutex_lock(&ndb_index_stat_stat_mutex);
+  glob.set_status();
+  pthread_mutex_unlock(&ndb_index_stat_stat_mutex);
   pthread_mutex_unlock(&ndb_index_stat_list_mutex);
 }
 
 void
 ndb_index_stat_free(NDB_SHARE *share)
 {
+  Ndb_index_stat_glob &glob= ndb_index_stat_glob;
   pthread_mutex_lock(&ndb_index_stat_list_mutex);
   Ndb_index_stat *st;
   while ((st= share->index_stat_list) != 0)
@@ -890,6 +966,9 @@ ndb_index_stat_free(NDB_SHARE *share)
     assert(st->lt != Ndb_index_stat::LT_Delete);
     ndb_index_stat_list_move(st, Ndb_index_stat::LT_Delete);
   }
+  pthread_mutex_lock(&ndb_index_stat_stat_mutex);
+  glob.set_status();
+  pthread_mutex_unlock(&ndb_index_stat_stat_mutex);
   pthread_mutex_unlock(&ndb_index_stat_list_mutex);
 }
 
@@ -946,7 +1025,6 @@ ndb_index_stat_cache_move(Ndb_index_stat
   glob.cache_query_bytes-= old_query_bytes;
   glob.cache_query_bytes+= new_query_bytes;
   glob.cache_clean_bytes+= old_query_bytes;
-  glob.set_status_variables();
 }
 
 void
@@ -962,7 +1040,6 @@ ndb_index_stat_cache_clean(Ndb_index_sta
   st->is->clean_cache();
   assert(glob.cache_clean_bytes >= old_clean_bytes);
   glob.cache_clean_bytes-= old_clean_bytes;
-  glob.set_status_variables();
 }
 
 /* Misc in/out parameters for process steps */
@@ -986,9 +1063,8 @@ struct Ndb_index_stat_proc {
 void
 ndb_index_stat_proc_new(Ndb_index_stat_proc &pr, Ndb_index_stat *st)
 {
-  if (st->error.code != 0)
-    pr.lt= Ndb_index_stat::LT_Error;
-  else if (st->force_update)
+  assert(st->error.code == 0);
+  if (st->force_update)
     pr.lt= Ndb_index_stat::LT_Update;
   else
     pr.lt= Ndb_index_stat::LT_Read;
@@ -997,6 +1073,7 @@ ndb_index_stat_proc_new(Ndb_index_stat_p
 void
 ndb_index_stat_proc_new(Ndb_index_stat_proc &pr)
 {
+  Ndb_index_stat_glob &glob= ndb_index_stat_glob;
   pthread_mutex_lock(&ndb_index_stat_list_mutex);
   const int lt= Ndb_index_stat::LT_New;
   Ndb_index_stat_list &list= ndb_index_stat_list[lt];
@@ -1008,8 +1085,12 @@ ndb_index_stat_proc_new(Ndb_index_stat_p
     st_loop= st_loop->list_next;
     DBUG_PRINT("index_stat", ("st %s proc %s", st->id, list.name));
     ndb_index_stat_proc_new(pr, st);
+    assert(pr.lt != lt);
     ndb_index_stat_list_move(st, pr.lt);
   }
+  pthread_mutex_lock(&ndb_index_stat_stat_mutex);
+  glob.set_status();
+  pthread_mutex_unlock(&ndb_index_stat_stat_mutex);
   pthread_mutex_unlock(&ndb_index_stat_list_mutex);
 }
 
@@ -1028,6 +1109,7 @@ ndb_index_stat_proc_update(Ndb_index_sta
 void
 ndb_index_stat_proc_update(Ndb_index_stat_proc &pr)
 {
+  Ndb_index_stat_glob &glob= ndb_index_stat_glob;
   const int lt= Ndb_index_stat::LT_Update;
   Ndb_index_stat_list &list= ndb_index_stat_list[lt];
   const Ndb_index_stat_opt &opt= ndb_index_stat_opt;
@@ -1041,7 +1123,12 @@ ndb_index_stat_proc_update(Ndb_index_sta
     st_loop= st_loop->list_next;
     DBUG_PRINT("index_stat", ("st %s proc %s", st->id, list.name));
     ndb_index_stat_proc_update(pr, st);
+    assert(pr.lt != lt);
     ndb_index_stat_list_move(st, pr.lt);
+    // db op so update status after each
+    pthread_mutex_lock(&ndb_index_stat_stat_mutex);
+    glob.set_status();
+    pthread_mutex_unlock(&ndb_index_stat_stat_mutex);
     cnt++;
   }
   if (cnt == batch)
@@ -1056,12 +1143,12 @@ ndb_index_stat_proc_read(Ndb_index_stat_
   {
     pthread_mutex_lock(&ndb_index_stat_stat_mutex);
     ndb_index_stat_error(st, "read_stat", __LINE__);
-    const uint force_update= st->force_update;
+    const bool force_update= st->force_update;
     ndb_index_stat_force_update(st, false);
 
     /* no stats is not unexpected error, unless analyze was done */
     if (st->is->getNdbError().code == NdbIndexStat::NoIndexStats &&
-        force_update == 0)
+        !force_update)
     {
       ndb_index_stat_no_stats(st, true);
       pr.lt= Ndb_index_stat::LT_Idle;
@@ -1096,6 +1183,7 @@ ndb_index_stat_proc_read(Ndb_index_stat_
 void
 ndb_index_stat_proc_read(Ndb_index_stat_proc &pr)
 {
+  Ndb_index_stat_glob &glob= ndb_index_stat_glob;
   const int lt= Ndb_index_stat::LT_Read;
   Ndb_index_stat_list &list= ndb_index_stat_list[lt];
   const Ndb_index_stat_opt &opt= ndb_index_stat_opt;
@@ -1109,14 +1197,18 @@ ndb_index_stat_proc_read(Ndb_index_stat_
     st_loop= st_loop->list_next;
     DBUG_PRINT("index_stat", ("st %s proc %s", st->id, list.name));
     ndb_index_stat_proc_read(pr, st);
+    assert(pr.lt != lt);
     ndb_index_stat_list_move(st, pr.lt);
+    // db op so update status after each
+    pthread_mutex_lock(&ndb_index_stat_stat_mutex);
+    glob.set_status();
+    pthread_mutex_unlock(&ndb_index_stat_stat_mutex);
     cnt++;
   }
   if (cnt == batch)
     pr.busy= true;
 }
 
-// wl4124_todo detect force_update faster
 void
 ndb_index_stat_proc_idle(Ndb_index_stat_proc &pr, Ndb_index_stat *st)
 {
@@ -1141,12 +1233,20 @@ ndb_index_stat_proc_idle(Ndb_index_stat_
   if (st->force_update)
   {
     pr.lt= Ndb_index_stat::LT_Update;
+    pr.busy= true;
     return;
   }
   if (check_wait <= 0)
   {
-    pr.lt= Ndb_index_stat::LT_Check;
-    return;
+    // avoid creating "idle" entries on Check list
+    const int lt_check= Ndb_index_stat::LT_Check;
+    const Ndb_index_stat_list &list_check= ndb_index_stat_list[lt_check];
+    const uint check_batch= opt.get(Ndb_index_stat_opt::Icheck_batch);
+    if (list_check.count < check_batch)
+    {
+      pr.lt= Ndb_index_stat::LT_Check;
+      return;
+    }
   }
   pr.lt= Ndb_index_stat::LT_Idle;
 }
@@ -1154,10 +1254,26 @@ ndb_index_stat_proc_idle(Ndb_index_stat_
 void
 ndb_index_stat_proc_idle(Ndb_index_stat_proc &pr)
 {
+  Ndb_index_stat_glob &glob= ndb_index_stat_glob;
   const int lt= Ndb_index_stat::LT_Idle;
   Ndb_index_stat_list &list= ndb_index_stat_list[lt];
   const Ndb_index_stat_opt &opt= ndb_index_stat_opt;
-  const uint batch= opt.get(Ndb_index_stat_opt::Iidle_batch);
+  uint batch= opt.get(Ndb_index_stat_opt::Iidle_batch);
+  {
+    pthread_mutex_lock(&ndb_index_stat_stat_mutex);
+    const Ndb_index_stat_glob &glob= ndb_index_stat_glob;
+    const int lt_update= Ndb_index_stat::LT_Update;
+    const Ndb_index_stat_list &list_update= ndb_index_stat_list[lt_update];
+    if (glob.force_update > list_update.count)
+    {
+      // probably there is a force update waiting on Idle list
+      batch= ~0;
+    }
+    pthread_mutex_unlock(&ndb_index_stat_stat_mutex);
+  }
+  // entry may be moved to end of this list
+  if (batch > list.count)
+    batch= list.count;
   pr.now= ndb_index_stat_time();
 
   Ndb_index_stat *st_loop= list.head;
@@ -1172,8 +1288,10 @@ ndb_index_stat_proc_idle(Ndb_index_stat_
     ndb_index_stat_list_move(st, pr.lt);
     cnt++;
   }
-  if (cnt == batch)
-    pr.busy= true;
+  // full batch does not set pr.busy
+  pthread_mutex_lock(&ndb_index_stat_stat_mutex);
+  glob.set_status();
+  pthread_mutex_unlock(&ndb_index_stat_stat_mutex);
 }
 
 void
@@ -1213,6 +1331,7 @@ ndb_index_stat_proc_check(Ndb_index_stat
 void
 ndb_index_stat_proc_check(Ndb_index_stat_proc &pr)
 {
+  Ndb_index_stat_glob &glob= ndb_index_stat_glob;
   const int lt= Ndb_index_stat::LT_Check;
   Ndb_index_stat_list &list= ndb_index_stat_list[lt];
   const Ndb_index_stat_opt &opt= ndb_index_stat_opt;
@@ -1226,7 +1345,12 @@ ndb_index_stat_proc_check(Ndb_index_stat
     st_loop= st_loop->list_next;
     DBUG_PRINT("index_stat", ("st %s proc %s", st->id, list.name));
     ndb_index_stat_proc_check(pr, st);
+    assert(pr.lt != lt);
     ndb_index_stat_list_move(st, pr.lt);
+    // db op so update status after each
+    pthread_mutex_lock(&ndb_index_stat_stat_mutex);
+    glob.set_status();
+    pthread_mutex_unlock(&ndb_index_stat_stat_mutex);
     cnt++;
   }
   if (cnt == batch)
@@ -1236,6 +1360,8 @@ ndb_index_stat_proc_check(Ndb_index_stat
 void
 ndb_index_stat_proc_evict(Ndb_index_stat_proc &pr, Ndb_index_stat *st)
 {
+  Ndb_index_stat_glob &glob= ndb_index_stat_glob;
+
   NdbIndexStat::Head head;
   NdbIndexStat::CacheInfo infoBuild;
   NdbIndexStat::CacheInfo infoQuery;
@@ -1257,6 +1383,10 @@ ndb_index_stat_proc_evict(Ndb_index_stat
   ndb_index_stat_cache_move(st);
   ndb_index_stat_cache_move(st);
   ndb_index_stat_cache_clean(st);
+
+  pthread_mutex_lock(&ndb_index_stat_stat_mutex);
+  glob.set_status();
+  pthread_mutex_unlock(&ndb_index_stat_stat_mutex);
 }
 
 bool
@@ -1349,6 +1479,7 @@ ndb_index_stat_proc_evict(Ndb_index_stat
 void
 ndb_index_stat_proc_delete(Ndb_index_stat_proc &pr)
 {
+  Ndb_index_stat_glob &glob= ndb_index_stat_glob;
   const int lt= Ndb_index_stat::LT_Delete;
   Ndb_index_stat_list &list= ndb_index_stat_list[lt];
   const Ndb_index_stat_opt &opt= ndb_index_stat_opt;
@@ -1370,6 +1501,10 @@ ndb_index_stat_proc_delete(Ndb_index_sta
   }
   if (cnt == batch)
     pr.busy= true;
+
+  pthread_mutex_lock(&ndb_index_stat_stat_mutex);
+  glob.set_status();
+  pthread_mutex_unlock(&ndb_index_stat_stat_mutex);
 }
 
 void
@@ -1400,10 +1535,14 @@ ndb_index_stat_proc_error(Ndb_index_stat
 void
 ndb_index_stat_proc_error(Ndb_index_stat_proc &pr)
 {
+  Ndb_index_stat_glob &glob= ndb_index_stat_glob;
   const int lt= Ndb_index_stat::LT_Error;
   Ndb_index_stat_list &list= ndb_index_stat_list[lt];
   const Ndb_index_stat_opt &opt= ndb_index_stat_opt;
-  const uint batch= opt.get(Ndb_index_stat_opt::Ierror_batch);
+  uint batch= opt.get(Ndb_index_stat_opt::Ierror_batch);
+  // entry may be moved to end of this list
+  if (batch > list.count)
+    batch= list.count;
   pr.now= ndb_index_stat_time();
 
   Ndb_index_stat *st_loop= list.head;
@@ -1414,11 +1553,14 @@ ndb_index_stat_proc_error(Ndb_index_stat
     st_loop= st_loop->list_next;
     DBUG_PRINT("index_stat", ("st %s proc %s", st->id, list.name));
     ndb_index_stat_proc_error(pr, st);
+    // rotates list if entry remains LT_Error
     ndb_index_stat_list_move(st, pr.lt);
     cnt++;
   }
-  if (cnt == batch)
-    pr.busy= true;
+  // full batch does not set pr.busy
+  pthread_mutex_lock(&ndb_index_stat_stat_mutex);
+  glob.set_status();
+  pthread_mutex_unlock(&ndb_index_stat_stat_mutex);
 }
 
 void
@@ -1437,6 +1579,7 @@ ndb_index_stat_proc_event(Ndb_index_stat
 void
 ndb_index_stat_proc_event(Ndb_index_stat_proc &pr)
 {
+  Ndb_index_stat_glob &glob= ndb_index_stat_glob;
   NdbIndexStat *is= pr.is_util;
   Ndb *ndb= pr.ndb;
   int ret;
@@ -1482,75 +1625,90 @@ ndb_index_stat_proc_event(Ndb_index_stat
       ndb_index_stat_proc_event(pr, st);
       if (pr.lt != st->lt)
         ndb_index_stat_list_move(st, pr.lt);
+      glob.event_ok++;
     }
     else
     {
       DBUG_PRINT("index_stat", ("entry not found in this mysqld"));
+      glob.event_miss++;
     }
   }
+  pthread_mutex_lock(&ndb_index_stat_stat_mutex);
+  glob.set_status();
+  pthread_mutex_unlock(&ndb_index_stat_stat_mutex);
 }
 
 #ifndef DBUG_OFF
 void
-ndb_index_stat_report(const Ndb_index_stat_glob& old_glob)
+ndb_index_stat_list_verify(int lt)
 {
-  Ndb_index_stat_glob new_glob= ndb_index_stat_glob;
-  new_glob.set_list_count();
-
-  /* List counts */
+  const Ndb_index_stat_list &list= ndb_index_stat_list[lt];
+  const Ndb_index_stat *st= list.head;
+  uint count= 0;
+  while (st != 0)
   {
-    const uint (&old_count)[Ndb_index_stat::LT_Count]= old_glob.list_count;
-    const uint (&new_count)[Ndb_index_stat::LT_Count]= new_glob.list_count;
-    bool any= false;
-    int lt;
-    for (lt=1; lt < Ndb_index_stat::LT_Count; lt++)
-    {
-      const Ndb_index_stat_list &list= ndb_index_stat_list[lt];
-      const char* name= list.name;
-      if (old_count[lt] != new_count[lt])
-      {
-        DBUG_PRINT("index_stat", ("%s: %u -> %u",
-                                  name, old_count[lt], new_count[lt]));
-        any= true;
-      }
+    count++;
+    assert(count <= list.count);
+    if (st->list_prev != 0)
+    {
+      assert(st->list_prev->list_next == st);
     }
-    if (any)
+    if (st->list_next != 0)
     {
-      const uint bufsz= 20 * Ndb_index_stat::LT_Count;
-      char buf[bufsz];
-      char *ptr= buf;
-      for (lt= 1; lt < Ndb_index_stat::LT_Count; lt++)
-      {
-        const Ndb_index_stat_list &list= ndb_index_stat_list[lt];
-        const char* name= list.name;
-        sprintf(ptr, " %s:%u", name, new_count[lt]);
-        ptr+= strlen(ptr);
-      }
-      DBUG_PRINT("index_stat", ("list:%s", buf));
+      assert(st->list_next->list_prev == st);
+    }
+    if (count == 1)
+    {
+      assert(st == list.head);
+    }
+    if (count == list.count)
+    {
+      assert(st == list.tail);
+    }
+    if (st == list.head)
+    {
+      assert(count == 1);
+      assert(st->list_prev == 0);
+    }
+    if (st == list.tail)
+    {
+      assert(count == list.count);
+      assert(st->list_next == 0);
     }
+    const Ndb_index_stat *st2= st->list_next;
+    uint guard= 0;
+    while (st2 != 0)
+    {
+      assert(st != st2);
+      guard++;
+      assert(guard <= list.count);
+      st2= st2->list_next;
+    }
+    st= st->list_next;
   }
+  assert(count == list.count);
+}
+
+void
+ndb_index_stat_list_verify()
+{
+  pthread_mutex_lock(&ndb_index_stat_list_mutex);
+  for (int lt= 1; lt < Ndb_index_stat::LT_Count; lt++)
+    ndb_index_stat_list_verify(lt);
+  pthread_mutex_unlock(&ndb_index_stat_list_mutex);
+}
 
-  /* Cache summary */
+void
+ndb_index_stat_report(const Ndb_index_stat_glob& old_glob)
+{
+  const Ndb_index_stat_glob &new_glob= ndb_index_stat_glob;
+  const char *old_status= old_glob.status[old_glob.status_i];
+  const char *new_status= new_glob.status[new_glob.status_i];
+
+  if (strcmp(old_status, new_status) != 0)
   {
-    const Ndb_index_stat_opt &opt= ndb_index_stat_opt;
-    uint query_size= new_glob.cache_query_bytes;
-    uint clean_size= new_glob.cache_clean_bytes;
-    uint total_size= query_size + clean_size;
-    const uint limit= opt.get(Ndb_index_stat_opt::Icache_limit);
-    double pct= 100.0;
-    if (limit != 0)
-      pct= 100.0 * (double)total_size / (double)limit;
-    DBUG_PRINT("index_stat", ("cache query:%u clean:%u (%.2f pct)",
-                              query_size, clean_size, pct));
-  }
-
-  /* Updates waited for and forced updates */
-  {
-    uint wait_update= new_glob.wait_update;
-    uint force_update= new_glob.force_update;
-    uint no_stats= new_glob.no_stats;
-    DBUG_PRINT("index_stat", ("wait update:%u force update:%u no stats:%u",
-                              wait_update, force_update, no_stats));
+    DBUG_PRINT("index_stat", ("old_status: %s", old_status));
+    DBUG_PRINT("index_stat", ("new_status: %s", new_status));
   }
 }
 #endif
@@ -1558,13 +1716,12 @@ ndb_index_stat_report(const Ndb_index_st
 void
 ndb_index_stat_proc(Ndb_index_stat_proc &pr)
 {
+  DBUG_ENTER("ndb_index_stat_proc");
 #ifndef DBUG_OFF
+  ndb_index_stat_list_verify();
   Ndb_index_stat_glob old_glob= ndb_index_stat_glob;
-  old_glob.set_list_count();
 #endif
 
-  DBUG_ENTER("ndb_index_stat_proc");
-
   ndb_index_stat_proc_new(pr);
   ndb_index_stat_proc_update(pr);
   ndb_index_stat_proc_read(pr);
@@ -1576,6 +1733,7 @@ ndb_index_stat_proc(Ndb_index_stat_proc
   ndb_index_stat_proc_event(pr);
 
 #ifndef DBUG_OFF
+  ndb_index_stat_list_verify();
   ndb_index_stat_report(old_glob);
 #endif
   DBUG_VOID_RETURN;
@@ -1593,7 +1751,9 @@ ndb_index_stat_end()
    * in LT_Delete.  The first two steps here should be unnecessary.
    */
 
+  pthread_mutex_lock(&ndb_index_stat_stat_mutex);
   ndb_index_stat_allow(0);
+  pthread_mutex_unlock(&ndb_index_stat_stat_mutex);
 
   int lt;
   for (lt= 1; lt < Ndb_index_stat::LT_Count; lt++)
@@ -1739,6 +1899,7 @@ ndb_index_stat_thread_func(void *arg __a
   my_thread_init();
   DBUG_ENTER("ndb_index_stat_thread_func");
 
+  Ndb_index_stat_glob &glob= ndb_index_stat_glob;
   Ndb_index_stat_proc pr;
 
   bool have_listener;
@@ -1823,7 +1984,7 @@ ndb_index_stat_thread_func(void *arg __a
   }
 
   /* Get thd_ndb for this thread */
-  if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
+  if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb(thd)))
   {
     sql_print_error("Could not allocate Thd_ndb object");
     pthread_mutex_lock(&LOCK_ndb_index_stat_thread);
@@ -1840,7 +2001,15 @@ ndb_index_stat_thread_func(void *arg __a
   }
   pr.ndb= thd_ndb->ndb;
 
+  pthread_mutex_lock(&ndb_index_stat_stat_mutex);
   ndb_index_stat_allow(1);
+  pthread_mutex_unlock(&ndb_index_stat_stat_mutex);
+
+  /* Fill in initial status variable */
+  pthread_mutex_lock(&ndb_index_stat_stat_mutex);
+  glob.set_status();
+  pthread_mutex_unlock(&ndb_index_stat_stat_mutex);
+
   bool enable_ok;
   enable_ok= false;
 
@@ -1907,7 +2076,7 @@ ndb_index_stat_thread_func(void *arg __a
     const Ndb_index_stat_opt &opt= ndb_index_stat_opt;
     uint msecs= 0;
     if (!enable_ok)
-      msecs= opt.get(Ndb_index_stat_opt::Iloop_checkon);
+      msecs= opt.get(Ndb_index_stat_opt::Iloop_enable);
     else if (!pr.busy)
       msecs= opt.get(Ndb_index_stat_opt::Iloop_idle);
     else
@@ -1915,6 +2084,14 @@ ndb_index_stat_thread_func(void *arg __a
     DBUG_PRINT("index_stat", ("sleep %dms", msecs));
 
     set_timespec_nsec(abstime, msecs * 1000000ULL);
+
+    /* Update status variable */
+    glob.th_enable= enable_ok;
+    glob.th_busy= pr.busy;
+    glob.th_loop= msecs;
+    pthread_mutex_lock(&ndb_index_stat_stat_mutex);
+    glob.set_status();
+    pthread_mutex_unlock(&ndb_index_stat_stat_mutex);
   }
 
 ndb_index_stat_thread_end:
@@ -1973,6 +2150,7 @@ ndb_index_stat_wait(Ndb_index_stat *st,
 {
   DBUG_ENTER("ndb_index_stat_wait");
 
+  Ndb_index_stat_glob &glob= ndb_index_stat_glob;
   pthread_mutex_lock(&ndb_index_stat_stat_mutex);
   int err= 0;
   uint count= 0;
@@ -1982,6 +2160,10 @@ ndb_index_stat_wait(Ndb_index_stat *st,
     int ret= 0;
     if (count == 0)
     {
+      if (!from_analyze)
+        glob.wait_stats++;
+      else
+        glob.wait_update++;
       if (st->lt == Ndb_index_stat::LT_Error && !from_analyze)
       {
         err= Ndb_index_stat_error_HAS_ERROR;
@@ -2019,6 +2201,16 @@ ndb_index_stat_wait(Ndb_index_stat *st,
       break;
     }
   }
+  if (!from_analyze)
+  {
+    assert(glob.wait_stats != 0);
+    glob.wait_stats--;
+  }
+  else
+  {
+    assert(glob.wait_update != 0);
+    glob.wait_update--;
+  }
   pthread_mutex_unlock(&ndb_index_stat_stat_mutex);
   if (err != 0)
   {

=== modified file 'sql/ha_ndb_index_stat.h'
--- a/sql/ha_ndb_index_stat.h	2011-08-31 10:53:27 +0000
+++ b/sql/ha_ndb_index_stat.h	2011-10-08 16:54:19 +0000
@@ -34,6 +34,7 @@ extern pthread_cond_t ndb_index_stat_sta
 
 /* these have to live in ha_ndbcluster.cc */
 extern bool ndb_index_stat_get_enable(THD *thd);
+extern const char* g_ndb_status_index_stat_status;
 extern long g_ndb_status_index_stat_cache_query;
 extern long g_ndb_status_index_stat_cache_clean;
 

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2011-09-22 14:43:45 +0000
+++ b/sql/ha_ndbcluster.cc	2011-10-17 12:43:31 +0000
@@ -446,6 +446,7 @@ extern void ndb_index_stat_end();
 
 struct st_ndb_status g_ndb_status;
 
+const char *g_ndb_status_index_stat_status = "";
 long g_ndb_status_index_stat_cache_query = 0;
 long g_ndb_status_index_stat_cache_clean = 0;
 
@@ -781,6 +782,7 @@ static int show_ndb_server_api_stats(THD
 }
 
 SHOW_VAR ndb_status_index_stat_variables[]= {
+  {"status",          (char*) &g_ndb_status_index_stat_status, SHOW_CHAR_PTR},
   {"cache_query",     (char*) &g_ndb_status_index_stat_cache_query, SHOW_LONG},
   {"cache_clean",     (char*) &g_ndb_status_index_stat_cache_clean, SHOW_LONG},
   {NullS, NullS, SHOW_LONG}
@@ -11417,7 +11419,8 @@ int ha_ndbcluster::close(void)
   wait on condition for a Ndb object to be released.
   - Alt.2 Seize/release from pool, wait until next release 
 */
-Thd_ndb* ha_ndbcluster::seize_thd_ndb()
+Thd_ndb*
+ha_ndbcluster::seize_thd_ndb(THD * thd)
 {
   Thd_ndb *thd_ndb;
   DBUG_ENTER("seize_thd_ndb");
@@ -11440,6 +11443,10 @@ Thd_ndb* ha_ndbcluster::seize_thd_ndb()
     delete thd_ndb;
     thd_ndb= NULL;
   }
+  else
+  {
+    thd_ndb->ndb->setCustomData64(thd_get_thread_id(thd));
+  }
   DBUG_RETURN(thd_ndb);
 }
 
@@ -11476,7 +11483,10 @@ bool Thd_ndb::recycle_ndb(THD* thd)
                          ndb->getNdbError().message));
     DBUG_RETURN(false);
   }
-
+  else
+  {
+    ndb->setCustomData64(thd_get_thread_id(thd));
+  }
   DBUG_RETURN(true);
 }
 
@@ -11514,7 +11524,7 @@ Ndb* check_ndb_in_thd(THD* thd, bool val
   Thd_ndb *thd_ndb= get_thd_ndb(thd);
   if (!thd_ndb)
   {
-    if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
+    if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb(thd)))
       return NULL;
     set_thd_ndb(thd, thd_ndb);
   }
@@ -14848,7 +14858,7 @@ pthread_handler_t ndb_util_thread_func(v
   pthread_mutex_unlock(&LOCK_ndb_util_thread);
 
   /* Get thd_ndb for this thread */
-  if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
+  if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb(thd)))
   {
     sql_print_error("Could not allocate Thd_ndb object");
     pthread_mutex_lock(&LOCK_ndb_util_thread);
@@ -17308,12 +17318,8 @@ struct st_mysql_storage_engine ndbcluste
 { MYSQL_HANDLERTON_INTERFACE_VERSION };
 
 
-#include "ha_ndbinfo.h"
-
-extern struct st_mysql_sys_var* ndbinfo_system_variables[];
-
-struct st_mysql_storage_engine ndbinfo_storage_engine=
-{ MYSQL_HANDLERTON_INTERFACE_VERSION };
+extern struct st_mysql_plugin i_s_ndb_transid_mysql_connection_map_plugin;
+extern struct st_mysql_plugin ndbinfo_plugin;
 
 mysql_declare_plugin(ndbcluster)
 {
@@ -17330,20 +17336,9 @@ mysql_declare_plugin(ndbcluster)
   system_variables,           /* system variables */
   NULL                        /* config options                  */
 },
-{
-  MYSQL_STORAGE_ENGINE_PLUGIN,
-  &ndbinfo_storage_engine,
-  "ndbinfo",
-  "Sun Microsystems Inc.",
-  "MySQL Cluster system information storage engine",
-  PLUGIN_LICENSE_GPL,
-  ndbinfo_init,               /* plugin init */
-  ndbinfo_deinit,             /* plugin deinit */
-  0x0001,                     /* plugin version */
-  NULL,                       /* status variables */
-  ndbinfo_system_variables,   /* system variables */
-  NULL                        /* config options */
-}
+ndbinfo_plugin, /* ndbinfo plugin */
+/* IS plugin table which maps between mysql connection id and ndb trans-id */
+i_s_ndb_transid_mysql_connection_map_plugin
 mysql_declare_plugin_end;
 
 #endif

=== modified file 'sql/ha_ndbcluster.h'
--- a/sql/ha_ndbcluster.h	2011-09-07 22:50:01 +0000
+++ b/sql/ha_ndbcluster.h	2011-10-17 12:43:31 +0000
@@ -682,7 +682,7 @@ class ha_ndbcluster: public handler
   int ndb_update_row(const uchar *old_data, uchar *new_data,
                      int is_bulk_update);
 
-  static Thd_ndb* seize_thd_ndb();
+  static Thd_ndb* seize_thd_ndb(THD*);
   static void release_thd_ndb(Thd_ndb* thd_ndb);
  
 static void set_dbname(const char *pathname, char *dbname);

=== modified file 'sql/ha_ndbcluster_binlog.cc'
--- a/sql/ha_ndbcluster_binlog.cc	2011-09-21 10:11:58 +0000
+++ b/sql/ha_ndbcluster_binlog.cc	2011-10-17 12:43:31 +0000
@@ -2098,7 +2098,7 @@ int ndbcluster_log_schema_op(THD *thd,
   Thd_ndb *thd_ndb= get_thd_ndb(thd);
   if (!thd_ndb)
   {
-    if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
+    if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb(thd)))
     {
       sql_print_error("Could not allocate Thd_ndb object");
       DBUG_RETURN(1);
@@ -3508,7 +3508,7 @@ ndb_binlog_index_table__open(THD *thd,
   if (simple_open_n_lock_tables(thd, tables))
   {
     if (thd->killed)
-      sql_print_error("NDB Binlog: Opening ndb_binlog_index: killed");
+      DBUG_PRINT("error", ("NDB Binlog: Opening ndb_binlog_index: killed"));
     else
       sql_print_error("NDB Binlog: Opening ndb_binlog_index: %d, '%s'",
                       thd_stmt_da(thd)->sql_errno(),
@@ -3543,7 +3543,10 @@ ndb_binlog_index_table__write_rows(THD *
 
   if (ndb_binlog_index_table__open(thd, &binlog_tables, &ndb_binlog_index))
   {
-    sql_print_error("NDB Binlog: Unable to lock table ndb_binlog_index");
+    if (thd->killed)
+      DBUG_PRINT("error", ("NDB Binlog: Unable to lock table ndb_binlog_index, killed"));
+    else
+      sql_print_error("NDB Binlog: Unable to lock table ndb_binlog_index");
     error= -1;
     goto add_ndb_binlog_index_err;
   }
@@ -6762,7 +6765,7 @@ restart_cluster_failure:
   int have_injector_mutex_lock= 0;
   do_ndbcluster_binlog_close_connection= BCCC_exit;
 
-  if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
+  if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb(thd)))
   {
     sql_print_error("Could not allocate Thd_ndb object");
     ndb_binlog_thread_running= -1;
@@ -7492,6 +7495,7 @@ restart_cluster_failure:
               */
               if (thd->killed)
               {
+                DBUG_PRINT("error", ("Failed to write to ndb_binlog_index at shutdown, retrying"));
                 (void) mysql_mutex_lock(&LOCK_thread_count);
                 volatile THD::killed_state killed= thd->killed;
                 /* We are cleaning up, allow for flushing last epoch */

=== modified file 'sql/ha_ndbcluster_connection.cc'
--- a/sql/ha_ndbcluster_connection.cc	2011-06-30 15:59:25 +0000
+++ b/sql/ha_ndbcluster_connection.cc	2011-10-17 12:43:31 +0000
@@ -306,4 +306,115 @@ void ndb_get_connection_stats(Uint64* st
   }
 }
 
+static ST_FIELD_INFO ndb_transid_mysql_connection_map_fields_info[] =
+{
+  {
+    "mysql_connection_id",
+    MY_INT64_NUM_DECIMAL_DIGITS,
+    MYSQL_TYPE_LONGLONG,
+    0,
+    MY_I_S_UNSIGNED,
+    "",
+    SKIP_OPEN_TABLE
+  },
+
+  {
+    "node_id",
+    MY_INT64_NUM_DECIMAL_DIGITS,
+    MYSQL_TYPE_LONG,
+    0,
+    MY_I_S_UNSIGNED,
+    "",
+    SKIP_OPEN_TABLE
+  },
+  {
+    "ndb_transid",
+    MY_INT64_NUM_DECIMAL_DIGITS,
+    MYSQL_TYPE_LONGLONG,
+    0,
+    MY_I_S_UNSIGNED,
+    "",
+    SKIP_OPEN_TABLE
+  },
+
+  { 0, 0, MYSQL_TYPE_NULL, 0, 0, "", SKIP_OPEN_TABLE }
+};
+
+static
+int
+ndb_transid_mysql_connection_map_fill_table(THD* thd, TABLE_LIST* tables, COND* cond)
+{
+  DBUG_ENTER("ndb_transid_mysql_connection_map_fill_table");
+
+  if (check_global_access(thd, PROCESS_ACL))
+  {
+    DBUG_RETURN(0);
+  }
+
+  TABLE* table= tables->table;
+  for (uint i = 0; i<g_pool_alloc; i++)
+  {
+    if (g_pool[i])
+    {
+      g_pool[i]->lock_ndb_objects();
+      const Ndb * p = g_pool[i]->get_next_ndb_object(0);
+      while (p)
+      {
+        table->field[0]->set_notnull();
+        table->field[0]->store(p->getCustomData64(), true);
+        table->field[1]->set_notnull();
+        table->field[1]->store(g_pool[i]->node_id());
+        table->field[2]->set_notnull();
+        table->field[2]->store(p->getNextTransactionId(), true);
+        schema_table_store_record(thd, table);
+        p = g_pool[i]->get_next_ndb_object(p);
+      }
+      g_pool[i]->unlock_ndb_objects();
+    }
+  }
+
+  DBUG_RETURN(0);
+}
+
+static
+int
+ndb_transid_mysql_connection_map_init(void *p)
+{
+  DBUG_ENTER("ndb_transid_mysql_connection_map_init");
+  ST_SCHEMA_TABLE* schema = reinterpret_cast<ST_SCHEMA_TABLE*>(p);
+  schema->fields_info = ndb_transid_mysql_connection_map_fields_info;
+  schema->fill_table = ndb_transid_mysql_connection_map_fill_table;
+  DBUG_RETURN(0);
+}
+
+static
+int
+ndb_transid_mysql_connection_map_deinit(void *p)
+{
+  DBUG_ENTER("ndb_transid_mysql_connection_map_deinit");
+  DBUG_RETURN(0);
+}
+
+#include <mysql/plugin.h>
+static struct st_mysql_information_schema i_s_info =
+{
+  MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION
+};
+
+struct st_mysql_plugin i_s_ndb_transid_mysql_connection_map_plugin =
+{
+  MYSQL_INFORMATION_SCHEMA_PLUGIN,
+  &i_s_info,
+  "ndb_transid_mysql_connection_map",
+  "Oracle Corporation",
+  "Map between mysql connection id and ndb transaction id",
+  PLUGIN_LICENSE_GPL,
+  ndb_transid_mysql_connection_map_init,
+  ndb_transid_mysql_connection_map_deinit,
+  0x0001,
+  NULL,
+  NULL,
+  NULL
+};
+
 #endif /* WITH_NDBCLUSTER_STORAGE_ENGINE */

=== modified file 'sql/ha_ndbinfo.cc'
--- a/sql/ha_ndbinfo.cc	2011-08-27 09:54:26 +0000
+++ b/sql/ha_ndbinfo.cc	2011-10-17 12:43:31 +0000
@@ -736,7 +736,9 @@ ndbinfo_find_files(handlerton *hton, THD
 
 handlerton* ndbinfo_hton;
 
-int ndbinfo_init(void *plugin)
+static
+int
+ndbinfo_init(void *plugin)
 {
   DBUG_ENTER("ndbinfo_init");
 
@@ -779,7 +781,9 @@ int ndbinfo_init(void *plugin)
   DBUG_RETURN(0);
 }
 
-int ndbinfo_deinit(void *plugin)
+static
+int
+ndbinfo_deinit(void *plugin)
 {
   DBUG_ENTER("ndbinfo_deinit");
 
@@ -804,6 +808,27 @@ struct st_mysql_sys_var* ndbinfo_system_
   NULL
 };
 
+struct st_mysql_storage_engine ndbinfo_storage_engine=
+{
+  MYSQL_HANDLERTON_INTERFACE_VERSION
+};
+
+struct st_mysql_plugin ndbinfo_plugin =
+{
+  MYSQL_STORAGE_ENGINE_PLUGIN,
+  &ndbinfo_storage_engine,
+  "ndbinfo",
+  "Sun Microsystems Inc.",
+  "MySQL Cluster system information storage engine",
+  PLUGIN_LICENSE_GPL,
+  ndbinfo_init,               /* plugin init */
+  ndbinfo_deinit,             /* plugin deinit */
+  0x0001,                     /* plugin version */
+  NULL,                       /* status variables */
+  ndbinfo_system_variables,   /* system variables */
+  NULL                        /* config options */
+};
+
 template class Vector<const NdbInfoRecAttr*>;
 
 #endif

=== modified file 'sql/ha_ndbinfo.h'
--- a/sql/ha_ndbinfo.h	2011-06-30 15:59:25 +0000
+++ b/sql/ha_ndbinfo.h	2011-10-17 12:43:31 +0000
@@ -20,9 +20,6 @@
 
 #include <mysql/plugin.h>
 
-int ndbinfo_init(void *plugin);
-int ndbinfo_deinit(void *plugin);
-
 class ha_ndbinfo: public handler
 {
 public:

=== modified file 'sql/sql_parse.cc'
--- a/sql/sql_parse.cc	2011-06-30 15:59:25 +0000
+++ b/sql/sql_parse.cc	2011-10-17 09:17:54 +0000
@@ -1476,6 +1476,14 @@ bool dispatch_command(enum enum_server_c
   case COM_REFRESH:
   {
     int not_used;
+#ifndef MCP_BUG13001491
+    /*
+      Initialize thd->lex since it's used in many base functions, such as
+      open_tables(). Otherwise, it remains uninitialized and may cause a
+      crash during execution of COM_REFRESH.
+    */
+    lex_start(thd);
+#endif
     status_var_increment(thd->status_var.com_stat[SQLCOM_FLUSH]);
     ulong options= (ulong) (uchar) packet[0];
     if (check_global_access(thd,RELOAD_ACL))
@@ -6978,7 +6986,18 @@ bool reload_acl_and_cache(THD *thd, ulon
     if (ha_flush_logs(NULL))
       result=1;
     if (flush_error_log())
+#ifndef MCP_BUG13001491
+    {
+      /*
+        When flush_error_log() fails, my_error() has not been called,
+        so we have to call it here to keep the client-server protocol
+        consistent.
+      */
+      my_error(ER_UNKNOWN_ERROR, MYF(0));
+      result= 1;
+    }
+#else
       result=1;
+#endif
   }
 #ifdef HAVE_QUERY_CACHE
   if (options & REFRESH_QUERY_CACHE_FREE)

=== modified file 'storage/ndb/include/kernel/BlockNumbers.h'
--- a/storage/ndb/include/kernel/BlockNumbers.h	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/include/kernel/BlockNumbers.h	2011-10-07 08:07:21 +0000
@@ -60,6 +60,7 @@
 #define RESTORE    0x106
 #define DBINFO     0x107
 #define DBSPJ      0x108
+#define THRMAN     0x109
 
 const BlockReference BACKUP_REF  = numberToRef(BACKUP, 0);
 const BlockReference DBTC_REF    = numberToRef(DBTC, 0);
@@ -82,6 +83,7 @@ const BlockReference PGMAN_REF   = numbe
 const BlockReference RESTORE_REF = numberToRef(RESTORE, 0);
 const BlockReference DBINFO_REF  = numberToRef(DBINFO, 0);
 const BlockReference DBSPJ_REF  = numberToRef(DBSPJ, 0);
+const BlockReference THRMAN_REF  = numberToRef(THRMAN, 0);
 
 static inline void __hide_warnings_unused_ref_vars(void) {
   // Hide annoying warnings about unused variables
@@ -92,10 +94,11 @@ static inline void __hide_warnings_unuse
   (void)DBUTIL_REF;  (void)SUMA_REF;    (void)DBTUX_REF;
   (void)TSMAN_REF;   (void)LGMAN_REF;   (void)PGMAN_REF;
   (void)RESTORE_REF; (void)DBINFO_REF;  (void)DBSPJ_REF;
+  (void)THRMAN_REF;
 }
 
 const BlockNumber MIN_BLOCK_NO = BACKUP;
-const BlockNumber MAX_BLOCK_NO = DBSPJ;
+const BlockNumber MAX_BLOCK_NO = THRMAN;
 const BlockNumber NO_OF_BLOCKS = (MAX_BLOCK_NO - MIN_BLOCK_NO + 1);
 
 /**

=== modified file 'storage/ndb/include/kernel/ndb_limits.h'
--- a/storage/ndb/include/kernel/ndb_limits.h	2011-07-04 13:37:56 +0000
+++ b/storage/ndb/include/kernel/ndb_limits.h	2011-10-07 13:15:08 +0000
@@ -172,7 +172,6 @@
 /*
  * Schema transactions
  */
-#define MAX_SCHEMA_TRANSACTIONS 5
 #define MAX_SCHEMA_OPERATIONS 256
 
 /*

=== modified file 'storage/ndb/include/kernel/signaldata/SchemaTrans.hpp'
--- a/storage/ndb/include/kernel/signaldata/SchemaTrans.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/include/kernel/signaldata/SchemaTrans.hpp	2011-10-07 13:15:08 +0000
@@ -44,7 +44,8 @@ struct SchemaTransBeginRef {
     BusyWithNR = 711,
     TooManySchemaTrans = 780,
     IncompatibleVersions = 763,
-    Nodefailure = 786
+    Nodefailure = 786,
+    OutOfSchemaTransMemory = 796
   };
   Uint32 senderRef;
   Uint32 transId;

=== modified file 'storage/ndb/include/mgmapi/mgmapi_config_parameters.h'
--- a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h	2011-08-30 09:40:52 +0000
+++ b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h	2011-10-07 16:12:13 +0000
@@ -197,6 +197,8 @@
 #define CFG_DB_MAX_DML_OPERATIONS_PER_TRANSACTION 627
 #define CFG_DB_MT_THREAD_CONFIG          628
 
+#define CFG_DB_CRASH_ON_CORRUPTED_TUPLE  629
+
 #define CFG_NODE_ARBIT_RANK           200
 #define CFG_NODE_ARBIT_DELAY          201
 #define CFG_RESERVED_SEND_BUFFER_MEMORY 202

=== modified file 'storage/ndb/include/ndb_constants.h'
--- a/storage/ndb/include/ndb_constants.h	2011-08-11 17:11:30 +0000
+++ b/storage/ndb/include/ndb_constants.h	2011-10-11 08:11:15 +0000
@@ -124,4 +124,22 @@
 
 #define NDB_INDEX_STAT_PREFIX        "ndb_index_stat"
 
+/**
+ * Defines for NDB$INFO.OPERATIONS
+ */
+#define NDB_INFO_OP_UNKNOWN  0
+#define NDB_INFO_OP_READ     1
+#define NDB_INFO_OP_READ_SH  2
+#define NDB_INFO_OP_READ_EX  3
+#define NDB_INFO_OP_INSERT   4
+#define NDB_INFO_OP_UPDATE   5
+#define NDB_INFO_OP_DELETE   6
+#define NDB_INFO_OP_WRITE    7
+#define NDB_INFO_OP_UNLOCK   8
+#define NDB_INFO_OP_REFRESH  9
+#define NDB_INFO_OP_SCAN_UNKNOWN (256 + 0)
+#define NDB_INFO_OP_SCAN         (256 + 1)
+#define NDB_INFO_OP_SCAN_SH      (256 + 2)
+#define NDB_INFO_OP_SCAN_EX      (256 + 3)
+
 #endif

=== modified file 'storage/ndb/include/ndbapi/Ndb.hpp'
--- a/storage/ndb/include/ndbapi/Ndb.hpp	2011-09-09 10:48:14 +0000
+++ b/storage/ndb/include/ndbapi/Ndb.hpp	2011-10-17 12:43:31 +0000
@@ -1762,7 +1762,19 @@ public:
   /* Get/Set per-Ndb custom data pointer */
   void setCustomData(void*);
   void* getCustomData() const;
-  
+
+  /* Get/Set per-Ndb custom data as a 64-bit value */
+  /* NOTE: shares storage with the void* custom data pointer,
+   * i.e. cannot be used together with setCustomData
+   */
+  void setCustomData64(Uint64);
+  Uint64 getCustomData64() const;
+
+  /**
+   * Returns the transaction id that the next startTransaction()
+   * on this Ndb object will get
+   */
+  Uint64 getNextTransactionId() const;
+
   /* Some client behaviour counters to assist
    * optimisation
    */

=== added file 'storage/ndb/include/portlib/NdbGetRUsage.h'
--- a/storage/ndb/include/portlib/NdbGetRUsage.h	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/include/portlib/NdbGetRUsage.h	2011-10-07 08:07:21 +0000
@@ -0,0 +1,46 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#ifndef NDB_GET_RUSAGE_H
+#define NDB_GET_RUSAGE_H
+
+#include <ndb_global.h>
+
+struct ndb_rusage
+{
+  Uint64 ru_utime;
+  Uint64 ru_stime;
+  Uint64 ru_minflt;
+  Uint64 ru_majflt;
+  Uint64 ru_nvcsw;
+  Uint64 ru_nivcsw;
+};
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+  /**
+   * Get resource usage for calling thread
+   */
+  int Ndb_GetRUSage(ndb_rusage * dst);
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif

=== modified file 'storage/ndb/src/common/debugger/BlockNames.cpp'
--- a/storage/ndb/src/common/debugger/BlockNames.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/debugger/BlockNames.cpp	2011-10-07 08:07:21 +0000
@@ -40,6 +40,7 @@ const BlockName BlockNames[] = {
   ,{ "RESTORE", RESTORE }
   ,{ "DBINFO", DBINFO }
   ,{ "DBSPJ", DBSPJ }
+  ,{ "THRMAN", THRMAN }
 };
 
 const BlockNumber NO_OF_BLOCK_NAMES = sizeof(BlockNames) / sizeof(BlockName);

=== modified file 'storage/ndb/src/common/debugger/signaldata/ScanTab.cpp'
--- a/storage/ndb/src/common/debugger/signaldata/ScanTab.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/debugger/signaldata/ScanTab.cpp	2011-09-29 11:31:28 +0000
@@ -78,9 +78,9 @@ printSCANTABCONF(FILE * output, const Ui
   size_t op_count= requestInfo & (~ScanTabConf::EndOfData);
   if (op_count)
   {
-    fprintf(output, " Operation(s) [api tc rows len]:\n");
     if (len == ScanTabConf::SignalLength + 4 * op_count)
     {
+      fprintf(output, " Operation(s) [api tc rows len]:\n");
       ScanTabConf::OpData * op = (ScanTabConf::OpData*)
         (theData + ScanTabConf::SignalLength);
       for(size_t i = 0; i<op_count; i++)
@@ -91,9 +91,9 @@ printSCANTABCONF(FILE * output, const Ui
         op++;
       }
     }
-    else
+    else if (len == ScanTabConf::SignalLength + 3 * op_count)
     {
-      assert(len == ScanTabConf::SignalLength + 3 * op_count);
+      fprintf(output, " Operation(s) [api tc rows len]:\n");      
       for(size_t i = 0; i<op_count; i++)
       {
         ScanTabConf::OpData * op = (ScanTabConf::OpData*)
@@ -104,6 +104,12 @@ printSCANTABCONF(FILE * output, const Ui
                 ScanTabConf::getLength(op->rows));
       }
     }
+    else
+    {
+      // ScanTabConf::OpData stored in section 0 of signal.
+      assert(len == ScanTabConf::SignalLength);
+      fprintf(output, " Long signal. Cannot print operations.");
+    }
     fprintf(output, "\n");
   }
   return false;

=== modified file 'storage/ndb/src/common/portlib/CMakeLists.txt'
--- a/storage/ndb/src/common/portlib/CMakeLists.txt	2011-09-27 17:28:13 +0000
+++ b/storage/ndb/src/common/portlib/CMakeLists.txt	2011-10-07 08:07:21 +0000
@@ -27,7 +27,8 @@ ADD_CONVENIENCE_LIBRARY(ndbportlib
             NdbEnv.c NdbThread.c NdbHost.c NdbTCP.cpp
             NdbMem.c NdbConfig.c NdbTick.c NdbDir.cpp
             ndb_daemon.cc ${EXTRA_SRC}
-            NdbNuma.cpp NdbMutex_DeadlockDetector.cpp)
+            NdbNuma.cpp NdbMutex_DeadlockDetector.cpp
+            NdbGetRUsage.cpp)
 TARGET_LINK_LIBRARIES(ndbportlib mysys ${LIBSOCKET})
 
 ADD_EXECUTABLE(NdbDir-t

=== modified file 'storage/ndb/src/common/portlib/Makefile.am'
--- a/storage/ndb/src/common/portlib/Makefile.am	2011-09-27 17:28:13 +0000
+++ b/storage/ndb/src/common/portlib/Makefile.am	2011-10-07 08:07:21 +0000
@@ -22,7 +22,8 @@ libportlib_la_SOURCES = \
 	NdbEnv.c NdbThread.c NdbHost.c NdbTCP.cpp	    \
 	ndb_daemon.cc NdbMem.c \
 	NdbConfig.c NdbDir.cpp ndb_socket.cpp \
-        NdbMutex_DeadlockDetector.cpp
+        NdbMutex_DeadlockDetector.cpp \
+        NdbGetRUsage.cpp
 
 include $(top_srcdir)/storage/ndb/config/common.mk.am
 include $(top_srcdir)/storage/ndb/config/type_util.mk.am

=== added file 'storage/ndb/src/common/portlib/NdbGetRUsage.cpp'
--- a/storage/ndb/src/common/portlib/NdbGetRUsage.cpp	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/src/common/portlib/NdbGetRUsage.cpp	2011-10-07 08:07:21 +0000
@@ -0,0 +1,65 @@
+/* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
+
+#include <NdbGetRUsage.h>
+
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#ifdef HAVE_SYS_RESOURCE_H
+#include <sys/resource.h>
+#endif
+
+#ifndef _WIN32
+static
+Uint64
+micros(struct timeval val)
+{
+  return
+    (Uint64)val.tv_sec * (Uint64)1000000 + val.tv_usec;
+}
+#endif
+
+extern "C"
+int
+Ndb_GetRUSage(ndb_rusage* dst)
+{
+  int res = -1;
+#ifdef HAVE_GETRUSAGE
+  struct rusage tmp;
+#ifdef RUSAGE_THREAD
+  res = getrusage(RUSAGE_THREAD, &tmp);
+#elif defined RUSAGE_LWP
+  res = getrusage(RUSAGE_LWP, &tmp);
+#endif
+
+  if (res == 0)
+  {
+    dst->ru_utime = micros(tmp.ru_utime);
+    dst->ru_stime = micros(tmp.ru_stime);
+    dst->ru_minflt = tmp.ru_minflt;
+    dst->ru_majflt = tmp.ru_majflt;
+    dst->ru_nvcsw = tmp.ru_nvcsw;
+    dst->ru_nivcsw = tmp.ru_nivcsw;
+  }
+#endif
+
+  if (res != 0)
+  {
+    bzero(dst, sizeof(* dst));
+  }
+  return res;
+}

=== modified file 'storage/ndb/src/common/portlib/NdbThread.c'
--- a/storage/ndb/src/common/portlib/NdbThread.c	2011-09-27 17:28:13 +0000
+++ b/storage/ndb/src/common/portlib/NdbThread.c	2011-10-07 07:37:47 +0000
@@ -176,6 +176,7 @@ NdbThread_CreateObject(const char * name
 
   if (g_main_thread != 0)
   {
+    settid(g_main_thread);
     if (name)
     {
       strnmov(g_main_thread->thread_name, name, sizeof(tmpThread->thread_name));

=== modified file 'storage/ndb/src/kernel/SimBlockList.cpp'
--- a/storage/ndb/src/kernel/SimBlockList.cpp	2011-09-23 09:13:22 +0000
+++ b/storage/ndb/src/kernel/SimBlockList.cpp	2011-10-07 08:07:21 +0000
@@ -51,6 +51,7 @@
 #include <PgmanProxy.hpp>
 #include <DbtcProxy.hpp>
 #include <DbspjProxy.hpp>
+#include <thrman.hpp>
 #include <mt.hpp>
 
 #ifndef VM_TRACE
@@ -89,6 +90,10 @@ void * operator new (size_t sz, SIMBLOCK
 void
 SimBlockList::load(EmulatorData& data){
   noOfBlocks = NO_OF_BLOCKS;
+#define THR 1
+#ifndef THR
+  noOfBlocks--;
+#endif
   theList = new SimulatedBlock * [noOfBlocks];
   if (!theList)
   {
@@ -160,7 +165,14 @@ SimBlockList::load(EmulatorData& data){
     theList[20]  = NEW_BLOCK(Dbspj)(ctx);
   else
     theList[20]  = NEW_BLOCK(DbspjProxy)(ctx);
-  assert(NO_OF_BLOCKS == 21);
+#ifdef THR
+  if (NdbIsMultiThreaded() == false)
+    theList[21] = NEW_BLOCK(Thrman)(ctx);
+  else
+    theList[21] = NEW_BLOCK(ThrmanProxy)(ctx);
+
+  assert(NO_OF_BLOCKS == 22);
+#endif
 
   // Check that all blocks could be created
   for (int i = 0; i < noOfBlocks; i++)
@@ -174,10 +186,10 @@ SimBlockList::load(EmulatorData& data){
 
   if (globalData.isNdbMt)
   {
-    add_main_thr_map();
+    mt_init_thr_map();
     for (int i = 0; i < noOfBlocks; i++)
       theList[i]->loadWorkers();
-    finalize_thr_map();
+    mt_finalize_thr_map();
   }
 }
 

=== modified file 'storage/ndb/src/kernel/blocks/CMakeLists.txt'
--- a/storage/ndb/src/kernel/blocks/CMakeLists.txt	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/CMakeLists.txt	2011-10-07 08:07:21 +0000
@@ -72,7 +72,8 @@ ADD_LIBRARY(ndbblocks STATIC
     dblqh/DblqhCommon.cpp
     PgmanProxy.cpp
     dbtup/DbtupClient.cpp
-    ${EXTRA_SRC})
+    ${EXTRA_SRC}
+    thrman.cpp)
 
 MYSQL_ADD_EXECUTABLE(ndb_print_file
   print_file.cpp

=== modified file 'storage/ndb/src/kernel/blocks/LocalProxy.cpp'
--- a/storage/ndb/src/kernel/blocks/LocalProxy.cpp	2011-09-15 20:21:59 +0000
+++ b/storage/ndb/src/kernel/blocks/LocalProxy.cpp	2011-10-07 08:07:21 +0000
@@ -22,8 +22,6 @@ LocalProxy::LocalProxy(BlockNumber block
   BLOCK_CONSTRUCTOR(LocalProxy);
 
   ndbrequire(instance() == 0); // this is main block
-  c_lqhWorkers = 0;
-  c_extraWorkers = 0; // sub-class constructor can set
   c_workers = 0;
   Uint32 i;
   for (i = 0; i < MaxWorkers; i++)
@@ -187,13 +185,13 @@ LocalProxy::lastReply(const SsSequential
 }
 
 void
-LocalProxy::sendREQ(Signal* signal, SsParallel& ss)
+LocalProxy::sendREQ(Signal* signal, SsParallel& ss, bool skipLast)
 {
   ndbrequire(ss.m_sendREQ != 0);
 
   ss.m_workerMask.clear();
   ss.m_worker = 0;
-  const Uint32 count = ss.m_extraLast ? c_lqhWorkers : c_workers;
+  const Uint32 count = skipLast ? c_workers - 1 : c_workers;
   SectionHandle handle(this);
   restoreHandle(handle, ss);
   while (ss.m_worker < count) {
@@ -266,21 +264,6 @@ LocalProxy::lastReply(const SsParallel&
   return ss.m_workerMask.isclear();
 }
 
-bool
-LocalProxy::lastExtra(Signal* signal, SsParallel& ss)
-{
-  SectionHandle handle(this);
-  if (c_lqhWorkers + ss.m_extraSent < c_workers) {
-    jam();
-    ss.m_worker = c_lqhWorkers + ss.m_extraSent;
-    ss.m_workerMask.set(ss.m_worker);
-    (this->*ss.m_sendREQ)(signal, ss.m_ssId, &handle);
-    ss.m_extraSent++;
-    return false;
-  }
-  return true;
-}
-
 // used in "reverse" proxying (start with worker REQs)
 void
 LocalProxy::setMask(SsParallel& ss)
@@ -301,11 +284,9 @@ LocalProxy::setMask(SsParallel& ss, cons
 void
 LocalProxy::loadWorkers()
 {
-  c_lqhWorkers = getLqhWorkers();
-  c_workers = c_lqhWorkers + c_extraWorkers;
-
-  Uint32 i;
-  for (i = 0; i < c_workers; i++) {
+  c_workers = mt_get_instance_count(number());
+  for (Uint32 i = 0; i < c_workers; i++)
+  {
     jam();
     Uint32 instanceNo = workerInstance(i);
 
@@ -314,31 +295,7 @@ LocalProxy::loadWorkers()
     ndbrequire(this->getInstance(instanceNo) == worker);
     c_worker[i] = worker;
 
-    if (i < c_lqhWorkers) {
-      add_lqh_worker_thr_map(number(), instanceNo);
-    } else {
-      add_extra_worker_thr_map(number(), instanceNo);
-    }
-  }
-}
-
-void
-LocalProxy::tc_loadWorkers()
-{
-  c_workers = globalData.ndbMtTcThreads;
-  c_lqhWorkers = globalData.ndbMtTcThreads;
-  c_extraWorkers = 0;
-
-  Uint32 i;
-  for (i = 0; i < c_workers; i++) {
-    jam();
-    Uint32 instanceNo = workerInstance(i);
-
-    SimulatedBlock* worker = newWorker(instanceNo);
-    ndbrequire(worker->instance() == instanceNo);
-    ndbrequire(this->getInstance(instanceNo) == worker);
-    c_worker[i] = worker;
-    add_tc_worker_thr_map(number(), instanceNo);
+    mt_add_thr_map(number(), instanceNo);
   }
 }
 

=== modified file 'storage/ndb/src/kernel/blocks/LocalProxy.hpp'
--- a/storage/ndb/src/kernel/blocks/LocalProxy.hpp	2011-09-15 20:21:59 +0000
+++ b/storage/ndb/src/kernel/blocks/LocalProxy.hpp	2011-10-07 08:07:21 +0000
@@ -56,19 +56,14 @@ public:
   BLOCK_DEFINES(LocalProxy);
 
 protected:
-  enum { MaxLqhWorkers = MAX_NDBMT_LQH_WORKERS };
-  enum { MaxExtraWorkers = 1 };
-  enum { MaxWorkers = MaxLqhWorkers + MaxExtraWorkers };
+  enum { MaxWorkers = SimulatedBlock::MaxInstances };
   typedef Bitmask<(MaxWorkers+31)/32> WorkerMask;
-  Uint32 c_lqhWorkers;
-  Uint32 c_extraWorkers;
   Uint32 c_workers;
   // no gaps - extra worker has index c_lqhWorkers (not MaxLqhWorkers)
   SimulatedBlock* c_worker[MaxWorkers];
 
   virtual SimulatedBlock* newWorker(Uint32 instanceNo) = 0;
   virtual void loadWorkers();
-  virtual void tc_loadWorkers();
 
   // get worker block by index (not by instance)
 
@@ -78,43 +73,22 @@ protected:
     return c_worker[i];
   }
 
-  SimulatedBlock* extraWorkerBlock() {
-    return workerBlock(c_lqhWorkers);
-  }
-
   // get worker block reference by index (not by instance)
 
   BlockReference workerRef(Uint32 i) {
     return numberToRef(number(), workerInstance(i), getOwnNodeId());
   }
 
-  BlockReference extraWorkerRef() {
-    ndbrequire(c_workers == c_lqhWorkers + 1);
-    Uint32 i = c_lqhWorkers;
-    return workerRef(i);
-  }
-
   // convert between worker index and worker instance
 
   Uint32 workerInstance(Uint32 i) const {
     ndbrequire(i < c_workers);
-    Uint32 ino;
-    if (i < c_lqhWorkers)
-      ino = 1 + i;
-    else
-      ino = 1 + MaxLqhWorkers;
-    return ino;
+    return i + 1;
   }
 
   Uint32 workerIndex(Uint32 ino) const {
     ndbrequire(ino != 0);
-    Uint32 i;
-    if (ino != 1 + MaxLqhWorkers)
-      i = ino - 1;
-    else
-      i = c_lqhWorkers;
-    ndbrequire(i < c_workers);
-    return i;
+    return ino - 1;
   }
 
   // support routines and classes ("Ss" = signal state)
@@ -161,14 +135,10 @@ protected:
   // run workers in parallel
   struct SsParallel : SsCommon {
     WorkerMask m_workerMask;
-    bool m_extraLast;   // run extra after LQH workers
-    Uint32 m_extraSent;
     SsParallel() {
-      m_extraLast = false;
-      m_extraSent = 0;
     }
   };
-  void sendREQ(Signal*, SsParallel& ss);
+  void sendREQ(Signal*, SsParallel& ss, bool skipLast = false);
   void recvCONF(Signal*, SsParallel& ss);
   void recvREF(Signal*, SsParallel& ss, Uint32 error);
   // for use in sendREQ

=== modified file 'storage/ndb/src/kernel/blocks/Makefile.am'
--- a/storage/ndb/src/kernel/blocks/Makefile.am	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/Makefile.am	2011-10-07 08:07:21 +0000
@@ -68,7 +68,8 @@ libblocks_a_SOURCES = tsman.cpp lgman.cp
   dblqh/DblqhCommon.cpp \
   PgmanProxy.cpp \
   dbtup/DbtupClient.cpp \
-  dbtc/DbtcProxy.cpp
+  dbtc/DbtcProxy.cpp \
+  thrman.cpp
 
 ndbtools_PROGRAMS = ndb_print_file
 ndb_print_file_SOURCES = print_file.cpp diskpage.cpp dbtup/tuppage.cpp

=== modified file 'storage/ndb/src/kernel/blocks/PgmanProxy.cpp'
--- a/storage/ndb/src/kernel/blocks/PgmanProxy.cpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/src/kernel/blocks/PgmanProxy.cpp	2011-10-07 08:07:21 +0000
@@ -20,8 +20,6 @@
 PgmanProxy::PgmanProxy(Block_context& ctx) :
   LocalProxy(PGMAN, ctx)
 {
-  c_extraWorkers = 1;
-
   // GSN_LCP_FRAG_ORD
   addRecSignal(GSN_LCP_FRAG_ORD, &PgmanProxy::execLCP_FRAG_ORD);
 
@@ -88,11 +86,15 @@ PgmanProxy::execEND_LCP_REQ(Signal* sign
     req->senderRef = reference();
     req->requestType = ReleasePagesReq::RT_RELEASE_UNLOCKED;
     req->requestData = 0;
-    sendSignal(extraWorkerRef(), GSN_RELEASE_PAGES_REQ,
+    // Extra worker
+    sendSignal(workerRef(c_workers - 1), GSN_RELEASE_PAGES_REQ,
                signal, ReleasePagesReq::SignalLength, JBB);
     return;
   }
-  sendREQ(signal, ss);
+  /**
+   * Send to extra PGMAN *after* all other PGMAN has completed
+   */
+  sendREQ(signal, ss, /* skip last */ true);
 }
 
 void
@@ -137,8 +139,14 @@ PgmanProxy::sendEND_LCP_CONF(Signal* sig
     return;
   }
 
-  if (!lastExtra(signal, ss)) {
+  if (!ss.m_extraLast)
+  {
     jam();
+    ss.m_extraLast = true;
+    ss.m_worker = c_workers - 1; // send to last PGMAN
+    ss.m_workerMask.set(ss.m_worker);
+    SectionHandle handle(this);
+    (this->*ss.m_sendREQ)(signal, ss.m_ssId, &handle);
     return;
   }
 
@@ -170,7 +178,7 @@ PgmanProxy::get_page(Page_cache_client&
 {
   ndbrequire(blockToInstance(caller.m_block) == 0);
   SimulatedBlock* block = globalData.getBlock(caller.m_block);
-  Pgman* worker = (Pgman*)extraWorkerBlock();
+  Pgman* worker = (Pgman*)workerBlock(c_workers - 1); // extraWorkerBlock();
   Page_cache_client pgman(block, worker);
   int ret = pgman.get_page(signal, req, flags);
   caller.m_ptr = pgman.m_ptr;
@@ -183,7 +191,7 @@ PgmanProxy::update_lsn(Page_cache_client
 {
   ndbrequire(blockToInstance(caller.m_block) == 0);
   SimulatedBlock* block = globalData.getBlock(caller.m_block);
-  Pgman* worker = (Pgman*)extraWorkerBlock();
+  Pgman* worker = (Pgman*)workerBlock(c_workers - 1); // extraWorkerBlock();
   Page_cache_client pgman(block, worker);
   pgman.update_lsn(key, lsn);
 }
@@ -194,7 +202,7 @@ PgmanProxy::drop_page(Page_cache_client&
 {
   ndbrequire(blockToInstance(caller.m_block) == 0);
   SimulatedBlock* block = globalData.getBlock(caller.m_block);
-  Pgman* worker = (Pgman*)extraWorkerBlock();
+  Pgman* worker = (Pgman*)workerBlock(c_workers - 1); // extraWorkerBlock();
   Page_cache_client pgman(block, worker);
   int ret = pgman.drop_page(key, page_id);
   return ret;
@@ -209,10 +217,10 @@ PgmanProxy::drop_page(Page_cache_client&
 Uint32
 PgmanProxy::create_data_file(Signal* signal)
 {
-  Pgman* worker = (Pgman*)extraWorkerBlock();
+  Pgman* worker = (Pgman*)workerBlock(c_workers - 1); // extraWorkerBlock();
   Uint32 ret = worker->create_data_file();
   Uint32 i;
-  for (i = 0; i < c_lqhWorkers; i++) {
+  for (i = 0; i < c_workers - 1; i++) {
     jam();
     send_data_file_ord(signal, i, ret,
                        DataFileOrd::CreateDataFile);
@@ -223,10 +231,10 @@ PgmanProxy::create_data_file(Signal* sig
 Uint32
 PgmanProxy::alloc_data_file(Signal* signal, Uint32 file_no)
 {
-  Pgman* worker = (Pgman*)extraWorkerBlock();
+  Pgman* worker = (Pgman*)workerBlock(c_workers - 1); // extraWorkerBlock();
   Uint32 ret = worker->alloc_data_file(file_no);
   Uint32 i;
-  for (i = 0; i < c_lqhWorkers; i++) {
+  for (i = 0; i < c_workers - 1; i++) {
     jam();
     send_data_file_ord(signal, i, ret,
                        DataFileOrd::AllocDataFile, file_no);
@@ -237,10 +245,10 @@ PgmanProxy::alloc_data_file(Signal* sign
 void
 PgmanProxy::map_file_no(Signal* signal, Uint32 file_no, Uint32 fd)
 {
-  Pgman* worker = (Pgman*)extraWorkerBlock();
+  Pgman* worker = (Pgman*)workerBlock(c_workers - 1); // extraWorkerBlock();
   worker->map_file_no(file_no, fd);
   Uint32 i;
-  for (i = 0; i < c_lqhWorkers; i++) {
+  for (i = 0; i < c_workers - 1; i++) {
     jam();
     send_data_file_ord(signal, i, ~(Uint32)0,
                        DataFileOrd::MapFileNo, file_no, fd);
@@ -250,10 +258,10 @@ PgmanProxy::map_file_no(Signal* signal,
 void
 PgmanProxy::free_data_file(Signal* signal, Uint32 file_no, Uint32 fd)
 {
-  Pgman* worker = (Pgman*)extraWorkerBlock();
+  Pgman* worker = (Pgman*)workerBlock(c_workers - 1); // extraWorkerBlock();
   worker->free_data_file(file_no, fd);
   Uint32 i;
-  for (i = 0; i < c_lqhWorkers; i++) {
+  for (i = 0; i < c_workers - 1; i++) {
     jam();
     send_data_file_ord(signal, i, ~(Uint32)0,
                        DataFileOrd::FreeDataFile, file_no, fd);

=== modified file 'storage/ndb/src/kernel/blocks/PgmanProxy.hpp'
--- a/storage/ndb/src/kernel/blocks/PgmanProxy.hpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/src/kernel/blocks/PgmanProxy.hpp	2011-10-07 08:07:21 +0000
@@ -62,11 +62,12 @@ protected:
      */
     static const char* name() { return "END_LCP_REQ"; }
     EndLcpReq m_req;
+    bool m_extraLast;
     Ss_END_LCP_REQ() {
       m_sendREQ = (SsFUNCREQ)&PgmanProxy::sendEND_LCP_REQ;
       m_sendCONF = (SsFUNCREP)&PgmanProxy::sendEND_LCP_CONF;
       // extra worker (for extent pages) must run after others
-      m_extraLast = true;
+      m_extraLast = false;
     }
     enum { poolSize = 1 };
     static SsPool<Ss_END_LCP_REQ>& pool(LocalProxy* proxy) {

=== modified file 'storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp'
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp	2011-09-02 17:24:52 +0000
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp	2011-10-14 13:24:26 +0000
@@ -51,6 +51,7 @@
 
 #include <NdbSleep.h>
 #include <SafeCounter.hpp>
+#include <SectionReader.hpp>
 
 #define ZREPORT_MEMORY_USAGE 1000
 
@@ -2678,6 +2679,31 @@ Cmvmi::execTESTSIG(Signal* signal){
     return;
   }
 
+  /**
+   * Testing Api fragmented signal send/receive
+   */
+  if (testType == 40)
+  {
+    /* Fragmented signal sent from Api, we'll check it and return it */
+    Uint32 expectedVal = 0;
+    for (Uint32 s = 0; s < handle.m_cnt; s++)
+    {
+      SectionReader sr(handle.m_ptr[s].i, getSectionSegmentPool());
+      Uint32 received;
+      while (sr.getWord(&received))
+      {
+        ndbrequire(received == expectedVal ++);
+      }
+    }
+
+    /* Now return it back to the Api, no callback, so framework
+     * can time-slice the send
+     */
+    sendFragmentedSignal(ref, GSN_TESTSIG, signal, signal->length(), JBB, &handle);
+
+    return;
+  }
+
   if(signal->getSendersBlockRef() == ref){
     /**
      * Signal from API (not via NodeReceiverGroup)

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-08-19 09:38:29 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-10-18 12:45:50 +0000
@@ -322,120 +322,12 @@ void Dbdict::execDBINFO_SCANREQ(Signal *
           CFG_DB_NO_ORDERED_INDEXES,
           CFG_DB_NO_UNIQUE_HASH_INDEXES,
           CFG_DB_NO_TRIGGERS }},
-      { "Schema Operation",
-        c_schemaOpPool.getUsed(),
-        c_schemaOpPool.getSize(),
-        c_schemaOpPool.getEntrySize(),
-        c_schemaOpPool.getUsedHi(),
-        { 0,0,0,0 }},
-      { "Schema Transaction",
-        c_schemaTransPool.getUsed(),
-        c_schemaTransPool.getSize(),
-        c_schemaTransPool.getEntrySize(),
-        c_schemaTransPool.getUsedHi(),
-        { 0,0,0,0 }},
       { "Transaction Handle",
         c_txHandlePool.getUsed(),
         c_txHandlePool.getSize(),
         c_txHandlePool.getEntrySize(),
         c_txHandlePool.getUsedHi(),
         { 0,0,0,0 }},
-      { "Create Table Record",
-        c_createTableRecPool.getUsed(),
-        c_createTableRecPool.getSize(),
-        c_createTableRecPool.getEntrySize(),
-        c_createTableRecPool.getUsedHi(),
-        { 0,0,0,0 }},
-      { "Drop Table Record",
-        c_dropTableRecPool.getUsed(),
-        c_dropTableRecPool.getSize(),
-        c_dropTableRecPool.getEntrySize(),
-        c_dropTableRecPool.getUsedHi(),
-        { 0,0,0,0 }},
-      { "Alter Table Record",
-        c_alterTableRecPool.getUsed(),
-        c_alterTableRecPool.getSize(),
-        c_alterTableRecPool.getEntrySize(),
-        c_alterTableRecPool.getUsedHi(),
-        { 0,0,0,0 }},
-      { "Create Index Record",
-        c_createIndexRecPool.getUsed(),
-        c_createIndexRecPool.getSize(),
-        c_createIndexRecPool.getEntrySize(),
-        c_createIndexRecPool.getUsedHi(),
-        { 0,0,0,0 }},
-      { "Drop Index Record",
-        c_dropIndexRecPool.getUsed(),
-        c_dropIndexRecPool.getSize(),
-        c_dropIndexRecPool.getEntrySize(),
-        c_dropIndexRecPool.getUsedHi(),
-        { 0,0,0,0 }},
-      { "Alter Index Record",
-        c_alterIndexRecPool.getUsed(),
-        c_alterIndexRecPool.getSize(),
-        c_alterIndexRecPool.getEntrySize(),
-        c_alterIndexRecPool.getUsedHi(),
-        { 0,0,0,0 }},
-      { "Build Index Record",
-        c_buildIndexRecPool.getUsed(),
-        c_buildIndexRecPool.getSize(),
-        c_buildIndexRecPool.getEntrySize(),
-        c_buildIndexRecPool.getUsedHi(),
-        { 0,0,0,0 }},
-      { "Index Stat Record",
-        c_indexStatRecPool.getUsed(),
-        c_indexStatRecPool.getSize(),
-        c_indexStatRecPool.getEntrySize(),
-        c_indexStatRecPool.getUsedHi(),
-        { 0,0,0,0 }},
-      { "Create Hash Map Record",
-        c_createHashMapRecPool.getUsed(),
-        c_createHashMapRecPool.getSize(),
-        c_createHashMapRecPool.getEntrySize(),
-        c_createHashMapRecPool.getUsedHi(),
-        { 0,0,0,0 }},
-      { "Copy Data Record",
-        c_copyDataRecPool.getUsed(),
-        c_copyDataRecPool.getSize(),
-        c_copyDataRecPool.getEntrySize(),
-        c_copyDataRecPool.getUsedHi(),
-        { 0,0,0,0 }},
-      { "Create Trigger Record",
-        c_createTriggerRecPool.getUsed(),
-        c_createTriggerRecPool.getSize(),
-        c_createTriggerRecPool.getEntrySize(),
-        c_createTriggerRecPool.getUsedHi(),
-        { 0,0,0,0 }},
-      { "Drop Trigger Record",
-        c_dropTriggerRecPool.getUsed(),
-        c_dropTriggerRecPool.getSize(),
-        c_dropTriggerRecPool.getEntrySize(),
-        c_dropTriggerRecPool.getUsedHi(),
-        { 0,0,0,0 }},
-      { "Create Filegroup Record",
-        c_createFilegroupRecPool.getUsed(),
-        c_createFilegroupRecPool.getSize(),
-        c_createFilegroupRecPool.getEntrySize(),
-        c_createFilegroupRecPool.getUsedHi(),
-        { 0,0,0,0 }},
-      { "Create File Record",
-        c_createFileRecPool.getUsed(),
-        c_createFileRecPool.getSize(),
-        c_createFileRecPool.getEntrySize(),
-        c_createFileRecPool.getUsedHi(),
-        { 0,0,0,0 }},
-      { "Drop Filegroup Record",
-        c_dropFilegroupRecPool.getUsed(),
-        c_dropFilegroupRecPool.getSize(),
-        c_dropFilegroupRecPool.getEntrySize(),
-        c_dropFilegroupRecPool.getUsedHi(),
-        { 0,0,0,0 }},
-      { "Drop File Record",
-        c_dropFileRecPool.getUsed(),
-        c_dropFileRecPool.getSize(),
-        c_dropFileRecPool.getEntrySize(),
-        c_dropFileRecPool.getUsedHi(),
-        { 0,0,0,0 }},
       { "Operation Record",
         c_opRecordPool.getUsed(),
         c_opRecordPool.getSize(),
@@ -1030,7 +922,8 @@ Dbdict::execCREATE_FRAGMENTATION_REQ(Sig
   Uint32 *theData = &signal->theData[0];
   const OpSection& fragSection =
     getOpSection(op_ptr, CreateTabReq::FRAGMENTATION);
-  copyOut(fragSection, &theData[25], ZNIL);
+  LocalArenaPoolImpl op_sec_pool(op_ptr.p->m_trans_ptr.p->m_arena,c_opSectionBufferPool);
+  copyOut(op_sec_pool, fragSection, &theData[25], ZNIL);
   theData[0] = 0;
 }
 
@@ -1279,7 +1172,7 @@ Dbdict::writeTableFile(Signal* signal, U
 
 // SchemaTrans variant
 void
-Dbdict::writeTableFile(Signal* signal, Uint32 tableId,
+Dbdict::writeTableFile(Signal* signal, SchemaOpPtr op_ptr, Uint32 tableId,
                        OpSection tabInfoSec, Callback* callback)
 {
   ndbrequire(c_writeTableRecord.tableWriteState == WriteTableRecord::IDLE);
@@ -1300,7 +1193,8 @@ Dbdict::writeTableFile(Signal* signal, U
     Uint32* dst = &pageRecPtr.p->word[ZPAGE_HEADER_SIZE];
     Uint32 dstSize = (ZMAX_PAGES_OF_TABLE_DEFINITION * ZSIZE_OF_PAGES_IN_WORDS)
       - ZPAGE_HEADER_SIZE;
-    bool ok = copyOut(tabInfoSec, dst, dstSize);
+    LocalArenaPoolImpl op_sec_pool(op_ptr.p->m_trans_ptr.p->m_arena, c_opSectionBufferPool);
+    bool ok = copyOut(op_sec_pool, tabInfoSec, dst, dstSize);
     ndbrequire(ok);
 
     memset(&pageRecPtr.p->word[0], 0, 4 * ZPAGE_HEADER_SIZE);
@@ -2755,6 +2649,11 @@ void Dbdict::execREAD_CONFIG_REQ(Signal*
   ndb_mgm_get_int_parameter(p, CFG_DB_INDEX_STAT_AUTO_UPDATE,
                             &c_indexStatAutoUpdate);
 
+  Pool_context pc;
+  pc.m_block = this;
+
+  c_arenaAllocator.init(796, RT_DBDICT_SCHEMA_TRANS_ARENA, pc); // TODO: set size automagical? INFO: 796 is about 1/41 of a page, and bigger than CreateIndexRec (784 bytes)
+
   c_attributeRecordPool.setSize(attributesize);
   c_attributeRecordHash.setSize(64);
   c_fsConnectRecordPool.setSize(ZFS_CONNECT_SIZE);
@@ -2765,9 +2664,12 @@ void Dbdict::execREAD_CONFIG_REQ(Signal*
   g_key_descriptor_pool.setSize(tablerecSize);
   c_triggerRecordPool.setSize(c_maxNoOfTriggers);
 
-  c_opSectionBufferPool.setSize(1024); // units OpSectionSegmentSize
+  Record_info ri;
+  OpSectionBuffer::createRecordInfo(ri, RT_DBDICT_OP_SECTION_BUFFER);
+  c_opSectionBufferPool.init(&c_arenaAllocator, ri, pc);
+
   c_schemaOpHash.setSize(MAX_SCHEMA_OPERATIONS);
-  c_schemaTransPool.setSize(MAX_SCHEMA_TRANSACTIONS);
+  c_schemaTransPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_SCHEMA_TRANSACTION, pc);
   c_schemaTransHash.setSize(2);
   c_txHandlePool.setSize(2);
   c_txHandleHash.setSize(2);
@@ -2776,9 +2678,6 @@ void Dbdict::execREAD_CONFIG_REQ(Signal*
   c_obj_hash.setSize((tablerecSize+c_maxNoOfTriggers+1)/2);
   m_dict_lock_pool.setSize(MAX_NDB_NODES);
 
-  Pool_context pc;
-  pc.m_block = this;
-
   c_file_hash.setSize(16);
   c_filegroup_hash.setSize(16);
 
@@ -2792,31 +2691,31 @@ void Dbdict::execREAD_CONFIG_REQ(Signal*
   /**
    * TODO: Use arena-allocator for schema-transactions
    */
-  c_createTableRecPool.setSize(1 + 2 * MAX_INDEXES);
-  c_dropTableRecPool.setSize(1 + 2 * MAX_INDEXES);
-  c_alterTableRecPool.setSize(32);
-  c_createTriggerRecPool.setSize(4 * 2 * MAX_INDEXES);
-  c_dropTriggerRecPool.setSize(3 * 2 * MAX_INDEXES);
-  c_createIndexRecPool.setSize(2*MAX_INDEXES);
-  c_dropIndexRecPool.setSize(2 * MAX_INDEXES);
-  c_alterIndexRecPool.setSize(2 * MAX_INDEXES);
-  c_buildIndexRecPool.setSize(2 * 2 * MAX_INDEXES);
-  c_indexStatRecPool.setSize((1 + 4) * MAX_INDEXES); //main + 4 subs
-  c_createFilegroupRecPool.setSize(32);
-  c_createFileRecPool.setSize(32);
-  c_dropFilegroupRecPool.setSize(32);
-  c_dropFileRecPool.setSize(32);
-  c_createHashMapRecPool.setSize(32);
-  c_copyDataRecPool.setSize(32);
-  c_schemaOpPool.setSize(1 + 32 * MAX_INDEXES);
+  c_createTableRecPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_CREATE_TABLE, pc);
+  c_dropTableRecPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_DROP_TABLE, pc);
+  c_alterTableRecPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_ALTER_TABLE, pc);
+  c_createTriggerRecPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_CREATE_TRIGGER, pc);
+  c_dropTriggerRecPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_DROP_TRIGGER, pc);
+  c_createIndexRecPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_CREATE_INDEX, pc);
+  c_dropIndexRecPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_DROP_INDEX, pc);
+  c_alterIndexRecPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_ALTER_INDEX, pc);
+  c_buildIndexRecPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_BUILD_INDEX, pc);
+  c_indexStatRecPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_INDEX_STAT, pc);
+  c_createFilegroupRecPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_CREATE_FILEGROUP, pc);
+  c_createFileRecPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_CREATE_FILE, pc);
+  c_dropFilegroupRecPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_DROP_FILEGROUP, pc);
+  c_dropFileRecPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_DROP_FILE, pc);
+  c_createHashMapRecPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_CREATE_HASH_MAP, pc);
+  c_copyDataRecPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_COPY_DATA, pc);
+  c_schemaOpPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_SCHEMA_OPERATION, pc);
 
   c_hash_map_hash.setSize(4);
   c_hash_map_pool.setSize(32);
   g_hash_map.setSize(32);
 
-  c_createNodegroupRecPool.setSize(2);
-  c_dropNodegroupRecPool.setSize(2);
-  
+  c_createNodegroupRecPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_CREATE_NODEGROUP, pc);
+  c_dropNodegroupRecPool.arena_pool_init(&c_arenaAllocator, RT_DBDICT_DROP_NODEGROUP, pc);
+
   c_opRecordPool.setSize(256);   // XXX need config params
   c_opCreateEvent.setSize(2);
   c_opSubEvent.setSize(2);
@@ -4010,8 +3909,11 @@ Dbdict::restart_nextOp(Signal* signal, b
 {
   c_restartRecord.m_op_cnt++;
 
-  if (OpSectionBuffer::getSegmentSize() *
-      c_opSectionBufferPool.getNoOfFree() < MAX_WORDS_META_FILE)
+  Resource_limit rl;
+  Uint32 free_words;
+  m_ctx.m_mm.get_resource_limit(RG_SCHEMA_TRANS_MEMORY, rl);
+  free_words = (rl.m_min - rl.m_curr) * GLOBAL_PAGE_SIZE_WORDS; // underestimate
+  if (free_words < 2*MAX_WORDS_META_FILE)
   {
     jam();
     /**
@@ -4381,6 +4283,12 @@ Dbdict::restartCreateObj_parse(Signal* s
   jam();
   Ptr<SchemaOp> op_ptr;
   
+  Ptr<TxHandle> tx_ptr;
+  c_txHandleHash.getPtr(tx_ptr, c_restartRecord.m_tx_ptr_i);
+
+  Ptr<SchemaTrans> trans_ptr;
+  findSchemaTrans(trans_ptr, tx_ptr.p->m_transKey);
+
   switch(c_restartRecord.m_entry.m_tableType){
   case DictTabInfo::SystemTable:
   case DictTabInfo::UserTable:
@@ -4390,37 +4298,31 @@ Dbdict::restartCreateObj_parse(Signal* s
   case DictTabInfo::OrderedIndex:
   {
     Ptr<CreateTableRec> opRecPtr;
-    seizeSchemaOp(op_ptr, opRecPtr);
+    seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     break;
   }
   case DictTabInfo::Undofile:
   case DictTabInfo::Datafile:
   {
     Ptr<CreateFileRec> opRecPtr;
-    seizeSchemaOp(op_ptr, opRecPtr);
+    seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     break;
   }
   case DictTabInfo::Tablespace:
   case DictTabInfo::LogfileGroup:
   {
     Ptr<CreateFilegroupRec> opRecPtr;
-    seizeSchemaOp(op_ptr, opRecPtr);
+    seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     break;
   }
   case DictTabInfo::HashMap:
   {
     Ptr<CreateHashMapRec> opRecPtr;
-    seizeSchemaOp(op_ptr, opRecPtr);
+    seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     break;
   }
   }
 
-  Ptr<TxHandle> tx_ptr;
-  c_txHandleHash.getPtr(tx_ptr, c_restartRecord.m_tx_ptr_i);
-
-  Ptr<SchemaTrans> trans_ptr;
-  findSchemaTrans(trans_ptr, tx_ptr.p->m_transKey);
-  addSchemaOp(trans_ptr, op_ptr);
   Uint32 ownVersion = c_restartRecord.m_old_entry.m_tableVersion;
   Uint32 newVersion = c_restartRecord.m_entry.m_tableVersion;
   if (file)
@@ -4490,6 +4392,12 @@ Dbdict::restartDropObj(Signal* signal,
   jam();
   Ptr<SchemaOp> op_ptr;
 
+  Ptr<TxHandle> tx_ptr;
+  c_txHandleHash.getPtr(tx_ptr, c_restartRecord.m_tx_ptr_i);
+
+  Ptr<SchemaTrans> trans_ptr;
+  findSchemaTrans(trans_ptr, tx_ptr.p->m_transKey);
+
   switch(c_restartRecord.m_entry.m_tableType){
   case DictTabInfo::SystemTable:
   case DictTabInfo::UserTable:
@@ -4498,14 +4406,14 @@ Dbdict::restartDropObj(Signal* signal,
   case DictTabInfo::UniqueOrderedIndex:
   case DictTabInfo::OrderedIndex:
     Ptr<DropTableRec> opRecPtr;
-    seizeSchemaOp(op_ptr, opRecPtr);
+    seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     ndbrequire(false);
     break;
   case DictTabInfo::Undofile:
   case DictTabInfo::Datafile:
   {
     Ptr<DropFileRec> opRecPtr;
-    seizeSchemaOp(op_ptr, opRecPtr);
+    seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     opRecPtr.p->m_request.file_id = tableId;
     opRecPtr.p->m_request.file_version = entry->m_tableVersion;
     break;
@@ -4514,7 +4422,7 @@ Dbdict::restartDropObj(Signal* signal,
   case DictTabInfo::LogfileGroup:
   {
     Ptr<DropFilegroupRec> opRecPtr;
-    seizeSchemaOp(op_ptr, opRecPtr);
+    seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     opRecPtr.p->m_request.filegroup_id = tableId;
     opRecPtr.p->m_request.filegroup_version = entry->m_tableVersion;
     break;
@@ -4523,12 +4431,6 @@ Dbdict::restartDropObj(Signal* signal,
 
   ndbout_c("restartDropObj(%u)", tableId);
   
-  Ptr<TxHandle> tx_ptr;
-  c_txHandleHash.getPtr(tx_ptr, c_restartRecord.m_tx_ptr_i);
-
-  Ptr<SchemaTrans> trans_ptr;
-  findSchemaTrans(trans_ptr, tx_ptr.p->m_transKey);
-  addSchemaOp(trans_ptr, op_ptr);
   op_ptr.p->m_restart = SchemaOp::RI_RESTART_MAJOR_CHANGE; //
   op_ptr.p->m_state = SchemaOp::OS_PARSE_MASTER;
   
@@ -5650,6 +5552,7 @@ void Dbdict::execWAIT_GCP_REF(Signal* si
 const Dbdict::OpInfo
 Dbdict::CreateTableRec::g_opInfo = {
   { 'C', 'T', 'a', 0 },
+  ~RT_DBDICT_CREATE_TABLE,
   GSN_CREATE_TAB_REQ,
   CreateTabReq::SignalLength,
   //
@@ -6185,7 +6088,7 @@ Dbdict::createTable_prepare(Signal* sign
     jam();
     const OpSection& tabInfoSec =
       getOpSection(op_ptr, CreateTabReq::DICT_TAB_INFO);
-    writeTableFile(signal, createTabPtr.p->m_request.tableId,
+    writeTableFile(signal, op_ptr, createTabPtr.p->m_request.tableId,
                    tabInfoSec, &cb);
   }
   else
@@ -6267,7 +6170,6 @@ Dbdict::createTab_local(Signal* signal,
   sendSignal(DBLQH_REF, GSN_CREATE_TAB_REQ, signal,
              CreateTabReq::SignalLengthLDM, JBB);
 
-
   /**
    * Create KeyDescriptor
    */
@@ -6564,7 +6466,8 @@ Dbdict::createTab_dih(Signal* signal, Sc
     // wl3600_todo add ndbrequire on SR, NR
     if (size != 0) {
       jam();
-      bool ok = copyOut(fragSec, page, 1024);
+      LocalArenaPoolImpl op_sec_pool(op_ptr.p->m_trans_ptr.p->m_arena,c_opSectionBufferPool);
+      bool ok = copyOut(op_sec_pool, fragSec, page, 1024);
       ndbrequire(ok);
       ptr[noOfSections].sz = size;
       ptr[noOfSections].p = page;
@@ -7117,11 +7020,11 @@ Dbdict::createTable_abortPrepare(Signal*
 
   // create drop table operation  wl3600_todo must pre-allocate
 
-  SchemaOpPtr& oplnk_ptr = op_ptr.p->m_oplnk_ptr;
-  ndbrequire(oplnk_ptr.isNull());
+  SchemaOpPtr oplnk_ptr;
   DropTableRecPtr dropTabPtr;
-  seizeSchemaOp(oplnk_ptr, dropTabPtr);
-  ndbrequire(!oplnk_ptr.isNull());
+  bool ok = seizeLinkedSchemaOp(op_ptr, oplnk_ptr, dropTabPtr);
+  ndbrequire(ok);
+
   DropTabReq* aux_impl_req = &dropTabPtr.p->m_request;
 
   aux_impl_req->senderRef = impl_req->senderRef;
@@ -7130,9 +7033,6 @@ Dbdict::createTable_abortPrepare(Signal*
   aux_impl_req->tableId = impl_req->tableId;
   aux_impl_req->tableVersion = impl_req->tableVersion;
 
-  // link other way too
-  oplnk_ptr.p->m_opbck_ptr = op_ptr;
-
   // wl3600_todo use ref count
   unlinkDictObject(op_ptr);
 
@@ -7302,6 +7202,7 @@ void Dbdict::releaseTableObject(Uint32 t
 const Dbdict::OpInfo
 Dbdict::DropTableRec::g_opInfo = {
   { 'D', 'T', 'a', 0 },
+  ~RT_DBDICT_DROP_TABLE,
   GSN_DROP_TAB_REQ,
   DropTabReq::SignalLength,
   //
@@ -7931,6 +7832,7 @@ void Dbdict::execDROP_TABLE_REF(Signal*
 const Dbdict::OpInfo
 Dbdict::AlterTableRec::g_opInfo = {
   { 'A', 'T', 'a', 0 },
+  ~RT_DBDICT_ALTER_TABLE,
   GSN_ALTER_TAB_REQ,
   AlterTabReq::SignalLength,
   //
@@ -7968,6 +7870,8 @@ Dbdict::alterTable_release(SchemaOpPtr o
     Rope r(c_rope_pool, alterTabPtr.p->m_oldFrmData);
     r.erase();
   }
+  LocalArenaPoolImpl op_sec_pool(op_ptr.p->m_trans_ptr.p->m_arena, c_opSectionBufferPool);
+  release(op_sec_pool, alterTabPtr.p->m_newAttrData);
   releaseOpRec<AlterTableRec>(op_ptr);
 }
 
@@ -8281,12 +8185,19 @@ Dbdict::alterTable_parse(Signal* signal,
     AttributeRecordPtr attrPtr;
     list.first(attrPtr);
     Uint32 i = 0;
+    LocalArenaPoolImpl op_sec_pool(trans_ptr.p->m_arena, c_opSectionBufferPool);
     for (i = 0; i < newTablePtr.p->noOfAttributes; i++) {
       if (i >= tablePtr.p->noOfAttributes) {
         jam();
-        Uint32 j = 2 * (i - tablePtr.p->noOfAttributes);
-        alterTabPtr.p->m_newAttrData[j + 0] = attrPtr.p->attributeDescriptor;
-        alterTabPtr.p->m_newAttrData[j + 1] = attrPtr.p->extPrecision & ~0xFFFF;
+        Uint32 attrData[2];
+        attrData[0] = attrPtr.p->attributeDescriptor;
+        attrData[1] = attrPtr.p->extPrecision & ~0xFFFF;
+        if(!copyIn(op_sec_pool, alterTabPtr.p->m_newAttrData, attrData, 2))
+        {
+          jam();
+          setError(error, SchemaTransBeginRef::OutOfSchemaTransMemory, __LINE__);
+          return;
+        }
       }
       list.next(attrPtr);
     }
@@ -9043,7 +8954,7 @@ Dbdict::alterTable_prepare(Signal* signa
      */
     {
       Ptr<SchemaOp> tmp = op_ptr;
-      LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+      LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
       for (list.prev(tmp); !tmp.isNull(); list.prev(tmp))
       {
         jam();
@@ -9118,7 +9029,7 @@ Dbdict::alterTable_backup_mutex_locked(S
 
   bool savetodisk = !(tablePtr.p->m_bits & TableRecord::TR_Temporary);
   if (savetodisk) {
-    writeTableFile(signal, impl_req->tableId, tabInfoSec, &callback);
+    writeTableFile(signal, op_ptr, impl_req->tableId, tabInfoSec, &callback);
   } else {
     execute(signal, callback, 0);
   }
@@ -9196,7 +9107,14 @@ Dbdict::alterTable_toLocal(Signal* signa
   {
     jam();
     LinearSectionPtr ptr[3];
-    ptr[0].p = alterTabPtr.p->m_newAttrData;
+    Uint32 newAttrData[2 * MAX_ATTRIBUTES_IN_TABLE];
+    ndbrequire(impl_req->noOfNewAttr <= MAX_ATTRIBUTES_IN_TABLE);
+    ndbrequire(2 * impl_req->noOfNewAttr == alterTabPtr.p->m_newAttrData.getSize());
+    LocalArenaPoolImpl op_sec_pool(op_ptr.p->m_trans_ptr.p->m_arena, c_opSectionBufferPool);
+    bool ok = copyOut(op_sec_pool, alterTabPtr.p->m_newAttrData, newAttrData, 2 * impl_req->noOfNewAttr);
+    ndbrequire(ok);
+
+    ptr[0].p = newAttrData;
     ptr[0].sz = 2 * impl_req->noOfNewAttr;
     sendSignal(blockRef, GSN_ALTER_TAB_REQ, signal,
                AlterTabReq::SignalLength, JBB, ptr, 1);
@@ -9207,7 +9125,8 @@ Dbdict::alterTable_toLocal(Signal* signa
     const OpSection& fragInfoSec =
       getOpSection(op_ptr, AlterTabReq::FRAGMENTATION);
     SegmentedSectionPtr fragInfoPtr;
-    bool ok = copyOut(fragInfoSec, fragInfoPtr);
+    LocalArenaPoolImpl op_sec_pool(op_ptr.p->m_trans_ptr.p->m_arena,c_opSectionBufferPool);
+    bool ok = copyOut(op_sec_pool, fragInfoSec, fragInfoPtr);
     ndbrequire(ok);
 
     if (AlterTableReq::getReorgFragFlag(req->changeMask))
@@ -9559,7 +9478,8 @@ Dbdict::alterTable_fromCommitComplete(Si
     const OpSection& tabInfoSec =
       getOpSection(op_ptr, AlterTabReq::DICT_TAB_INFO);
     SegmentedSectionPtr tabInfoPtr;
-    bool ok = copyOut(tabInfoSec, tabInfoPtr);
+    LocalArenaPoolImpl op_sec_pool(op_ptr.p->m_trans_ptr.p->m_arena,c_opSectionBufferPool);
+    bool ok = copyOut(op_sec_pool, tabInfoSec, tabInfoPtr);
     ndbrequire(ok);
 
     SectionHandle handle(this, tabInfoPtr.i);
@@ -10726,6 +10646,7 @@ flush:
 const Dbdict::OpInfo
 Dbdict::CreateIndexRec::g_opInfo = {
   { 'C', 'I', 'n', 0 },
+  ~RT_DBDICT_CREATE_INDEX,
   GSN_CREATE_INDX_IMPL_REQ,
   CreateIndxImplReq::SignalLength,
   //
@@ -11480,6 +11401,7 @@ Dbdict::execCREATE_INDX_IMPL_REF(Signal*
 const Dbdict::OpInfo
 Dbdict::DropIndexRec::g_opInfo = {
   { 'D', 'I', 'n', 0 },
+  ~RT_DBDICT_DROP_INDEX,
   GSN_DROP_INDX_IMPL_REQ,
   DropIndxImplReq::SignalLength,
   //
@@ -11923,6 +11845,7 @@ Dbdict::execDROP_INDX_IMPL_REF(Signal* s
 const Dbdict::OpInfo
 Dbdict::AlterIndexRec::g_opInfo = {
   { 'A', 'I', 'n', 0 },
+  ~RT_DBDICT_ALTER_INDEX,
   GSN_ALTER_INDX_IMPL_REQ,
   AlterIndxImplReq::SignalLength,
   //
@@ -12172,7 +12095,7 @@ Dbdict::alterIndex_parse(Signal* signal,
        *   (i.e recursivly, assuming that no operation can come inbetween)
        */
       Ptr<SchemaOp> baseop = op_ptr;
-      LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+      LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
       ndbrequire(list.prev(baseop));
       Uint32 sz = sizeof(baseop.p->m_oprec_ptr.p->m_opType);
       const char * opType = baseop.p->m_oprec_ptr.p->m_opType;
@@ -12871,7 +12794,8 @@ Dbdict::alterIndex_toAddPartitions(Signa
   const OpSection& fragInfoSec =
     getOpSection(base_op, AlterTabReq::FRAGMENTATION);
   SegmentedSectionPtr fragInfoPtr;
-  bool ok = copyOut(fragInfoSec, fragInfoPtr);
+  LocalArenaPoolImpl op_sec_pool(op_ptr.p->m_trans_ptr.p->m_arena, c_opSectionBufferPool);
+  bool ok = copyOut(op_sec_pool, fragInfoSec, fragInfoPtr);
   ndbrequire(ok);
   SectionHandle handle(this, fragInfoPtr.i);
 
@@ -13143,6 +13067,7 @@ Dbdict::execALTER_INDX_IMPL_REF(Signal*
 const Dbdict::OpInfo
 Dbdict::BuildIndexRec::g_opInfo = {
   { 'B', 'I', 'n', 0 },
+  ~RT_DBDICT_BUILD_INDEX,
   GSN_BUILD_INDX_IMPL_REQ,
   BuildIndxImplReq::SignalLength,
   //
@@ -13916,6 +13841,7 @@ Dbdict::execBUILD_INDX_IMPL_REF(Signal*
 const Dbdict::OpInfo
 Dbdict::IndexStatRec::g_opInfo = {
   { 'S', 'I', 'n', 0 },
+  ~RT_DBDICT_INDEX_STAT,
   GSN_INDEX_STAT_IMPL_REQ,
   IndexStatImplReq::SignalLength,
   //
@@ -14665,6 +14591,7 @@ Dbdict::indexStatBg_sendContinueB(Signal
 const Dbdict::OpInfo
 Dbdict::CopyDataRec::g_opInfo = {
   { 'D', 'C', 'D', 0 },
+  ~RT_DBDICT_COPY_DATA,
   GSN_COPY_DATA_IMPL_REQ,
   CopyDataImplReq::SignalLength,
 
@@ -17666,6 +17593,7 @@ void Dbdict::dropEvent_sendReply(Signal*
 const Dbdict::OpInfo
 Dbdict::CreateTriggerRec::g_opInfo = {
   { 'C', 'T', 'r', 0 },
+  ~RT_DBDICT_CREATE_TRIGGER,
   GSN_CREATE_TRIG_IMPL_REQ,
   CreateTrigImplReq::SignalLength,
   //
@@ -18053,7 +17981,6 @@ Dbdict::createTrigger_create_drop_trigge
 {
   jam();
 
-  SchemaTransPtr trans_ptr = op_ptr.p->m_trans_ptr;
   CreateTriggerRecPtr createTriggerPtr;
   getOpRec(op_ptr, createTriggerPtr);
   CreateTrigImplReq* impl_req = &createTriggerPtr.p->m_request;
@@ -18061,11 +17988,9 @@ Dbdict::createTrigger_create_drop_trigge
   /**
    * Construct a dropTrigger operation
    */
-  SchemaOpPtr& oplnk_ptr = op_ptr.p->m_oplnk_ptr;
-  ndbrequire(oplnk_ptr.isNull());
+  SchemaOpPtr oplnk_ptr;
   DropTriggerRecPtr dropTriggerPtr;
-  seizeSchemaOp(oplnk_ptr, dropTriggerPtr);
-  if (oplnk_ptr.isNull())
+  if(!seizeLinkedSchemaOp(op_ptr, oplnk_ptr, dropTriggerPtr))
   {
     jam();
     setError(error, CreateTrigRef::TooManyTriggers, __LINE__);
@@ -18085,9 +18010,6 @@ Dbdict::createTrigger_create_drop_trigge
   aux_impl_req->triggerId = impl_req->triggerId;
   aux_impl_req->triggerInfo = impl_req->triggerInfo;
 
-  // link other way too
-  oplnk_ptr.p->m_opbck_ptr = op_ptr;
-  oplnk_ptr.p->m_trans_ptr = trans_ptr;
   dropTriggerPtr.p->m_main_op = createTriggerPtr.p->m_main_op;
 
   if (createTriggerPtr.p->m_main_op)
@@ -18617,6 +18539,7 @@ Dbdict::execCREATE_TRIG_IMPL_REF(Signal*
 const Dbdict::OpInfo
 Dbdict::DropTriggerRec::g_opInfo = {
   { 'D', 'T', 'r', 0 },
+  ~RT_DBDICT_DROP_TRIGGER,
   GSN_DROP_TRIG_IMPL_REQ,
   DropTrigImplReq::SignalLength,
   //
@@ -19591,7 +19514,7 @@ Dbdict::execDICT_TAKEOVER_REQ(Signal* si
 #endif
      
      SchemaOpPtr op_ptr;
-     LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+     LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
      bool pending_op = list.first(op_ptr);
      if (pending_op &&
          (trans_ptr.p->m_state == SchemaTrans::TS_COMPLETING ||
@@ -20168,7 +20091,8 @@ void Dbdict::check_takeover_replies(Sign
               SchemaOpPtr missing_op_ptr;
               const OpInfo& info =
                 *findOpInfo(nodePtr.p->takeOverConf.highest_op_impl_req_gsn);
-              if (seizeSchemaOp(missing_op_ptr,
+              if (seizeSchemaOp(trans_ptr,
+                                missing_op_ptr,
                                 nodePtr.p->takeOverConf.highest_op,
                                 info))
               {
@@ -20176,7 +20100,6 @@ void Dbdict::check_takeover_replies(Sign
 #ifdef VM_TRACE
                 ndbout_c("Created missing operation %u, on new master", missing_op_ptr.p->op_key);
 #endif
-                addSchemaOp(trans_ptr, missing_op_ptr);
                 missing_op_ptr.p->m_state = nodePtr.p->takeOverConf.highest_op_state;
                 masterNodePtr.p->recoveryState = NodeRecord::RS_PARTIAL_ROLLBACK;
                 masterNodePtr.p->start_op = masterNodePtr.p->takeOverConf.highest_op;
@@ -20211,7 +20134,8 @@ void Dbdict::check_takeover_replies(Sign
             Uint32 op_state = nodePtr.p->takeOverConf.lowest_op_state;
             const OpInfo& info =
               *findOpInfo(nodePtr.p->takeOverConf.lowest_op_impl_req_gsn);
-            if (seizeSchemaOp(missing_op_ptr,
+            if (seizeSchemaOp(trans_ptr,
+                              missing_op_ptr,
                               op_key,
                               info))
             {
@@ -20219,7 +20143,6 @@ void Dbdict::check_takeover_replies(Sign
 #ifdef VM_TRACE
               ndbout_c("Created ressurected operation %u, on new master", op_key);
 #endif
-              addSchemaOp(trans_ptr, missing_op_ptr);
               trans_ptr.p->ressurected_op = true;
               missing_op_ptr.p->m_state = op_state;
               nodePtr.p->recoveryState = NodeRecord::RS_PARTIAL_ROLLFORWARD;
@@ -20680,6 +20603,7 @@ Dbdict::getTableEntry(const XSchemaFile
 const Dbdict::OpInfo
 Dbdict::CreateFileRec::g_opInfo = {
   { 'C', 'F', 'l', 0 },
+  ~RT_DBDICT_CREATE_FILE,
   GSN_CREATE_FILE_IMPL_REQ,
   CreateFileImplReq::SignalLength,
   //
@@ -20804,31 +20728,6 @@ Dbdict::createFile_parse(Signal* signal,
     return;
   }
 
-  /**
-   * auto-connect
-   */
-  if (f.FilegroupId == RNIL && f.FilegroupVersion == RNIL)
-  {
-    jam();
-    Filegroup_hash::Iterator it;
-    c_filegroup_hash.first(it);
-    while (!it.isNull())
-    {
-      jam();
-      if ((f.FileType == DictTabInfo::Undofile &&
-           it.curr.p->m_type == DictTabInfo::LogfileGroup) ||
-          (f.FileType == DictTabInfo::Datafile &&
-           it.curr.p->m_type == DictTabInfo::Tablespace))
-      {
-        jam();
-        f.FilegroupId = it.curr.p->key;
-        f.FilegroupVersion = it.curr.p->m_version;
-        break;
-      }
-      c_filegroup_hash.next(it);
-    }
-  }
-
   // Get Filegroup
   FilegroupPtr fg_ptr;
   if(!c_filegroup_hash.find(fg_ptr, f.FilegroupId))
@@ -21198,7 +21097,7 @@ Dbdict::createFile_prepare(Signal* signa
   }
 
   const OpSection& objInfoSec = getOpSection(op_ptr, 0);
-  writeTableFile(signal, impl_req->file_id, objInfoSec, &cb);
+  writeTableFile(signal, op_ptr, impl_req->file_id, objInfoSec, &cb);
 }
 
 void
@@ -21432,6 +21331,7 @@ Dbdict::execCREATE_FILE_IMPL_CONF(Signal
 const Dbdict::OpInfo
 Dbdict::CreateFilegroupRec::g_opInfo = {
   { 'C', 'F', 'G', 0 },
+  ~RT_DBDICT_CREATE_FILEGROUP,
   GSN_CREATE_FILEGROUP_IMPL_REQ,
   CreateFilegroupImplReq::SignalLength,
   //
@@ -21558,21 +21458,6 @@ Dbdict::createFilegroup_parse(Signal* si
       setError(error, CreateFilegroupRef::InvalidExtentSize, __LINE__);
       return;
     }
-
-    /**
-     * auto-connect
-     */
-    if (fg.TS_LogfileGroupId == RNIL && fg.TS_LogfileGroupVersion == RNIL)
-    {
-      jam();
-      Filegroup_hash::Iterator it;
-      if (c_filegroup_hash.first(it))
-      {
-        jam();
-        fg.TS_LogfileGroupId = it.curr.p->key;
-        fg.TS_LogfileGroupVersion = it.curr.p->m_version;
-      }
-    }
   }
   else if(fg.FilegroupType == DictTabInfo::LogfileGroup)
   {
@@ -21886,7 +21771,7 @@ Dbdict::createFilegroup_prepare(Signal*
   }
 
   const OpSection& objInfoSec = getOpSection(op_ptr, 0);
-  writeTableFile(signal, impl_req->filegroup_id, objInfoSec, &cb);
+  writeTableFile(signal, op_ptr, impl_req->filegroup_id, objInfoSec, &cb);
 }
 
 void
@@ -22046,6 +21931,7 @@ Dbdict::execCREATE_FILEGROUP_IMPL_CONF(S
 const Dbdict::OpInfo
 Dbdict::DropFileRec::g_opInfo = {
   { 'D', 'F', 'l', 0 },
+  ~RT_DBDICT_DROP_FILE,
   GSN_DROP_FILE_IMPL_REQ,
   DropFileImplReq::SignalLength,
   //
@@ -22403,6 +22289,7 @@ Dbdict::send_drop_file(Signal* signal, U
 const Dbdict::OpInfo
 Dbdict::DropFilegroupRec::g_opInfo = {
   { 'D', 'F', 'g', 0 },
+  ~RT_DBDICT_DROP_FILEGROUP,
   GSN_DROP_FILEGROUP_IMPL_REQ,
   DropFilegroupImplReq::SignalLength,
   //
@@ -22830,6 +22717,7 @@ Dbdict::send_drop_fg(Signal* signal, Uin
 const Dbdict::OpInfo
 Dbdict::CreateNodegroupRec::g_opInfo = {
   { 'C', 'N', 'G', 0 },
+  ~RT_DBDICT_CREATE_NODEGROUP,
   GSN_CREATE_NODEGROUP_IMPL_REQ,
   CreateNodegroupImplReq::SignalLength,
   //
@@ -23429,6 +23317,7 @@ Dbdict::execCREATE_HASH_MAP_CONF(Signal*
 const Dbdict::OpInfo
 Dbdict::DropNodegroupRec::g_opInfo = {
   { 'D', 'N', 'G', 0 },
+  ~RT_DBDICT_DROP_NODEGROUP,
   GSN_DROP_NODEGROUP_IMPL_REQ,
   DropNodegroupImplReq::SignalLength,
   //
@@ -24109,7 +23998,7 @@ Dbdict::findOpInfo(Uint32 gsn)
 // OpSection
 
 bool
-Dbdict::copyIn(OpSection& op_sec, const SegmentedSectionPtr& ss_ptr)
+Dbdict::copyIn(OpSectionBufferPool& pool, OpSection& op_sec, const SegmentedSectionPtr& ss_ptr)
 {
   const Uint32 size = 1024;
   Uint32 buf[size];
@@ -24121,7 +24010,7 @@ Dbdict::copyIn(OpSection& op_sec, const
   {
     jam();
     ndbrequire(reader.getWords(buf, size));
-    if (!copyIn(op_sec, buf, size))
+    if (!copyIn(pool, op_sec, buf, size))
     {
       jam();
       return false;
@@ -24130,7 +24019,7 @@ Dbdict::copyIn(OpSection& op_sec, const
   }
 
   ndbrequire(reader.getWords(buf, len));
-  if (!copyIn(op_sec, buf, len))
+  if (!copyIn(pool, op_sec, buf, len))
   {
     jam();
     return false;
@@ -24140,9 +24029,9 @@ Dbdict::copyIn(OpSection& op_sec, const
 }
 
 bool
-Dbdict::copyIn(OpSection& op_sec, const Uint32* src, Uint32 srcSize)
+Dbdict::copyIn(OpSectionBufferPool& pool, OpSection& op_sec, const Uint32* src, Uint32 srcSize)
 {
-  OpSectionBuffer buffer(c_opSectionBufferPool, op_sec.m_head);
+  OpSectionBuffer buffer(pool, op_sec.m_head);
   if (!buffer.append(src, srcSize)) {
     jam();
     return false;
@@ -24167,14 +24056,15 @@ Dbdict::copyOut(Dbdict::OpSectionBuffer
 }
 
 bool
-Dbdict::copyOut(const OpSection& op_sec, SegmentedSectionPtr& ss_ptr)
+Dbdict::copyOut(OpSectionBufferPool& pool, const OpSection& op_sec, SegmentedSectionPtr& ss_ptr)
 {
   const Uint32 size = 1024;
   Uint32 buf[size];
 
   Uint32 len = op_sec.getSize();
   OpSectionBufferHead tmp_head = op_sec.m_head;
-  OpSectionBuffer buffer(c_opSectionBufferPool, tmp_head);
+
+  OpSectionBuffer buffer(pool, tmp_head);
 
   OpSectionBufferConstIterator iter;
   buffer.first(iter);
@@ -24215,7 +24105,7 @@ fail:
 }
 
 bool
-Dbdict::copyOut(const OpSection& op_sec, Uint32* dst, Uint32 dstSize)
+Dbdict::copyOut(OpSectionBufferPool& pool, const OpSection& op_sec, Uint32* dst, Uint32 dstSize)
 {
   if (op_sec.getSize() > dstSize) {
     jam();
@@ -24224,7 +24114,7 @@ Dbdict::copyOut(const OpSection& op_sec,
 
   // there is no const version of LocalDataBuffer
   OpSectionBufferHead tmp_head = op_sec.m_head;
-  OpSectionBuffer buffer(c_opSectionBufferPool, tmp_head);
+  OpSectionBuffer buffer(pool, tmp_head);
 
   OpSectionBufferConstIterator iter;
   Uint32 n = 0;
@@ -24238,9 +24128,9 @@ Dbdict::copyOut(const OpSection& op_sec,
 }
 
 void
-Dbdict::release(OpSection& op_sec)
+Dbdict::release(OpSectionBufferPool& pool, OpSection& op_sec)
 {
-  OpSectionBuffer buffer(c_opSectionBufferPool, op_sec.m_head);
+  OpSectionBuffer buffer(pool, op_sec.m_head);
   buffer.release();
 }
 
@@ -24256,7 +24146,7 @@ Dbdict::getOpInfo(SchemaOpPtr op_ptr)
 }
 
 bool
-Dbdict::seizeSchemaOp(SchemaOpPtr& op_ptr, Uint32 op_key, const OpInfo& info)
+Dbdict::seizeSchemaOp(SchemaTransPtr trans_ptr, SchemaOpPtr& op_ptr, Uint32 op_key, const OpInfo& info, bool linked)
 {
   if ((ERROR_INSERTED(6111) && 
        (info.m_impl_req_gsn == GSN_CREATE_TAB_REQ ||
@@ -24281,14 +24171,20 @@ Dbdict::seizeSchemaOp(SchemaOpPtr& op_pt
 
   if (!findSchemaOp(op_ptr, op_key)) {
     jam();
-    if (c_schemaOpHash.seize(op_ptr)) {
+    if (c_schemaOpPool.seize(trans_ptr.p->m_arena, op_ptr)) {
       jam();
       new (op_ptr.p) SchemaOp();
       op_ptr.p->op_key = op_key;
+      op_ptr.p->m_trans_ptr = trans_ptr;
       if ((this->*(info.m_seize))(op_ptr)) {
         jam();
+
+        if(!linked) {
+          jam();
+          addSchemaOp(op_ptr);
+        }
+
         c_schemaOpHash.add(op_ptr);
-        op_ptr.p->m_magic = SchemaOp::DICT_MAGIC;
         D("seizeSchemaOp" << V(op_key) << V(info.m_opType));
         return true;
       }
@@ -24340,8 +24236,8 @@ Dbdict::releaseSchemaOp(SchemaOpPtr& op_
   }
 
   ndbrequire(op_ptr.p->m_magic == SchemaOp::DICT_MAGIC);
-  op_ptr.p->m_magic = 0;
-  c_schemaOpHash.release(op_ptr);
+  c_schemaOpHash.remove(op_ptr);
+  c_schemaOpPool.release(op_ptr);
   op_ptr.setNull();
 }
 
@@ -24372,7 +24268,8 @@ Dbdict::saveOpSection(SchemaOpPtr op_ptr
   OpSection& op_sec = op_ptr.p->m_section[ss_no];
   op_ptr.p->m_sections++;
 
-  bool ok = copyIn(op_sec, ss_ptr);
+  LocalArenaPoolImpl op_sec_pool(op_ptr.p->m_trans_ptr.p->m_arena, c_opSectionBufferPool);
+  bool ok =  copyIn(op_sec_pool, op_sec, ss_ptr);
   ndbrequire(ok);
   return true;
 }
@@ -24382,20 +24279,20 @@ Dbdict::releaseOpSection(SchemaOpPtr op_
 {
   ndbrequire(ss_no + 1 == op_ptr.p->m_sections);
   OpSection& op_sec = op_ptr.p->m_section[ss_no];
-  release(op_sec);
+  LocalArenaPoolImpl op_sec_pool(op_ptr.p->m_trans_ptr.p->m_arena, c_opSectionBufferPool);
+  release(op_sec_pool, op_sec);
   op_ptr.p->m_sections = ss_no;
 }
 
 // add schema op to trans during parse phase
 
 void
-Dbdict::addSchemaOp(SchemaTransPtr trans_ptr, SchemaOpPtr& op_ptr)
+Dbdict::addSchemaOp(SchemaOpPtr op_ptr)
 {
-  LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+  SchemaTransPtr trans_ptr = op_ptr.p->m_trans_ptr;
+  LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
   list.addLast(op_ptr);
 
-  op_ptr.p->m_trans_ptr = trans_ptr;
-
   // jonas_todo REMOVE side effect
   // add global flags from trans
   const Uint32& src_info = trans_ptr.p->m_requestInfo;
@@ -24572,7 +24469,7 @@ Dbdict::findDictObjectOp(SchemaOpPtr& op
     D("found" << *trans_ptr.p);
 
     {
-      LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+      LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
       SchemaOpPtr loop_ptr;
       list.first(loop_ptr);
       while (!loop_ptr.isNull()) {
@@ -24605,17 +24502,21 @@ Dbdict::seizeSchemaTrans(SchemaTransPtr&
   }
   if (!findSchemaTrans(trans_ptr, trans_key)) {
     jam();
-    if (c_schemaTransHash.seize(trans_ptr)) {
+    ArenaHead arena;
+    bool ok = c_arenaAllocator.seize(arena);
+    ndbrequire(ok); // TODO: report error
+    if (c_schemaTransPool.seize(arena, trans_ptr)) {
       jam();
       new (trans_ptr.p) SchemaTrans();
       trans_ptr.p->trans_key = trans_key;
+      trans_ptr.p->m_arena = arena;
       c_schemaTransHash.add(trans_ptr);
       c_schemaTransList.addLast(trans_ptr);
       c_schemaTransCount++;
-      trans_ptr.p->m_magic = SchemaTrans::DICT_MAGIC;
       D("seizeSchemaTrans" << V(trans_key));
       return true;
     }
+    c_arenaAllocator.release(arena);
   }
   trans_ptr.setNull();
   return false;
@@ -24657,26 +24558,35 @@ Dbdict::releaseSchemaTrans(SchemaTransPt
 {
   D("releaseSchemaTrans" << V(trans_ptr.p->trans_key));
 
-  LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
-  SchemaOpPtr op_ptr;
-  while (list.first(op_ptr)) {
-    list.remove(op_ptr);
-    releaseSchemaOp(op_ptr);
+  {
+    /**
+     * Put in own scope...since LocalSchemaOp_list stores back head
+     *   in destructor
+     */
+    LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
+    SchemaOpPtr op_ptr;
+    while (list.first(op_ptr)) {
+      list.remove(op_ptr);
+      releaseSchemaOp(op_ptr);
+    }
   }
   ndbrequire(trans_ptr.p->m_magic == SchemaTrans::DICT_MAGIC);
-  trans_ptr.p->m_magic = 0;
   ndbrequire(c_schemaTransCount != 0);
   c_schemaTransCount--;
   c_schemaTransList.remove(trans_ptr);
-  c_schemaTransHash.release(trans_ptr);
+  c_schemaTransHash.remove(trans_ptr);
+  ArenaHead arena = trans_ptr.p->m_arena;
+  c_schemaTransPool.release(trans_ptr);
+  c_arenaAllocator.release(arena);
   trans_ptr.setNull();
 
   if (c_schemaTransCount == 0)
   {
     jam();
 
-    ndbrequire(c_schemaOpPool.getNoOfFree() == c_schemaOpPool.getSize());
-    ndbrequire(c_opSectionBufferPool.getNoOfFree() == c_opSectionBufferPool.getSize());
+    Resource_limit rl;
+    m_ctx.m_mm.get_resource_limit(RG_SCHEMA_TRANS_MEMORY, rl);
+    ndbrequire(rl.m_curr <= 1); // ArenaAllocator can keep one page for empty pool
 #ifdef VM_TRACE
     if (getNodeState().startLevel == NodeState::SL_STARTED)
       check_consistency();
@@ -25495,7 +25405,7 @@ Dbdict::trans_prepare_first(Signal* sign
   {
     bool first;
     {
-      LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+      LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
       first = list.first(op_ptr);
     }
     if (first)
@@ -25531,7 +25441,7 @@ Dbdict::trans_prepare_next(Signal* signa
   if (ERROR_INSERTED(6143))
   {
     jam();
-    LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+    LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
     if (!list.hasNext(op_ptr))
     {
       /*
@@ -25580,7 +25490,7 @@ Dbdict::trans_prepare_recv_reply(Signal*
   {
     bool next;
     {
-      LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+      LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
       next = list.next(op_ptr);
     }
     if (next)
@@ -25626,7 +25536,7 @@ Dbdict::trans_abort_parse_start(Signal*
   SchemaOpPtr op_ptr;
   bool last = false;
   {
-    LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+    LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
     last =  list.last(op_ptr);
   }
 
@@ -25651,7 +25561,7 @@ Dbdict::trans_abort_parse_recv_reply(Sig
     SchemaOpPtr last_op = op_ptr;
     bool prev = false;
     {
-      LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+      LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
       prev = list.prev(op_ptr);
       list.remove(last_op);         // Release aborted op
     }
@@ -25736,7 +25646,7 @@ Dbdict::trans_abort_parse_next(Signal* s
   if (ERROR_INSERTED(6144))
   {
     jam();
-    LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+    LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
     if (!list.hasNext(op_ptr))
     {
       /*
@@ -25783,7 +25693,7 @@ Dbdict::trans_abort_prepare_start(Signal
   bool last = false;
   SchemaOpPtr op_ptr;
   {
-    LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+    LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
     last = list.last(op_ptr);
   }
 
@@ -25811,7 +25721,7 @@ Dbdict::trans_abort_prepare_recv_reply(S
 
   bool prev = false;
   {
-    LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+    LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
     prev = list.prev(op_ptr);
   }
 
@@ -25925,7 +25835,7 @@ Dbdict::trans_abort_prepare_next(Signal*
   if (ERROR_INSERTED(6145))
   {
     jam();
-    LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+    LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
     if (!list.hasPrev(op_ptr))
     {
       /*
@@ -25980,7 +25890,7 @@ Dbdict::trans_rollback_sp_start(Signal*
   SchemaOpPtr op_ptr;
 
   {
-    LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+    LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
     ndbrequire(list.last(op_ptr));
   }
 
@@ -26027,7 +25937,7 @@ Dbdict::trans_rollback_sp_recv_reply(Sig
   }
 
   {
-    LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+    LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
 
     SchemaOpPtr last_op = op_ptr;
     ndbrequire(list.prev(op_ptr)); // Must have prev, as not SP
@@ -26059,7 +25969,7 @@ Dbdict::trans_rollback_sp_next(Signal* s
   if (ERROR_INSERTED(6144))
   {
     jam();
-    LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+    LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
     if (!list.hasPrev(op_ptr))
     {
       /*
@@ -26101,7 +26011,7 @@ Dbdict::trans_rollback_sp_done(Signal* s
   const OpInfo info = getOpInfo(op_ptr);
   (this->*(info.m_reply))(signal, op_ptr, error);
 
-  LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+  LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
   list.remove(op_ptr);
   releaseSchemaOp(op_ptr);
 
@@ -26338,7 +26248,7 @@ Dbdict::trans_commit_mutex_locked(Signal
   bool first = false;
   SchemaOpPtr op_ptr;
   {
-    LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+    LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
     first = list.first(op_ptr);
   }
 
@@ -26429,7 +26339,7 @@ Dbdict::trans_commit_next(Signal* signal
 
   if (ERROR_INSERTED(6147))
   {
-    LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+    LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
     if (!list.hasNext(op_ptr))
     {
       jam();
@@ -26500,7 +26410,7 @@ Dbdict::trans_commit_recv_reply(Signal*
 
   bool next = false;
   {
-    LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+    LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
     next = list.next(op_ptr);
   }
 
@@ -26676,7 +26586,7 @@ Dbdict::trans_complete_first(Signal * si
   bool first = false;
   SchemaOpPtr op_ptr;
   {
-    LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+    LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
     first = list.first(op_ptr);
   }
 
@@ -26754,7 +26664,7 @@ Dbdict::trans_complete_recv_reply(Signal
 
   bool next = false;
   {
-    LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+    LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
     next = list.next(op_ptr);
   }
   
@@ -26841,7 +26751,7 @@ Dbdict::check_partial_trans_end_recv_rep
      */
     jam();
     SchemaOpPtr op_ptr;
-    LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+    LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
     list.remove(op_ptr);
 #ifdef VM_TRACE
     ndbout_c("Releasing ressurected op %u", op_ptr.p->op_key);
@@ -27160,7 +27070,7 @@ Dbdict::execSCHEMA_TRANS_IMPL_REQ(Signal
         /**
          * Remove op (except at coordinator
          */
-        LocalDLFifoList<SchemaOp> list(c_schemaOpPool, trans_ptr.p->m_op_list);
+        LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
         list.remove(op_ptr);
         releaseSchemaOp(op_ptr);
       }
@@ -27286,7 +27196,7 @@ Dbdict::slave_run_parse(Signal *signal,
       jam();
       setError(error, AlterTableRef::SingleUser, __LINE__);
     }
-    else if (seizeSchemaOp(op_ptr, op_key, info))
+    else if (seizeSchemaOp(trans_ptr, op_ptr, op_key, info))
     {
       jam();
 
@@ -27297,7 +27207,6 @@ Dbdict::slave_run_parse(Signal *signal,
       Uint32* dst = oprec_ptr.p->m_impl_req_data;
       memcpy(dst, src, len << 2);
 
-      addSchemaOp(trans_ptr, op_ptr);
       op_ptr.p->m_state = SchemaOp::OS_PARSING;
       (this->*(info.m_parse))(signal, false, op_ptr, handle, error);
     } else {
@@ -28353,6 +28262,7 @@ ArrayPool<Hash2FragmentMap> g_hash_map;
 const Dbdict::OpInfo
 Dbdict::CreateHashMapRec::g_opInfo = {
   { 'C', 'H', 'M', 0 },
+  ~RT_DBDICT_CREATE_HASH_MAP,
   GSN_CREATE_HASH_MAP_REQ,
   CreateHashMapReq::SignalLength,
   //
@@ -28915,7 +28825,7 @@ Dbdict::createHashMap_prepare(Signal* si
   cb.m_callbackFunction = safe_cast(&Dbdict::createHashMap_writeObjConf);
 
   const OpSection& tabInfoSec = getOpSection(op_ptr, 0);
-  writeTableFile(signal, impl_req->objectId, tabInfoSec, &cb);
+  writeTableFile(signal, op_ptr, impl_req->objectId, tabInfoSec, &cb);
 }
 
 void

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp	2011-08-19 09:38:29 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp	2011-10-18 12:45:50 +0000
@@ -24,6 +24,8 @@
 #include <ndb_limits.h>
 #include <trigger_definitions.h>
 #include <pc.hpp>
+#include <ArenaPool.hpp>
+#include <DataBuffer2.hpp>
 #include <DLHashTable.hpp>
 #include <DLFifoList.hpp>
 #include <CArray.hpp>
@@ -1359,7 +1361,8 @@ private:
   // OpInfo
 
   struct OpInfo {
-    const char m_opType[4]; // e.g. CTa for CreateTable
+    const char m_opType[4]; // e.g. CTa for CreateTable. TODO: remove. use only m_magic?
+    Uint32 m_magic;
     Uint32 m_impl_req_gsn;
     Uint32 m_impl_req_length;
 
@@ -1390,10 +1393,12 @@ private:
 
   struct OpRec
   {
-    char m_opType[4];
+    char m_opType[4]; // TODO: remove. only use m_magic
 
     Uint32 nextPool;
 
+    Uint32 m_magic;
+
     // reference to the static member in subclass
     const OpInfo& m_opInfo;
 
@@ -1404,6 +1409,7 @@ private:
     Uint32 m_obj_ptr_i;
 
     OpRec(const OpInfo& info, Uint32* impl_req_data) :
+      m_magic(info.m_magic),
       m_opInfo(info),
       m_impl_req_data(impl_req_data) {
       m_obj_ptr_i = RNIL;
@@ -1421,19 +1427,19 @@ private:
 
   enum { OpSectionSegmentSize = 127 };
   typedef
-    LocalDataBuffer<OpSectionSegmentSize>
+    LocalDataBuffer2<OpSectionSegmentSize, LocalArenaPoolImpl>
     OpSectionBuffer;
   typedef
-    OpSectionBuffer::Head
+    DataBuffer2<OpSectionSegmentSize, LocalArenaPoolImpl>::Head
     OpSectionBufferHead;
   typedef
     OpSectionBuffer::DataBufferPool
     OpSectionBufferPool;
   typedef
-    DataBuffer<OpSectionSegmentSize>::ConstDataBufferIterator
+    DataBuffer2<OpSectionSegmentSize, LocalArenaPoolImpl>::ConstDataBufferIterator
     OpSectionBufferConstIterator;
 
-  OpSectionBufferPool c_opSectionBufferPool;
+  ArenaPool c_opSectionBufferPool;
 
   struct OpSection {
     OpSectionBufferHead m_head;
@@ -1442,13 +1448,13 @@ private:
     }
   };
 
-  bool copyIn(OpSection&, const SegmentedSectionPtr&);
-  bool copyIn(OpSection&, const Uint32* src, Uint32 srcSize);
-  bool copyOut(const OpSection&, SegmentedSectionPtr&);
-  bool copyOut(const OpSection&, Uint32* dst, Uint32 dstSize);
+  bool copyIn(OpSectionBufferPool&, OpSection&, const SegmentedSectionPtr&);
+  bool copyIn(OpSectionBufferPool&, OpSection&, const Uint32* src, Uint32 srcSize);
+  bool copyOut(OpSectionBufferPool&, const OpSection&, SegmentedSectionPtr&);
+  bool copyOut(OpSectionBufferPool&, const OpSection&, Uint32* dst, Uint32 dstSize);
   bool copyOut(OpSectionBuffer & buffer, OpSectionBufferConstIterator & iter,
                Uint32 * dst, Uint32 len);
-  void release(OpSection&);
+  void release(OpSectionBufferPool&, OpSection&);
 
   // SchemaOp
 
@@ -1567,7 +1573,7 @@ private:
     SchemaFile::TableEntry m_orig_entry;
 
     // magic is on when record is seized
-    enum { DICT_MAGIC = 0xd1c70001 };
+    enum { DICT_MAGIC = ~RT_DBDICT_SCHEMA_OPERATION };
     Uint32 m_magic;
 
     SchemaOp() {
@@ -1582,7 +1588,7 @@ private:
       m_callback.m_callbackData = 0;
       m_oplnk_ptr.setNull();
       m_opbck_ptr.setNull();
-      m_magic = 0;
+      m_magic = DICT_MAGIC;
       m_base_op_ptr_i = RNIL;
 
       m_orig_entry_id = RNIL;
@@ -1598,8 +1604,13 @@ private:
 #endif
   };
 
-  ArrayPool<SchemaOp> c_schemaOpPool;
-  DLHashTable<SchemaOp> c_schemaOpHash;
+  typedef RecordPool<SchemaOp,ArenaPool> SchemaOp_pool;
+  typedef LocalDLFifoList<SchemaOp,SchemaOp,SchemaOp_pool> LocalSchemaOp_list;
+  typedef DLHashTable<SchemaOp,SchemaOp,SchemaOp_pool> SchemaOp_hash;
+  typedef DLFifoList<SchemaOp,SchemaOp,SchemaOp_pool>::Head  SchemaOp_head;
+
+  SchemaOp_pool c_schemaOpPool;
+  SchemaOp_hash c_schemaOpHash;
 
   const OpInfo& getOpInfo(SchemaOpPtr op_ptr);
 
@@ -1631,9 +1642,9 @@ private:
   inline bool
   seizeOpRec(SchemaOpPtr op_ptr) {
     OpRecPtr& oprec_ptr = op_ptr.p->m_oprec_ptr;
-    ArrayPool<T>& pool = T::getPool(this);
+    RecordPool<T,ArenaPool>& pool = T::getPool(this);
     Ptr<T> t_ptr;
-    if (pool.seize(t_ptr)) {
+    if (pool.seize(op_ptr.p->m_trans_ptr.p->m_arena, t_ptr)) {
       new (t_ptr.p) T();
       setOpRec<T>(op_ptr, t_ptr);
       return true;
@@ -1646,7 +1657,7 @@ private:
   inline void
   releaseOpRec(SchemaOpPtr op_ptr) {
     OpRecPtr& oprec_ptr = op_ptr.p->m_oprec_ptr;
-    ArrayPool<T>& pool = T::getPool(this);
+    RecordPool<T,ArenaPool>& pool = T::getPool(this);
     Ptr<T> t_ptr;
     getOpRec<T>(op_ptr, t_ptr);
     pool.release(t_ptr);
@@ -1655,18 +1666,18 @@ private:
 
   // seize / find / release, atomic on op rec + data rec
 
-  bool seizeSchemaOp(SchemaOpPtr& op_ptr, Uint32 op_key, const OpInfo& info);
+  bool seizeSchemaOp(SchemaTransPtr trans_ptr, SchemaOpPtr& op_ptr, Uint32 op_key, const OpInfo& info, bool linked=false);
 
   template <class T>
   inline bool
-  seizeSchemaOp(SchemaOpPtr& op_ptr, Uint32 op_key) {
-    return seizeSchemaOp(op_ptr, op_key, T::g_opInfo);
+  seizeSchemaOp(SchemaTransPtr trans_ptr, SchemaOpPtr& op_ptr, Uint32 op_key, bool linked) {
+    return seizeSchemaOp(trans_ptr, op_ptr, op_key, T::g_opInfo, linked);
   }
 
   template <class T>
   inline bool
-  seizeSchemaOp(SchemaOpPtr& op_ptr, Ptr<T>& t_ptr, Uint32 op_key) {
-    if (seizeSchemaOp<T>(op_ptr, op_key)) {
+  seizeSchemaOp(SchemaTransPtr trans_ptr, SchemaOpPtr& op_ptr, Ptr<T>& t_ptr, Uint32 op_key) {
+    if (seizeSchemaOp<T>(trans_ptr, op_ptr, op_key)) {
       getOpRec<T>(op_ptr, t_ptr);
       return true;
     }
@@ -1675,14 +1686,14 @@ private:
 
   template <class T>
   inline bool
-  seizeSchemaOp(SchemaOpPtr& op_ptr) {
+  seizeSchemaOp(SchemaTransPtr trans_ptr, SchemaOpPtr& op_ptr, bool linked) {
     /*
       Store node id in high 8 bits to make op_key globally unique
      */
     Uint32 op_key = 
       (getOwnNodeId() << 24) +
       ((c_opRecordSequence + 1) & 0x00FFFFFF);
-    if (seizeSchemaOp<T>(op_ptr, op_key)) {
+    if (seizeSchemaOp<T>(trans_ptr, op_ptr, op_key, linked)) {
       c_opRecordSequence++;
       return true;
     }
@@ -1691,14 +1702,28 @@ private:
 
   template <class T>
   inline bool
-  seizeSchemaOp(SchemaOpPtr& op_ptr, Ptr<T>& t_ptr) {
-    if (seizeSchemaOp<T>(op_ptr)) {
+  seizeSchemaOp(SchemaTransPtr trans_ptr, SchemaOpPtr& op_ptr, Ptr<T>& t_ptr, bool linked=false) {
+    if (seizeSchemaOp<T>(trans_ptr, op_ptr, linked)) {
       getOpRec<T>(op_ptr, t_ptr);
       return true;
     }
     return false;
   }
 
+  template <class T>
+  inline bool
+  seizeLinkedSchemaOp(SchemaOpPtr op_ptr, SchemaOpPtr& oplnk_ptr, Ptr<T>& t_ptr) {
+    ndbrequire(op_ptr.p->m_oplnk_ptr.isNull());
+    if (seizeSchemaOp<T>(op_ptr.p->m_trans_ptr, oplnk_ptr, true)) {
+      op_ptr.p->m_oplnk_ptr = oplnk_ptr;
+      oplnk_ptr.p->m_opbck_ptr = op_ptr;
+      getOpRec<T>(oplnk_ptr, t_ptr);
+      return true;
+    }
+    oplnk_ptr.setNull();
+    return false;
+  }
+
   bool findSchemaOp(SchemaOpPtr& op_ptr, Uint32 op_key);
 
   template <class T>
@@ -1720,7 +1745,7 @@ private:
   void releaseOpSection(SchemaOpPtr, Uint32 ss_no);
 
   // add operation to transaction OpList
-  void addSchemaOp(SchemaTransPtr, SchemaOpPtr&);
+  void addSchemaOp(SchemaOpPtr);
 
   void updateSchemaOpStep(SchemaTransPtr, SchemaOpPtr);
 
@@ -1857,8 +1882,9 @@ private:
     NdbNodeBitmask m_ref_nodes;  // Nodes replying REF to req
     SafeCounterHandle m_counter; // Outstanding REQ's
 
+    ArenaHead m_arena;
     Uint32 m_curr_op_ptr_i;
-    DLFifoList<SchemaOp>::Head m_op_list;
+    SchemaOp_head m_op_list;
 
     // Master takeover
     enum TakeoverRecoveryState
@@ -1902,7 +1928,7 @@ private:
     bool m_wait_gcp_on_commit;
 
     // magic is on when record is seized
-    enum { DICT_MAGIC = 0xd1c70002 };
+    enum { DICT_MAGIC = ~RT_DBDICT_SCHEMA_TRANSACTION };
     Uint32 m_magic;
 
     SchemaTrans() {
@@ -1918,7 +1944,7 @@ private:
       bzero(&m_lockReq, sizeof(m_lockReq));
       m_callback.m_callbackFunction = 0;
       m_callback.m_callbackData = 0;
-      m_magic = 0;
+      m_magic = DICT_MAGIC;
       m_obj_id = RNIL;
       m_flush_prepare = false;
       m_flush_commit = false;
@@ -1941,9 +1967,10 @@ private:
   Uint32 check_write_obj(Uint32 objId, Uint32 transId = 0);
   Uint32 check_write_obj(Uint32, Uint32, SchemaFile::EntryState, ErrorInfo&);
 
-  ArrayPool<SchemaTrans> c_schemaTransPool;
-  DLHashTable<SchemaTrans> c_schemaTransHash;
-  DLFifoList<SchemaTrans> c_schemaTransList;
+  typedef RecordPool<SchemaTrans,ArenaPool> SchemaTrans_pool;
+  SchemaTrans_pool c_schemaTransPool;
+  DLHashTable<SchemaTrans,SchemaTrans,SchemaTrans_pool> c_schemaTransHash;
+  DLFifoList<SchemaTrans,SchemaTrans,SchemaTrans_pool> c_schemaTransList;
   Uint32 c_schemaTransCount;
 
   bool seizeSchemaTrans(SchemaTransPtr&, Uint32 trans_key);
@@ -2084,7 +2111,7 @@ private:
       return;
     }
 
-    if (!seizeSchemaOp(op_ptr, t_ptr)) {
+    if (!seizeSchemaOp(trans_ptr, op_ptr, t_ptr)) {
       jam();
       setError(error, SchemaTransImplRef::TooManySchemaOps, __LINE__);
       return;
@@ -2095,9 +2122,6 @@ private:
     DictSignal::setRequestExtra(op_ptr.p->m_requestInfo, requestExtra);
     DictSignal::addRequestFlags(op_ptr.p->m_requestInfo, requestInfo);
 
-    // add op and global flags from trans level
-    addSchemaOp(trans_ptr, op_ptr);
-
     // impl_req was passed via reference
     impl_req = &t_ptr.p->m_request;
 
@@ -2236,10 +2260,13 @@ private:
 
   // MODULE: CreateTable
 
+  struct CreateTableRec;
+  typedef RecordPool<CreateTableRec,ArenaPool> CreateTableRec_pool;
+
   struct CreateTableRec : public OpRec {
     static const OpInfo g_opInfo;
 
-    static ArrayPool<Dbdict::CreateTableRec>&
+    static CreateTableRec_pool&
     getPool(Dbdict* dict) {
       return dict->c_createTableRecPool;
     }
@@ -2279,7 +2306,7 @@ private:
   };
 
   typedef Ptr<CreateTableRec> CreateTableRecPtr;
-  ArrayPool<CreateTableRec> c_createTableRecPool;
+  CreateTableRec_pool c_createTableRecPool;
 
   // OpInfo
   bool createTable_seize(SchemaOpPtr);
@@ -2312,10 +2339,13 @@ private:
 
   // MODULE: DropTable
 
+  struct DropTableRec;
+  typedef RecordPool<DropTableRec,ArenaPool> DropTableRec_pool;
+
   struct DropTableRec : public OpRec {
     static const OpInfo g_opInfo;
 
-    static ArrayPool<Dbdict::DropTableRec>&
+    static DropTableRec_pool&
     getPool(Dbdict* dict) {
       return dict->c_dropTableRecPool;
     }
@@ -2342,7 +2372,7 @@ private:
   };
 
   typedef Ptr<DropTableRec> DropTableRecPtr;
-  ArrayPool<DropTableRec> c_dropTableRecPool;
+  DropTableRec_pool c_dropTableRecPool;
 
   // OpInfo
   bool dropTable_seize(SchemaOpPtr);
@@ -2375,10 +2405,13 @@ private:
 
   // MODULE: AlterTable
 
+  struct AlterTableRec;
+  typedef RecordPool<AlterTableRec,ArenaPool> AlterTableRec_pool;
+
   struct AlterTableRec : public OpRec {
     static const OpInfo g_opInfo;
 
-    static ArrayPool<Dbdict::AlterTableRec>&
+    static AlterTableRec_pool&
     getPool(Dbdict* dict) {
       return dict->c_alterTableRecPool;
     }
@@ -2386,7 +2419,7 @@ private:
     AlterTabReq m_request;
 
     // added attributes
-    Uint32 m_newAttrData[2 * MAX_ATTRIBUTES_IN_TABLE];
+    OpSection m_newAttrData;
 
     // wl3600_todo check mutex name and number later
     MutexHandle2<BACKUP_DEFINE_MUTEX> m_define_backup_mutex;
@@ -2422,7 +2455,6 @@ private:
     AlterTableRec() :
       OpRec(g_opInfo, (Uint32*)&m_request) {
       memset(&m_request, 0, sizeof(m_request));
-      memset(&m_newAttrData, 0, sizeof(m_newAttrData));
       m_tablePtr.setNull();
       m_newTablePtr.setNull();
       m_dihAddFragPtr = RNIL;
@@ -2446,7 +2478,7 @@ private:
   };
 
   typedef Ptr<AlterTableRec> AlterTableRecPtr;
-  ArrayPool<AlterTableRec> c_alterTableRecPool;
+  AlterTableRec_pool c_alterTableRecPool;
 
   // OpInfo
   bool alterTable_seize(SchemaOpPtr);
@@ -2503,6 +2535,9 @@ private:
     Uint32 attr_ptr_i;
   } AttributeMap[MAX_ATTRIBUTES_IN_INDEX];
 
+  struct CreateIndexRec;
+  typedef RecordPool<CreateIndexRec,ArenaPool> CreateIndexRec_pool;
+
   struct CreateIndexRec : public OpRec {
     CreateIndxImplReq m_request;
     char m_indexName[MAX_TAB_NAME_SIZE];
@@ -2516,7 +2551,7 @@ private:
     // reflection
     static const OpInfo g_opInfo;
 
-    static ArrayPool<Dbdict::CreateIndexRec>&
+    static CreateIndexRec_pool&
     getPool(Dbdict* dict) {
       return dict->c_createIndexRecPool;
     }
@@ -2544,7 +2579,7 @@ private:
   };
 
   typedef Ptr<CreateIndexRec> CreateIndexRecPtr;
-  ArrayPool<CreateIndexRec> c_createIndexRecPool;
+  CreateIndexRec_pool c_createIndexRecPool;
 
   // OpInfo
   bool createIndex_seize(SchemaOpPtr);
@@ -2570,13 +2605,16 @@ private:
 
   // MODULE: DropIndex
 
+  struct DropIndexRec;
+  typedef RecordPool<DropIndexRec,ArenaPool> DropIndexRec_pool;
+
   struct DropIndexRec : public OpRec {
     DropIndxImplReq m_request;
 
     // reflection
     static const OpInfo g_opInfo;
 
-    static ArrayPool<Dbdict::DropIndexRec>&
+    static DropIndexRec_pool&
     getPool(Dbdict* dict) {
       return dict->c_dropIndexRecPool;
     }
@@ -2597,7 +2635,7 @@ private:
   };
 
   typedef Ptr<DropIndexRec> DropIndexRecPtr;
-  ArrayPool<DropIndexRec> c_dropIndexRecPool;
+  DropIndexRec_pool c_dropIndexRecPool;
 
   // OpInfo
   bool dropIndex_seize(SchemaOpPtr);
@@ -2633,6 +2671,9 @@ private:
   static const TriggerTmpl g_buildIndexConstraintTmpl[1];
   static const TriggerTmpl g_reorgTriggerTmpl[1];
 
+  struct AlterIndexRec;
+  typedef RecordPool<AlterIndexRec,ArenaPool> AlterIndexRec_pool;
+
   struct AlterIndexRec : public OpRec {
     AlterIndxImplReq m_request;
     IndexAttributeList m_attrList;
@@ -2641,7 +2682,7 @@ private:
     // reflection
     static const OpInfo g_opInfo;
 
-    static ArrayPool<Dbdict::AlterIndexRec>&
+    static AlterIndexRec_pool&
     getPool(Dbdict* dict) {
       return dict->c_alterIndexRecPool;
     }
@@ -2679,7 +2720,7 @@ private:
   };
 
   typedef Ptr<AlterIndexRec> AlterIndexRecPtr;
-  ArrayPool<AlterIndexRec> c_alterIndexRecPool;
+  AlterIndexRec_pool c_alterIndexRecPool;
 
   // OpInfo
   bool alterIndex_seize(SchemaOpPtr);
@@ -2728,10 +2769,13 @@ private:
   // this prepends 1 column used for FRAGMENT in hash index table key
   typedef Id_array<1 + MAX_ATTRIBUTES_IN_INDEX> FragAttributeList;
 
+  struct BuildIndexRec;
+  typedef RecordPool<BuildIndexRec,ArenaPool> BuildIndexRec_pool;
+
   struct BuildIndexRec : public OpRec {
     static const OpInfo g_opInfo;
 
-    static ArrayPool<Dbdict::BuildIndexRec>&
+    static BuildIndexRec_pool&
     getPool(Dbdict* dict) {
       return dict->c_buildIndexRecPool;
     }
@@ -2764,7 +2808,7 @@ private:
   };
 
   typedef Ptr<BuildIndexRec> BuildIndexRecPtr;
-  ArrayPool<BuildIndexRec> c_buildIndexRecPool;
+  BuildIndexRec_pool c_buildIndexRecPool;
 
   // OpInfo
   bool buildIndex_seize(SchemaOpPtr);
@@ -2804,10 +2848,13 @@ private:
 
   // MODULE: IndexStat
 
+  struct IndexStatRec;
+  typedef RecordPool<IndexStatRec,ArenaPool> IndexStatRec_pool;
+
   struct IndexStatRec : public OpRec {
     static const OpInfo g_opInfo;
 
-    static ArrayPool<Dbdict::IndexStatRec>&
+    static IndexStatRec_pool&
     getPool(Dbdict* dict) {
       return dict->c_indexStatRecPool;
     }
@@ -2828,7 +2875,7 @@ private:
   };
 
   typedef Ptr<IndexStatRec> IndexStatRecPtr;
-  ArrayPool<IndexStatRec> c_indexStatRecPool;
+  IndexStatRec_pool c_indexStatRecPool;
 
   Uint32 c_indexStatAutoCreate;
   Uint32 c_indexStatAutoUpdate;
@@ -2903,10 +2950,13 @@ private:
   RSS_AP_SNAPSHOT(c_hash_map_pool);
   RSS_AP_SNAPSHOT(g_hash_map);
 
+  struct CreateHashMapRec;
+  typedef RecordPool<CreateHashMapRec,ArenaPool> CreateHashMapRec_pool;
+
   struct CreateHashMapRec : public OpRec {
     static const OpInfo g_opInfo;
 
-    static ArrayPool<Dbdict::CreateHashMapRec>&
+    static CreateHashMapRec_pool&
     getPool(Dbdict* dict) {
       return dict->c_createHashMapRecPool;
     }
@@ -2920,7 +2970,7 @@ private:
   };
 
   typedef Ptr<CreateHashMapRec> CreateHashMapRecPtr;
-  ArrayPool<CreateHashMapRec> c_createHashMapRecPool;
+  CreateHashMapRec_pool c_createHashMapRecPool;
   void execCREATE_HASH_MAP_REQ(Signal* signal);
 
   // OpInfo
@@ -2944,10 +2994,13 @@ private:
 
   // MODULE: CopyData
 
+  struct CopyDataRec;
+  typedef RecordPool<CopyDataRec,ArenaPool> CopyDataRec_pool;
+
   struct CopyDataRec : public OpRec {
     static const OpInfo g_opInfo;
 
-    static ArrayPool<Dbdict::CopyDataRec>&
+    static CopyDataRec_pool&
     getPool(Dbdict* dict) {
       return dict->c_copyDataRecPool;
     }
@@ -2961,7 +3014,7 @@ private:
   };
 
   typedef Ptr<CopyDataRec> CopyDataRecPtr;
-  ArrayPool<CopyDataRec> c_copyDataRecPool;
+  CopyDataRec_pool c_copyDataRecPool;
   void execCOPY_DATA_REQ(Signal* signal);
   void execCOPY_DATA_REF(Signal* signal);
   void execCOPY_DATA_CONF(Signal* signal);
@@ -3106,10 +3159,14 @@ private:
   typedef Ptr<OpDropEvent> OpDropEventPtr;
 
   // MODULE: CreateTrigger
+
+  struct CreateTriggerRec;
+  typedef RecordPool<CreateTriggerRec,ArenaPool> CreateTriggerRec_pool;
+
   struct CreateTriggerRec : public OpRec {
     static const OpInfo g_opInfo;
 
-    static ArrayPool<Dbdict::CreateTriggerRec>&
+    static CreateTriggerRec_pool&
     getPool(Dbdict* dict) {
       return dict->c_createTriggerRecPool;
     }
@@ -3136,7 +3193,7 @@ private:
   };
 
   typedef Ptr<CreateTriggerRec> CreateTriggerRecPtr;
-  ArrayPool<CreateTriggerRec> c_createTriggerRecPool;
+  CreateTriggerRec_pool c_createTriggerRecPool;
 
   // OpInfo
   bool createTrigger_seize(SchemaOpPtr);
@@ -3167,10 +3224,13 @@ private:
 
   // MODULE: DropTrigger
 
+  struct DropTriggerRec;
+  typedef RecordPool<DropTriggerRec,ArenaPool> DropTriggerRec_pool;
+
   struct DropTriggerRec : public OpRec {
     static const OpInfo g_opInfo;
 
-    static ArrayPool<Dbdict::DropTriggerRec>&
+    static DropTriggerRec_pool&
     getPool(Dbdict* dict) {
       return dict->c_dropTriggerRecPool;
     }
@@ -3195,7 +3255,7 @@ private:
   };
 
   typedef Ptr<DropTriggerRec> DropTriggerRecPtr;
-  ArrayPool<DropTriggerRec> c_dropTriggerRecPool;
+  DropTriggerRec_pool c_dropTriggerRecPool;
 
   // OpInfo
   bool dropTrigger_seize(SchemaOpPtr);
@@ -3223,6 +3283,9 @@ private:
 
   // MODULE: CreateFilegroup
 
+  struct CreateFilegroupRec;
+  typedef RecordPool<CreateFilegroupRec,ArenaPool> CreateFilegroupRec_pool;
+
   struct CreateFilegroupRec : public OpRec {
     bool m_parsed, m_prepared;
     CreateFilegroupImplReq m_request;
@@ -3231,7 +3294,7 @@ private:
     // reflection
     static const OpInfo g_opInfo;
 
-    static ArrayPool<Dbdict::CreateFilegroupRec>&
+    static CreateFilegroupRec_pool&
     getPool(Dbdict* dict) {
       return dict->c_createFilegroupRecPool;
     }
@@ -3245,7 +3308,7 @@ private:
   };
 
   typedef Ptr<CreateFilegroupRec> CreateFilegroupRecPtr;
-  ArrayPool<CreateFilegroupRec> c_createFilegroupRecPool;
+  CreateFilegroupRec_pool c_createFilegroupRecPool;
 
   // OpInfo
   bool createFilegroup_seize(SchemaOpPtr);
@@ -3268,6 +3331,9 @@ private:
 
   // MODULE: CreateFile
 
+  struct CreateFileRec;
+  typedef RecordPool<CreateFileRec,ArenaPool> CreateFileRec_pool;
+
   struct CreateFileRec : public OpRec {
     bool m_parsed, m_prepared;
     CreateFileImplReq m_request;
@@ -3276,7 +3342,7 @@ private:
     // reflection
     static const OpInfo g_opInfo;
 
-    static ArrayPool<Dbdict::CreateFileRec>&
+    static CreateFileRec_pool&
     getPool(Dbdict* dict) {
       return dict->c_createFileRecPool;
     }
@@ -3290,7 +3356,7 @@ private:
   };
 
   typedef Ptr<CreateFileRec> CreateFileRecPtr;
-  ArrayPool<CreateFileRec> c_createFileRecPool;
+  CreateFileRec_pool c_createFileRecPool;
 
   // OpInfo
   bool createFile_seize(SchemaOpPtr);
@@ -3313,6 +3379,9 @@ private:
 
   // MODULE: DropFilegroup
 
+  struct DropFilegroupRec;
+  typedef RecordPool<DropFilegroupRec,ArenaPool> DropFilegroupRec_pool;
+
   struct DropFilegroupRec : public OpRec {
     bool m_parsed, m_prepared;
     DropFilegroupImplReq m_request;
@@ -3320,7 +3389,7 @@ private:
     // reflection
     static const OpInfo g_opInfo;
 
-    static ArrayPool<Dbdict::DropFilegroupRec>&
+    static DropFilegroupRec_pool&
     getPool(Dbdict* dict) {
       return dict->c_dropFilegroupRecPool;
     }
@@ -3333,7 +3402,7 @@ private:
   };
 
   typedef Ptr<DropFilegroupRec> DropFilegroupRecPtr;
-  ArrayPool<DropFilegroupRec> c_dropFilegroupRecPool;
+  DropFilegroupRec_pool c_dropFilegroupRecPool;
 
   // OpInfo
   bool dropFilegroup_seize(SchemaOpPtr);
@@ -3355,6 +3424,9 @@ private:
 
   // MODULE: DropFile
 
+  struct DropFileRec;
+  typedef RecordPool<DropFileRec,ArenaPool> DropFileRec_pool;
+
   struct DropFileRec : public OpRec {
     bool m_parsed, m_prepared;
     DropFileImplReq m_request;
@@ -3362,7 +3434,7 @@ private:
     // reflection
     static const OpInfo g_opInfo;
 
-    static ArrayPool<Dbdict::DropFileRec>&
+    static DropFileRec_pool&
     getPool(Dbdict* dict) {
       return dict->c_dropFileRecPool;
     }
@@ -3375,7 +3447,7 @@ private:
   };
 
   typedef Ptr<DropFileRec> DropFileRecPtr;
-  ArrayPool<DropFileRec> c_dropFileRecPool;
+  DropFileRec_pool c_dropFileRecPool;
 
   // OpInfo
   bool dropFile_seize(SchemaOpPtr);
@@ -3397,6 +3469,9 @@ private:
 
   // MODULE: CreateNodegroup
 
+  struct CreateNodegroupRec;
+  typedef RecordPool<CreateNodegroupRec,ArenaPool> CreateNodegroupRec_pool;
+
   struct CreateNodegroupRec : public OpRec {
     bool m_map_created;
     CreateNodegroupImplReq m_request;
@@ -3404,7 +3479,7 @@ private:
     // reflection
     static const OpInfo g_opInfo;
 
-    static ArrayPool<Dbdict::CreateNodegroupRec>&
+    static CreateNodegroupRec_pool&
     getPool(Dbdict* dict) {
       return dict->c_createNodegroupRecPool;
     }
@@ -3432,7 +3507,7 @@ private:
   };
 
   typedef Ptr<CreateNodegroupRec> CreateNodegroupRecPtr;
-  ArrayPool<CreateNodegroupRec> c_createNodegroupRecPool;
+  CreateNodegroupRec_pool c_createNodegroupRecPool;
 
   // OpInfo
   void execCREATE_NODEGROUP_REQ(Signal*);
@@ -3465,13 +3540,16 @@ private:
 
   // MODULE: DropNodegroup
 
+  struct DropNodegroupRec;
+  typedef RecordPool<DropNodegroupRec,ArenaPool> DropNodegroupRec_pool;
+
   struct DropNodegroupRec : public OpRec {
     DropNodegroupImplReq m_request;
 
     // reflection
     static const OpInfo g_opInfo;
 
-    static ArrayPool<Dbdict::DropNodegroupRec>&
+    static DropNodegroupRec_pool&
     getPool(Dbdict* dict) {
       return dict->c_dropNodegroupRecPool;
     }
@@ -3498,7 +3576,7 @@ private:
   };
 
   typedef Ptr<DropNodegroupRec> DropNodegroupRecPtr;
-  ArrayPool<DropNodegroupRec> c_dropNodegroupRecPool;
+  DropNodegroupRec_pool c_dropNodegroupRecPool;
 
   // OpInfo
   void execDROP_NODEGROUP_REQ(Signal*);
@@ -3646,7 +3724,7 @@ private:
   
   void writeTableFile(Signal* signal, Uint32 tableId, 
 		      SegmentedSectionPtr tabInfo, Callback*);
-  void writeTableFile(Signal* signal, Uint32 tableId,
+  void writeTableFile(Signal* signal, SchemaOpPtr op_ptr, Uint32 tableId,
 		      OpSection opSection, Callback*);
   void startWriteTableFile(Signal* signal, Uint32 tableId);
   void openTableFile(Signal* signal, 
@@ -3941,6 +4019,8 @@ public:
 
 protected:
   virtual bool getParam(const char * param, Uint32 * retVal);
+private:
+  ArenaAllocator c_arenaAllocator;
 };
 
 inline bool

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-09-29 10:20:14 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-10-18 12:45:50 +0000
@@ -11338,6 +11338,7 @@ void Dbdih::initLcpLab(Signal* signal, U
     /**
      * For each fragment
      */
+    Uint32 sumReplicaCount = 0;
     for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
       jam();
       FragmentstorePtr fragPtr;
@@ -11369,8 +11370,18 @@ void Dbdih::initLcpLab(Signal* signal, U
       }
       
       fragPtr.p->noLcpReplicas = replicaCount;
+      sumReplicaCount += replicaCount;
     }//for
-    
+
+    if (sumReplicaCount == 0)
+    {
+      jam();
+      /**
+       * Table had no active replica...don't include it in LCP
+       */
+      tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED;
+    }
+
     signal->theData[0] = DihContinueB::ZINIT_LCP;
     signal->theData[1] = senderRef;
     signal->theData[2] = tabPtr.i + 1;
@@ -13864,6 +13875,7 @@ void Dbdih::startNextChkpt(Signal* signa
   NdbNodeBitmask busyNodes; 
   busyNodes.clear();
   const Uint32 lcpNodes = c_lcpState.m_participatingLQH.count();
+  ndbout_c("startNextChkpt: %u lcpNodes=%u", lcpId, lcpNodes);
   
   bool save = true;
   LcpState::CurrentFragment curr = c_lcpState.currentFragment;
@@ -13924,6 +13936,8 @@ void Dbdih::startNextChkpt(Signal* signa
 	    nodePtr.p->noOfStartedChkpt = i + 1;
 	    
 	    sendLCP_FRAG_ORD(signal, nodePtr.p->startedChkpt[i]);
+            ndbout_c("  sendLCP_FRAG_ORD node: %u tab: %u frag: %u", 
+                     nodePtr.i, tabPtr.i, curr.fragmentId);
 	  } 
 	  else if (nodePtr.p->noOfQueuedChkpt < 2) 
 	  {
@@ -13966,6 +13980,7 @@ void Dbdih::startNextChkpt(Signal* signa
 	       * backlog have performed more checkpoints. We will return and 
 	       * will not continue the process of starting any more checkpoints.
 	       */
+              ndbout_c("  busyNodes == lcpNodes => return");
 	      return;
 	    }//if
 	  }//if
@@ -14017,12 +14032,14 @@ void Dbdih::sendLastLCP_FRAG_ORD(Signal*
       CRASH_INSERTION(7193);
       BlockReference ref = calcLqhBlockRef(nodePtr.i);
       sendSignal(ref, GSN_LCP_FRAG_ORD, signal,LcpFragOrd::SignalLength, JBB);
+      ndbout_c("  => sendLastLCP_FRAG_ORD");
 
       if (c_lcpState.m_local_lcp && c_lcpState.m_cnt_lcp_frag_ord == 0)
       {
         jam();
         c_lcpState.setLcpStatus(LCP_TAB_SAVED, __LINE__);
         sendLCP_COMPLETE_REP(signal);
+        ndbout_c("  => sendLCP_COMPLETE_REP (llcp)");
       }
     }
   }
@@ -14523,6 +14540,10 @@ void Dbdih::checkStartMoreLcp(Signal* si
     //-------------------------------------------------------------------
     nodePtr.p->noOfStartedChkpt = i + 1;
     
+    ndbout_c("checkStartMoreLcp => sendLCP_FRAG_ORD node: %u tab: %u frag: %u",
+             nodePtr.i,
+             nodePtr.p->startedChkpt[i].tableId,
+             nodePtr.p->startedChkpt[i].fragId);
     sendLCP_FRAG_ORD(signal, nodePtr.p->startedChkpt[i]);
   }
 

=== modified file 'storage/ndb/src/kernel/blocks/dbinfo/Dbinfo.cpp'
--- a/storage/ndb/src/kernel/blocks/dbinfo/Dbinfo.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbinfo/Dbinfo.cpp	2011-10-07 08:07:21 +0000
@@ -29,7 +29,7 @@
 
 Uint32 dbinfo_blocks[] = { DBACC, DBTUP, BACKUP, DBTC, SUMA, DBUTIL,
                            TRIX, DBTUX, DBDICT, CMVMI, DBLQH, LGMAN,
-                           PGMAN, DBSPJ, 0};
+                           PGMAN, DBSPJ, THRMAN, 0};
 
 Dbinfo::Dbinfo(Block_context& ctx) :
   SimulatedBlock(DBINFO, ctx)

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2011-10-13 09:51:01 +0000
@@ -2687,6 +2687,7 @@ private:
 
   bool validate_filter(Signal*);
   bool match_and_print(Signal*, Ptr<TcConnectionrec>);
+  void ndbinfo_write_op(Ndbinfo::Row&, TcConnectionrecPtr tcPtr);
 
   void define_backup(Signal*);
   void execDEFINE_BACKUP_REF(Signal*);

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-09-29 10:20:14 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-10-18 12:45:50 +0000
@@ -23394,6 +23394,43 @@ void Dblqh::execDBINFO_SCANREQ(Signal *s
     }
     break;
   }
+  case Ndbinfo::OPERATIONS_TABLEID:{
+    Uint32 bucket = cursor->data[0];
+
+    while (true)
+    {
+      if (rl.need_break(req))
+      {
+        jam();
+        ndbinfo_send_scan_break(signal, req, rl, bucket);
+        return;
+      }
+
+      for (; bucket < NDB_ARRAY_SIZE(ctransidHash); bucket++)
+      {
+        if (ctransidHash[bucket] != RNIL)
+          break;
+      }
+
+      if (bucket == NDB_ARRAY_SIZE(ctransidHash))
+      {
+        break;
+      }
+
+      TcConnectionrecPtr tcPtr;
+      tcPtr.i = ctransidHash[bucket];
+      while (tcPtr.i != RNIL)
+      {
+        jam();
+        ptrCheckGuard(tcPtr, ctcConnectrecFileSize, tcConnectionrec);
+        Ndbinfo::Row row(signal, req);
+        ndbinfo_write_op(row, tcPtr);
+        ndbinfo_send_row(signal, req, row, rl);
+        tcPtr.i = tcPtr.p->nextHashRec;
+      }
+      bucket++;
+    }
+  }
 
   default:
     break;
@@ -23402,6 +23439,75 @@ void Dblqh::execDBINFO_SCANREQ(Signal *s
   ndbinfo_send_scan_conf(signal, req, rl);
 }
 
+void
+Dblqh::ndbinfo_write_op(Ndbinfo::Row & row, TcConnectionrecPtr tcPtr)
+{
+  row.write_uint32(getOwnNodeId());
+  row.write_uint32(instance());          // block instance
+  row.write_uint32(tcPtr.i);             // objid
+  row.write_uint32(tcPtr.p->tcBlockref); // tcref
+  row.write_uint32(tcPtr.p->applRef);    // apiref
+
+  row.write_uint32(tcPtr.p->transid[0]);
+  row.write_uint32(tcPtr.p->transid[1]);
+  row.write_uint32(tcPtr.p->tableref);
+  row.write_uint32(tcPtr.p->fragmentid);
+
+  if (tcPtr.p->tcScanRec != RNIL)
+  {
+    ScanRecordPtr sp;
+    sp.i = tcPtr.p->tcScanRec;
+    c_scanRecordPool.getPtr(sp);
+
+    Uint32 op = NDB_INFO_OP_SCAN_UNKNOWN;
+    if (sp.p->scanLockMode)
+      op = NDB_INFO_OP_SCAN_EX;
+    else if (sp.p->scanLockHold)
+      op = NDB_INFO_OP_SCAN_SH;
+    else
+      op = NDB_INFO_OP_SCAN;
+
+    row.write_uint32(op);
+    row.write_uint32(sp.p->scanState);
+    row.write_uint32(0);
+  }
+  else
+  {
+    Uint32 op = NDB_INFO_OP_UNKNOWN;
+    switch(tcPtr.p->operation){
+    case ZREAD:
+      if (tcPtr.p->lockType)
+	op = NDB_INFO_OP_READ_EX;
+      else if (!tcPtr.p->dirtyOp)
+	op = NDB_INFO_OP_READ_SH;
+      else
+        op = NDB_INFO_OP_READ;
+      break;
+    case ZINSERT:
+      op = NDB_INFO_OP_INSERT;
+      break;
+    case ZUPDATE:
+      op = NDB_INFO_OP_UPDATE;
+      break;
+    case ZDELETE:
+      op = NDB_INFO_OP_DELETE;
+      break;
+    case ZWRITE:
+      op = NDB_INFO_OP_WRITE;
+      break;
+    case ZUNLOCK:
+      op = NDB_INFO_OP_UNLOCK;
+      break;
+    case ZREFRESH:
+      op = NDB_INFO_OP_REFRESH;
+      break;
+    }
+    row.write_uint32(op);
+    row.write_uint32(tcPtr.p->transactionState);
+    row.write_uint32(0);
+  }
+}
+
 
 /* **************************************************************** */
 /* ---------------------------------------------------------------- */

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhProxy.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhProxy.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhProxy.cpp	2011-10-07 08:07:21 +0000
@@ -690,7 +690,7 @@ DblqhProxy::completeLCP_2(Signal* signal
    *   that will checkpoint extent-pages
    */
   // NOTE: ugly to use MaxLqhWorkers directly
-  Uint32 instance = MaxLqhWorkers + 1;
+  Uint32 instance = c_workers + 1;
   sendSignal(numberToRef(PGMAN, instance, getOwnNodeId()),
              GSN_END_LCP_REQ, signal, EndLcpReq::SignalLength, JBB);
 }

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp	2011-08-22 08:35:35 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp	2011-09-29 11:43:27 +0000
@@ -580,6 +580,8 @@ public:
     Uint32 m_fragCount;
     // The number of fragments that we scan in parallel.
     Uint32 m_parallelism;
+    // True if we are still receiving the first batch for this operation.
+    bool   m_firstBatch;
     /**
      * True if this is the first instantiation of this operation. A child
      * operation will be instantiated once for each batch of its parent.
@@ -1229,7 +1231,6 @@ private:
   void scanIndex_execSCAN_FRAGCONF(Signal*, Ptr<Request>, Ptr<TreeNode>, Ptr<ScanFragHandle>);
   void scanIndex_parent_row(Signal*,Ptr<Request>,Ptr<TreeNode>, const RowPtr&);
   void scanIndex_fixupBound(Ptr<ScanFragHandle> fragPtr, Uint32 ptrI, Uint32);
-  void scanIndex_send(Signal*,Ptr<Request>,Ptr<TreeNode>);
   void scanIndex_send(Signal* signal,
                       Ptr<Request> requestPtr,
                       Ptr<TreeNode> treeNodePtr,

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-09-23 07:43:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-09-29 11:43:27 +0000
@@ -5023,6 +5023,7 @@ Dbspj::scanIndex_parent_batch_complete(S
   const ScanFragReq * org = (const ScanFragReq*)data.m_scanFragReq;
   ndbrequire(org->batch_size_rows > 0);
 
+  data.m_firstBatch = true;
   if (treeNodePtr.p->m_bits & TreeNode::T_SCAN_PARALLEL)
   {
     jam();
@@ -5171,6 +5172,9 @@ Dbspj::scanIndex_send(Signal* signal,
                       Uint32 bs_rows,
                       Uint32& batchRange)
 {
+  jam();
+  ndbassert(bs_bytes > 0);
+  ndbassert(bs_rows > 0);
   /**
    * if (m_bits & prunemask):
    * - Range keys sliced out to each ScanFragHandle
@@ -5451,6 +5455,9 @@ Dbspj::scanIndex_execSCAN_FRAGCONF(Signa
 
   if (data.m_frags_outstanding == 0)
   {
+    const bool isFirstBatch = data.m_firstBatch;
+    data.m_firstBatch = false;
+
     const ScanFragReq * const org
       = reinterpret_cast<const ScanFragReq*>(data.m_scanFragReq);
 
@@ -5486,24 +5493,78 @@ Dbspj::scanIndex_execSCAN_FRAGCONF(Signa
     {
       jam();
       ndbrequire((requestPtr.p->m_state & Request::RS_ABORTING) != 0);
-    }
-    else if (! (data.m_rows_received == data.m_rows_expecting))
-    {
-      jam();
+      checkBatchComplete(signal, requestPtr, 1);
       return;
     }
-    else
+
+    if (isFirstBatch && data.m_frags_not_started > 0)
     {
-      if (treeNodePtr.p->m_bits & TreeNode::T_REPORT_BATCH_COMPLETE)
+      /**
+       * Check if we can expect to be able to fetch the entire result set by
+       * asking for more fragments within the same batch. This may improve 
+       * performance for bushy scans, as subsequent bushy branches must be
+       * re-executed for each batch of this scan.
+       */
+      
+      /**
+       * Find the maximal correlation value that we may have seen so far.
+       * Correlation value must be unique within batch and smaller than 
+       * org->batch_size_rows.
+       */
+      const Uint32 maxCorrVal = (data.m_totalRows) == 0 ? 0 :
+        org->batch_size_rows / data.m_parallelism * (data.m_parallelism - 1)
+        + data.m_totalRows;
+      
+      // Number of rows that we can still fetch in this batch.
+      const Int32 remainingRows 
+        = static_cast<Int32>(org->batch_size_rows - maxCorrVal);
+      
+      if (remainingRows >= data.m_frags_not_started &&
+          /**
+           * Check that (remaining row capacity)/(remaining fragments) is
+           * greater or equal to (rows read so far)/(finished fragments).
+           */
+          remainingRows * static_cast<Int32>(data.m_parallelism) >=
+          static_cast<Int32>(data.m_totalRows * data.m_frags_not_started) &&
+          (org->batch_size_bytes - data.m_totalBytes) * data.m_parallelism >=
+          data.m_totalBytes * data.m_frags_not_started)
       {
         jam();
-        reportBatchComplete(signal, requestPtr, treeNodePtr);
+        Uint32 batchRange = maxCorrVal;
+        DEBUG("::scanIndex_execSCAN_FRAGCONF() first batch was not full."
+              " Asking for new batches from " << data.m_frags_not_started <<
+              " fragments with " << 
+              remainingRows / data.m_frags_not_started 
+              <<" rows and " << 
+              (org->batch_size_bytes - data.m_totalBytes)
+              / data.m_frags_not_started 
+              << " bytes.");
+        scanIndex_send(signal,
+                       requestPtr,
+                       treeNodePtr,
+                       data.m_frags_not_started,
+                       (org->batch_size_bytes - data.m_totalBytes)
+                       / data.m_frags_not_started,
+                       remainingRows / data.m_frags_not_started,
+                       batchRange);
+        return;
       }
     }
+    
+    if (data.m_rows_received != data.m_rows_expecting)
+    {
+      jam();
+      return;
+    }
+    
+    if (treeNodePtr.p->m_bits & TreeNode::T_REPORT_BATCH_COMPLETE)
+    {
+      jam();
+      reportBatchComplete(signal, requestPtr, treeNodePtr);
+    }
 
     checkBatchComplete(signal, requestPtr, 1);
-    return;
-  }
+  } // if (data.m_frags_outstanding == 0)
 }
 
 void

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjProxy.hpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjProxy.hpp	2011-09-15 20:21:59 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjProxy.hpp	2011-10-07 08:07:21 +0000
@@ -24,7 +24,6 @@ public:
   virtual ~DbspjProxy();
   BLOCK_DEFINES(DbspjProxy);
 
-  virtual void loadWorkers() { tc_loadWorkers(); }
 protected:
   virtual SimulatedBlock* newWorker(Uint32 instanceNo);
 

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2011-07-04 13:37:56 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2011-10-13 09:51:01 +0000
@@ -157,9 +157,6 @@ public:
     CS_DISCONNECTED = 1,
     CS_STARTED = 2,
     CS_RECEIVING = 3,
-    CS_PREPARED = 4,
-    CS_START_PREPARING = 5,
-    CS_REC_PREPARING = 6,
     CS_RESTART = 7,
     CS_ABORTING = 8,
     CS_COMPLETING = 9,
@@ -1989,6 +1986,7 @@ private:
 
   bool validate_filter(Signal*);
   bool match_and_print(Signal*, ApiConnectRecordPtr);
+  bool ndbinfo_write_trans(Ndbinfo::Row&, ApiConnectRecordPtr);
 
 #ifdef ERROR_INSERT
   bool testFragmentDrop(Signal* signal);

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2011-09-15 06:02:57 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2011-10-17 13:32:49 +0000
@@ -106,9 +106,6 @@ operator<<(NdbOut& out, Dbtc::Connection
   case Dbtc::CS_DISCONNECTED: out << "CS_DISCONNECTED"; break;
   case Dbtc::CS_STARTED: out << "CS_STARTED"; break;
   case Dbtc::CS_RECEIVING: out << "CS_RECEIVING"; break;
-  case Dbtc::CS_PREPARED: out << "CS_PREPARED"; break;
-  case Dbtc::CS_START_PREPARING: out << "CS_START_PREPARING"; break;
-  case Dbtc::CS_REC_PREPARING: out << "CS_REC_PREPARING"; break;
   case Dbtc::CS_RESTART: out << "CS_RESTART"; break;
   case Dbtc::CS_ABORTING: out << "CS_ABORTING"; break;
   case Dbtc::CS_COMPLETING: out << "CS_COMPLETING"; break;
@@ -1055,17 +1052,6 @@ Dbtc::handleFailedApiNode(Signal* signal
         abort010Lab(signal);
         TloopCount = 256;
         break;
-      case CS_PREPARED:
-        jam();
-      case CS_REC_PREPARING:
-        jam();
-      case CS_START_PREPARING:
-        jam();
-        /*********************************************************************/
-        // Not implemented yet.
-        /*********************************************************************/
-        systemErrorLab(signal, __LINE__);
-        break;
       case CS_RESTART:
         jam();
       case CS_COMPLETING:
@@ -6582,16 +6568,6 @@ void Dbtc::execTC_COMMITREQ(Signal* sign
       /***********************************************************************/
       errorCode = ZSCANINPROGRESS;
       break;
-    case CS_PREPARED:
-      jam();
-      return;
-    case CS_START_PREPARING:
-      jam();
-      return;
-    case CS_REC_PREPARING:
-      jam();
-      return;
-      break;
     default:
       warningHandlerLab(signal, __LINE__);
       return;
@@ -6709,12 +6685,6 @@ void Dbtc::execTCROLLBACKREQ(Signal* sig
     jam();
     apiConnectptr.p->returnsignal = RS_TCROLLBACKCONF;
     break;
-  case CS_START_PREPARING:
-    jam();
-  case CS_PREPARED:
-    jam();
-  case CS_REC_PREPARING:
-    jam();
   default:
     goto TC_ROLL_system_error;
     break;
@@ -7739,12 +7709,6 @@ void Dbtc::timeOutFoundLab(Signal* signa
     jam();
   case CS_FAIL_COMMITTED:
     jam();
-  case CS_REC_PREPARING:
-    jam();
-  case CS_START_PREPARING:
-    jam();
-  case CS_PREPARED:
-    jam();
   case CS_RESTART:
     jam();
   case CS_FAIL_ABORTED:
@@ -13324,15 +13288,122 @@ void Dbtc::execDBINFO_SCANREQ(Signal *si
 
     break;
   }
+  case Ndbinfo::TRANSACTIONS_TABLEID:{
+    ApiConnectRecordPtr ptr;
+    ptr.i = cursor->data[0];
+    const Uint32 maxloop = 256;
+    for (Uint32 i = 0; i < maxloop; i++)
+    {
+      ptrCheckGuard(ptr, capiConnectFilesize, apiConnectRecord);
+      Ndbinfo::Row row(signal, req);
+      if (ndbinfo_write_trans(row, ptr))
+      {
+        jam();
+        ndbinfo_send_row(signal, req, row, rl);
+      }
 
+      ptr.i ++;
+      if (ptr.i == capiConnectFilesize)
+      {
+        goto done;
+      }
+      else if (rl.need_break(req))
+      {
+        break;
+      }
+    }
+    ndbinfo_send_scan_break(signal, req, rl, ptr.i);
+    return;
+  }
   default:
     break;
   }
 
+done:
   ndbinfo_send_scan_conf(signal, req, rl);
 }
 
 bool
+Dbtc::ndbinfo_write_trans(Ndbinfo::Row & row, ApiConnectRecordPtr transPtr)
+{
+  Uint32 conState = transPtr.p->apiConnectstate;
+
+  if (conState == CS_ABORTING && transPtr.p->abortState == AS_IDLE)
+  {
+    /**
+     * These are, for all practical purposes, equal
+     */
+    conState = CS_CONNECTED;
+  }
+
+  if (conState == CS_CONNECTED ||
+      conState == CS_DISCONNECTED ||
+      conState == CS_RESTART)
+  {
+    return false;
+  }
+
+  row.write_uint32(getOwnNodeId());
+  row.write_uint32(instance());   // block instance
+  row.write_uint32(transPtr.i);
+  row.write_uint32(transPtr.p->ndbapiBlockref);
+  row.write_uint32(transPtr.p->transid[0]);
+  row.write_uint32(transPtr.p->transid[1]);
+  row.write_uint32(conState);
+  row.write_uint32(transPtr.p->m_flags);
+  row.write_uint32(transPtr.p->lqhkeyreqrec);
+  Uint32 outstanding = 0;
+  switch((ConnectionState)conState) {
+  case CS_CONNECTED:
+  case CS_DISCONNECTED:
+    break;
+  case CS_STARTED:
+  case CS_RECEIVING:
+  case CS_REC_COMMITTING:
+  case CS_START_COMMITTING:
+  case CS_SEND_FIRE_TRIG_REQ:
+  case CS_WAIT_FIRE_TRIG_REQ:
+    outstanding = transPtr.p->lqhkeyreqrec - transPtr.p->lqhkeyconfrec;
+    break;
+  case CS_COMMITTING:
+  case CS_COMPLETING:
+  case CS_COMMIT_SENT:
+  case CS_COMPLETE_SENT:
+  case CS_ABORTING:
+    outstanding = transPtr.p->counter;
+    break;
+  case CS_PREPARE_TO_COMMIT:
+    break;
+  case CS_START_SCAN:
+    // TODO
+    break;
+  case CS_WAIT_ABORT_CONF:
+  case CS_WAIT_COMMIT_CONF:
+  case CS_WAIT_COMPLETE_CONF:
+    // not easily computed :-(
+    break;
+  case CS_FAIL_PREPARED:
+  case CS_FAIL_ABORTED:
+    // we're assembling a state...
+    break;
+  case CS_FAIL_COMMITTING:
+  case CS_FAIL_COMMITTED:
+  case CS_FAIL_ABORTING:
+  case CS_FAIL_COMPLETED:
+    // not easily computed :-(
+    break;
+  case CS_RESTART:
+    break;
+  }
+
+  row.write_uint32(outstanding);
+
+  Uint32 apiTimer = getApiConTimer(transPtr.i);
+  row.write_uint32(apiTimer ? (ctcTimer - apiTimer) / 100 : 0);
+  return true;
+}
+
+bool
 Dbtc::validate_filter(Signal* signal)
 {
   Uint32 * start = signal->theData + 1;
@@ -13464,9 +13535,6 @@ Dbtc::match_and_print(Signal* signal, Ap
   case CS_FAIL_PREPARED:
   case CS_FAIL_COMMITTING:
   case CS_FAIL_COMMITTED:
-  case CS_REC_PREPARING:
-  case CS_START_PREPARING:
-  case CS_PREPARED:
   case CS_RESTART:
   case CS_FAIL_ABORTED:
   case CS_DISCONNECTED:

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcProxy.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcProxy.hpp	2011-09-15 20:21:59 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcProxy.hpp	2011-10-07 08:07:21 +0000
@@ -37,7 +37,6 @@ public:
   virtual ~DbtcProxy();
   BLOCK_DEFINES(DbtcProxy);
 
-  virtual void loadWorkers() { tc_loadWorkers(); }
 protected:
   virtual SimulatedBlock* newWorker(Uint32 instanceNo);
 

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-09-01 18:42:31 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-10-07 16:12:13 +0000
@@ -579,6 +579,9 @@ typedef Ptr<Fragoperrec> FragoperrecPtr;
   // for md5 of key (could maybe reuse existing temp buffer)
   Uint64 c_dataBuffer[ZWORDS_ON_PAGE/2 + 1];
 
+  // Crash the node when a tuple got corrupted
+  bool c_crashOnCorruptedTuple;
+
   struct Page_request 
   {
     Page_request() {}
@@ -2894,6 +2897,7 @@ private:
   
   Uint32 calculateChecksum(Tuple_header*, Tablerec* regTabPtr);
   void setChecksum(Tuple_header*, Tablerec* regTabPtr);
+  int corruptedTupleDetected(KeyReqStruct*);
 
   void complexTrigger(Signal* signal,
                       KeyReqStruct *req_struct,

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2011-05-25 13:19:02 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2011-10-07 16:12:13 +0000
@@ -166,6 +166,20 @@ Dbtup::calculateChecksum(Tuple_header* t
   return checksum;
 }
 
+int
+Dbtup::corruptedTupleDetected(KeyReqStruct *req_struct)
+{
+  ndbout_c("Tuple corruption detected."); 
+  if (c_crashOnCorruptedTuple)
+  {
+    ndbout_c(" Exiting."); 
+    ndbrequire(false);
+  }
+  terrorCode= ZTUPLE_CORRUPTED_ERROR;
+  tupkeyErrorLab(req_struct);
+  return -1;
+}
+
 /* ----------------------------------------------------------------- */
 /* -----------       INSERT_ACTIVE_OP_LIST            -------------- */
 /* ----------------------------------------------------------------- */
@@ -1014,10 +1028,7 @@ int Dbtup::handleReadReq(Signal* signal,
   if ((regTabPtr->m_bits & Tablerec::TR_Checksum) &&
       (calculateChecksum(req_struct->m_tuple_ptr, regTabPtr) != 0)) {
     jam();
-    ndbout_c("here2");
-    terrorCode= ZTUPLE_CORRUPTED_ERROR;
-    tupkeyErrorLab(req_struct);
-    return -1;
+    return corruptedTupleDetected(req_struct);
   }
 
   const Uint32 node = refToNode(sendBref);
@@ -1139,8 +1150,8 @@ int Dbtup::handleUpdateReq(Signal* signa
   if ((regTabPtr->m_bits & Tablerec::TR_Checksum) &&
       (calculateChecksum(req_struct->m_tuple_ptr, regTabPtr) != 0)) 
   {
-    terrorCode= ZTUPLE_CORRUPTED_ERROR;
-    goto error;
+    jam();
+    return corruptedTupleDetected(req_struct);
   }
 
   req_struct->m_tuple_ptr= dst;

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp	2011-05-17 23:29:55 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp	2011-10-07 16:12:13 +0000
@@ -497,6 +497,14 @@ void Dbtup::execREAD_CONFIG_REQ(Signal*
   }
   
   initialiseRecordsLab(signal, 0, ref, senderData);
+
+  {
+    Uint32 val = 0;
+    ndb_mgm_get_int_parameter(p, CFG_DB_CRASH_ON_CORRUPTED_TUPLE,
+                              &val);
+    c_crashOnCorruptedTuple = val ? true : false;
+  }
+
 }//Dbtup::execSIZEALT_REP()
 
 void Dbtup::initRecords() 

=== modified file 'storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp	2011-10-13 09:02:21 +0000
@@ -842,6 +842,8 @@ public:
   static Uint32 mt_buildIndexFragment_wrapper(void*);
 private:
   Uint32 mt_buildIndexFragment(struct mt_BuildIndxCtx*);
+
+  Signal* c_signal_bug32040;
 };
 
 // Dbtux::TupLoc

=== modified file 'storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp	2011-07-04 13:37:56 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp	2011-10-13 09:02:21 +0000
@@ -90,6 +90,8 @@ Dbtux::Dbtux(Block_context& ctx, Uint32
   addRecSignal(GSN_NODE_STATE_REP, &Dbtux::execNODE_STATE_REP, true);
 
   addRecSignal(GSN_DROP_FRAG_REQ, &Dbtux::execDROP_FRAG_REQ);
+
+  c_signal_bug32040 = 0;
 }
 
 Dbtux::~Dbtux()
@@ -152,6 +154,7 @@ Dbtux::execSTTOR(Signal* signal)
     CLEAR_ERROR_INSERT_VALUE;
     c_tup = (Dbtup*)globalData.getBlock(DBTUP, instance());
     ndbrequire(c_tup != 0);
+    c_signal_bug32040 = signal;
     break;
   case 3:
     jam();

=== modified file 'storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp	2011-10-13 09:02:21 +0000
@@ -911,7 +911,11 @@ Dbtux::scanNext(ScanOpPtr scanPtr, bool
   }
 #endif
   // cannot be moved away from tuple we have locked
+#if defined VM_TRACE || defined ERROR_INSERT
   ndbrequire(scan.m_state != ScanOp::Locked);
+#else
+  ndbrequire(fromMaintReq || scan.m_state != ScanOp::Locked);
+#endif
   // scan direction
   const unsigned idir = scan.m_descending; // 0, 1
   const int jdir = 1 - 2 * (int)idir;      // 1, -1
@@ -921,6 +925,24 @@ Dbtux::scanNext(ScanOpPtr scanPtr, bool
   NodeHandle origNode(frag);
   selectNode(origNode, pos.m_loc);
   ndbrequire(islinkScan(origNode, scanPtr));
+  if (unlikely(scan.m_state == ScanOp::Locked)) {
+    // bug#32040 - no fix, just unlock and continue
+    jam();
+    if (scan.m_accLockOp != RNIL) {
+      jam();
+      Signal* signal = c_signal_bug32040;
+      AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
+      lockReq->returnCode = RNIL;
+      lockReq->requestInfo = AccLockReq::Abort;
+      lockReq->accOpPtr = scan.m_accLockOp;
+      EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::UndoSignalLength);
+      jamEntry();
+      ndbrequire(lockReq->returnCode == AccLockReq::Success);
+      scan.m_accLockOp = RNIL;
+      scan.m_lockwait = false;
+    }
+    scan.m_state = ScanOp::Next;
+  }
   // current node in loop
   NodeHandle node = origNode;
   // copy of entry found

=== modified file 'storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp'
--- a/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp	2011-10-17 13:37:47 +0000
@@ -149,8 +149,9 @@ public:
   // schema trans
   Uint32 c_schemaTransId;
   Uint32 c_schemaTransKey;
-  Uint32 c_hashMapId;
-  Uint32 c_hashMapVersion;
+  // intersignal transient store of: hash_map, logfilegroup, tablespace
+  Uint32 c_objectId; 
+  Uint32 c_objectVersion;
 
 public:
   Ndbcntr(Block_context&);

=== modified file 'storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp	2011-10-17 13:37:47 +0000
@@ -2204,8 +2204,8 @@ Ndbcntr::execCREATE_HASH_MAP_CONF(Signal
   if (conf->senderData == 0)
   {
     jam();
-    c_hashMapId = conf->objectId;
-    c_hashMapVersion = conf->objectVersion;
+    c_objectId = conf->objectId;
+    c_objectVersion = conf->objectVersion;
   }
 
   createSystableLab(signal, 0);
@@ -2274,8 +2274,8 @@ Ndbcntr::createDDObjects(Signal * signal
     {
       jam();
       fg.TS_ExtentSize = Uint32(entry->size);
-      fg.TS_LogfileGroupId = RNIL;
-      fg.TS_LogfileGroupVersion = RNIL;
+      fg.TS_LogfileGroupId = c_objectId;
+      fg.TS_LogfileGroupVersion = c_objectVersion;
     }
 
     SimpleProperties::UnpackStatus s;
@@ -2310,8 +2310,8 @@ Ndbcntr::createDDObjects(Signal * signal
     DictFilegroupInfo::File f; f.init();
     BaseString::snprintf(f.FileName, sizeof(f.FileName), "%s", entry->name);
     f.FileType = entry->type;
-    f.FilegroupId = RNIL;
-    f.FilegroupVersion = RNIL;
+    f.FilegroupId = c_objectId;
+    f.FilegroupVersion = c_objectVersion;
     f.FileSizeHi = Uint32(entry->size >> 32);
     f.FileSizeLo = Uint32(entry->size);
 
@@ -2371,6 +2371,8 @@ Ndbcntr::execCREATE_FILEGROUP_CONF(Signa
 {
   jamEntry();
   CreateFilegroupConf* conf = (CreateFilegroupConf*)signal->getDataPtr();
+  c_objectId = conf->filegroupId;
+  c_objectVersion = conf->filegroupVersion;
   createDDObjects(signal, conf->senderData + 1);
 }
 
@@ -2433,8 +2435,8 @@ void Ndbcntr::createSystableLab(Signal*
   //w.add(DictTabInfo::KeyLength, 1);
   w.add(DictTabInfo::TableTypeVal, (Uint32)table.tableType);
   w.add(DictTabInfo::SingleUserMode, (Uint32)NDB_SUM_READ_WRITE);
-  w.add(DictTabInfo::HashMapObjectId, c_hashMapId);
-  w.add(DictTabInfo::HashMapVersion, c_hashMapVersion);
+  w.add(DictTabInfo::HashMapObjectId, c_objectId);
+  w.add(DictTabInfo::HashMapVersion, c_objectVersion);
 
   for (unsigned i = 0; i < table.columnCount; i++) {
     const SysColumn& column = table.columnList[i];

=== modified file 'storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-09-14 11:32:24 +0000
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-10-17 13:54:09 +0000
@@ -4788,7 +4788,9 @@ void Qmgr::failReport(Signal* signal,
     if (ERROR_INSERTED(938))
     {
       nodeFailCount++;
-      ndbout_c("QMGR : execFAIL_REP : %u nodes have failed", nodeFailCount);
+      ndbout_c("QMGR : execFAIL_REP(Failed : %u Source : %u  Cause : %u) : "
+               "%u nodes have failed", 
+               aFailedNode, sourceNode, aFailCause, nodeFailCount);
       /* Count DB nodes */
       Uint32 nodeCount = 0;
       for (Uint32 i = 1; i < MAX_NDB_NODES; i++)
@@ -6877,6 +6879,12 @@ Qmgr::execNODE_PINGCONF(Signal* signal)
     return;
   }
 
+  if (ERROR_INSERTED(938))
+  {
+    ndbout_c("QMGR : execNODE_PING_CONF() from %u in tick %u",
+             sendersNodeId, m_connectivity_check.m_tick);
+  }
+
   /* Node must have been pinged, we must be waiting for the response,
    * or the node must have already failed
    */

=== modified file 'storage/ndb/src/kernel/blocks/record_types.hpp'
--- a/storage/ndb/src/kernel/blocks/record_types.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/record_types.hpp	2011-10-07 13:15:08 +0000
@@ -67,10 +67,15 @@
 #define RG_QUERY_MEMORY         8
 
 /**
+ * Schema transaction memory
+ */
+#define RG_SCHEMA_TRANS_MEMORY  9
+
+/**
  * 
  */
 #define RG_RESERVED             0
-#define RG_COUNT                9
+#define RG_COUNT                10
 
 /**
  * Record types
@@ -100,4 +105,27 @@
 #define RT_SPJ_DATABUFFER          MAKE_TID( 4, RG_QUERY_MEMORY)
 #define RT_SPJ_SCANFRAG            MAKE_TID( 5, RG_QUERY_MEMORY)
 
+#define RT_DBDICT_SCHEMA_TRANS_ARENA MAKE_TID( 1, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_SCHEMA_TRANSACTION MAKE_TID( 2, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_SCHEMA_OPERATION   MAKE_TID( 3, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_CREATE_TABLE       MAKE_TID( 4, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_DROP_TABLE         MAKE_TID( 5, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_ALTER_TABLE        MAKE_TID( 6, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_CREATE_TRIGGER     MAKE_TID( 7, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_DROP_TRIGGER       MAKE_TID( 8, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_CREATE_INDEX       MAKE_TID( 9, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_DROP_INDEX         MAKE_TID( 10, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_ALTER_INDEX        MAKE_TID( 11, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_BUILD_INDEX        MAKE_TID( 12, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_INDEX_STAT         MAKE_TID( 13, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_CREATE_FILEGROUP   MAKE_TID( 14, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_CREATE_FILE        MAKE_TID( 15, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_DROP_FILEGROUP     MAKE_TID( 16, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_DROP_FILE          MAKE_TID( 17, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_CREATE_HASH_MAP    MAKE_TID( 18, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_COPY_DATA          MAKE_TID( 19, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_CREATE_NODEGROUP   MAKE_TID( 20, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_DROP_NODEGROUP     MAKE_TID( 21, RG_SCHEMA_TRANS_MEMORY)
+#define RT_DBDICT_OP_SECTION_BUFFER  MAKE_TID( 22, RG_SCHEMA_TRANS_MEMORY)
+
 #endif

=== added file 'storage/ndb/src/kernel/blocks/thrman.cpp'
--- a/storage/ndb/src/kernel/blocks/thrman.cpp	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/src/kernel/blocks/thrman.cpp	2011-10-07 09:28:24 +0000
@@ -0,0 +1,129 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#include "thrman.hpp"
+#include <mt.hpp>
+#include <signaldata/DbinfoScan.hpp>
+#include <NdbGetRUsage.h>
+
+#include <EventLogger.hpp>
+extern EventLogger * g_eventLogger;
+
+Thrman::Thrman(Block_context & ctx, Uint32 instanceno) :
+  SimulatedBlock(THRMAN, ctx, instanceno)
+{
+  BLOCK_CONSTRUCTOR(Thrman);
+
+  addRecSignal(GSN_DBINFO_SCANREQ, &Thrman::execDBINFO_SCANREQ);
+}
+
+Thrman::~Thrman()
+{
+}
+
+BLOCK_FUNCTIONS(Thrman)
+
+void
+Thrman::execDBINFO_SCANREQ(Signal* signal)
+{
+  jamEntry();
+
+  DbinfoScanReq req= *(DbinfoScanReq*)signal->theData;
+  const Ndbinfo::ScanCursor* cursor =
+    CAST_CONSTPTR(Ndbinfo::ScanCursor, DbinfoScan::getCursorPtr(&req));
+  Ndbinfo::Ratelimit rl;
+
+  switch(req.tableId) {
+  case Ndbinfo::THREADBLOCKS_TABLEID: {
+    Uint32 arr[NO_OF_BLOCKS];
+    Uint32 len = mt_get_blocklist(this, arr, NDB_ARRAY_SIZE(arr));
+    Uint32 pos = cursor->data[0];
+    for (; ; )
+    {
+      Ndbinfo::Row row(signal, req);
+      row.write_uint32(getOwnNodeId());
+      row.write_uint32(getThreadId());             // thr_no
+      row.write_uint32(blockToMain(arr[pos]));     // block_number
+      row.write_uint32(blockToInstance(arr[pos])); // block_instance
+      ndbinfo_send_row(signal, req, row, rl);
+
+      pos++;
+      if (pos == len)
+      {
+        jam();
+        break;
+      }
+      else if (rl.need_break(req))
+      {
+        jam();
+        ndbinfo_send_scan_break(signal, req, rl, pos);
+        return;
+      }
+    }
+    break;
+  }
+  case Ndbinfo::THREADSTAT_TABLEID:{
+    ndb_thr_stat stat;
+    mt_get_thr_stat(this, &stat);
+    Ndbinfo::Row row(signal, req);
+    row.write_uint32(getOwnNodeId());
+    row.write_uint32(getThreadId());  // thr_no
+    row.write_string(stat.name);
+    row.write_uint64(stat.loop_cnt);
+    row.write_uint64(stat.exec_cnt);
+    row.write_uint64(stat.wait_cnt);
+    row.write_uint64(stat.local_sent_prioa);
+    row.write_uint64(stat.local_sent_priob);
+    row.write_uint64(stat.remote_sent_prioa);
+    row.write_uint64(stat.remote_sent_priob);
+
+    row.write_uint64(stat.os_tid);
+    row.write_uint64(NdbTick_CurrentMillisecond());
+
+    struct ndb_rusage os_rusage;
+    Ndb_GetRUSage(&os_rusage);
+    row.write_uint64(os_rusage.ru_utime);
+    row.write_uint64(os_rusage.ru_stime);
+    row.write_uint64(os_rusage.ru_minflt);
+    row.write_uint64(os_rusage.ru_majflt);
+    row.write_uint64(os_rusage.ru_nvcsw);
+    row.write_uint64(os_rusage.ru_nivcsw);
+    ndbinfo_send_row(signal, req, row, rl);
+    break;
+  }
+  default:
+    break;
+  }
+
+  ndbinfo_send_scan_conf(signal, req, rl);
+}
+
+ThrmanProxy::ThrmanProxy(Block_context & ctx) :
+  LocalProxy(THRMAN, ctx)
+{
+}
+
+ThrmanProxy::~ThrmanProxy()
+{
+}
+
+SimulatedBlock*
+ThrmanProxy::newWorker(Uint32 instanceNo)
+{
+  return new Thrman(m_ctx, instanceNo);
+}
+

=== added file 'storage/ndb/src/kernel/blocks/thrman.hpp'
--- a/storage/ndb/src/kernel/blocks/thrman.hpp	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/src/kernel/blocks/thrman.hpp	2011-10-07 08:07:21 +0000
@@ -0,0 +1,48 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#ifndef THRMAN_H
+#define THRMAN_H
+
+#include <SimulatedBlock.hpp>
+#include <LocalProxy.hpp>
+
+class Thrman : public SimulatedBlock
+{
+public:
+  Thrman(Block_context& ctx, Uint32 instanceNumber = 0);
+  virtual ~Thrman();
+  BLOCK_DEFINES(Thrman);
+
+  void execDBINFO_SCANREQ(Signal*);
+protected:
+
+};
+
+class ThrmanProxy : public LocalProxy
+{
+public:
+  ThrmanProxy(Block_context& ctx);
+  virtual ~ThrmanProxy();
+  BLOCK_DEFINES(ThrmanProxy);
+
+protected:
+  virtual SimulatedBlock* newWorker(Uint32 instanceNo);
+
+};
+
+#endif

=== modified file 'storage/ndb/src/kernel/blocks/tsman.cpp'
--- a/storage/ndb/src/kernel/blocks/tsman.cpp	2011-03-15 16:11:47 +0000
+++ b/storage/ndb/src/kernel/blocks/tsman.cpp	2011-10-04 07:56:40 +0000
@@ -488,7 +488,7 @@ Tsman::execDROP_FILEGROUP_IMPL_REQ(Signa
       if (ptr.p->m_ref_count)
       {
         jam();
-        sendSignalWithDelay(reference(), GSN_DROP_FILEGROUP_REQ, signal,
+        sendSignalWithDelay(reference(), GSN_DROP_FILEGROUP_IMPL_REQ, signal,
                             100, signal->getLength());
         return;
       }

=== modified file 'storage/ndb/src/kernel/ndbd.cpp'
--- a/storage/ndb/src/kernel/ndbd.cpp	2011-09-23 09:13:22 +0000
+++ b/storage/ndb/src/kernel/ndbd.cpp	2011-10-07 13:15:08 +0000
@@ -239,8 +239,17 @@ init_global_memory_manager(EmulatorData
     ed.m_mem_manager->set_resource_limit(rl);
   }
 
+  Uint32 stpages = 64;
+  {
+    Resource_limit rl;
+    rl.m_min = stpages;
+    rl.m_max = 0;
+    rl.m_resource_id = RG_SCHEMA_TRANS_MEMORY;
+    ed.m_mem_manager->set_resource_limit(rl);
+  }
+
   Uint32 sum = shared_pages + tupmem + filepages + jbpages + sbpages +
-    pgman_pages;
+    pgman_pages + stpages;
 
   if (sum)
   {

=== modified file 'storage/ndb/src/kernel/vm/DLFifoList.hpp'
--- a/storage/ndb/src/kernel/vm/DLFifoList.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/vm/DLFifoList.hpp	2011-10-07 11:46:40 +0000
@@ -465,18 +465,18 @@ DLFifoListImpl<P,T,U>::hasPrev(const Ptr
 
 // Specializations
 
-template <typename T, typename U = T>
-class DLFifoList : public DLFifoListImpl<ArrayPool<T>, T, U>
+template <typename T, typename U = T, typename P = ArrayPool<T> >
+class DLFifoList : public DLFifoListImpl<P, T, U>
 {
 public:
-  DLFifoList(ArrayPool<T> & p) : DLFifoListImpl<ArrayPool<T>, T, U>(p) {}
+  DLFifoList(P & p) : DLFifoListImpl<P, T, U>(p) {}
 };
 
-template <typename T, typename U = T>
-class LocalDLFifoList : public LocalDLFifoListImpl<ArrayPool<T>,T,U> {
+template <typename T, typename U = T, typename P = ArrayPool<T> >
+class LocalDLFifoList : public LocalDLFifoListImpl<P,T,U> {
 public:
-  LocalDLFifoList(ArrayPool<T> & p, typename DLFifoList<T,U>::Head & _src)
-    : LocalDLFifoListImpl<ArrayPool<T>,T,U>(p, _src) {}
+  LocalDLFifoList(P & p, typename DLFifoList<T,U,P>::Head & _src)
+    : LocalDLFifoListImpl<P,T,U>(p, _src) {}
 };
 
 #endif

=== modified file 'storage/ndb/src/kernel/vm/DLHashTable.hpp'
--- a/storage/ndb/src/kernel/vm/DLHashTable.hpp	2011-06-30 15:55:35 +0000
+++ b/storage/ndb/src/kernel/vm/DLHashTable.hpp	2011-10-13 09:25:13 +0000
@@ -27,11 +27,16 @@
  *   (with a double linked list)
  *
  * The entries in the hashtable must have the following methods:
- *  -# bool equal(const class T &) const;
+ *  -# bool U::equal(const class U &) const;
  *     Which should return equal if the to objects have the same key
- *  -# Uint32 hashValue() const;
+ *  -# Uint32 U::hashValue() const;
  *     Which should return a 32 bit hashvalue
+ *
+ * and the following members:
+ *  -# Uint32 U::nextHash;
+ *  -# Uint32 U::prevHash;
  */
+
 template <typename P, typename T, typename U = T>
 class DLHashTableImpl 
 {
@@ -211,7 +216,7 @@ inline
 void
 DLHashTableImpl<P, T, U>::add(Ptr<T> & obj)
 {
-  const Uint32 hv = obj.p->hashValue() & mask;
+  const Uint32 hv = obj.p->U::hashValue() & mask;
   const Uint32 i  = hashValues[hv];
   
   if(i == RNIL)
@@ -288,7 +293,7 @@ inline
 void
 DLHashTableImpl<P, T, U>::remove(Ptr<T> & ptr, const T & key)
 {
-  const Uint32 hv = key.hashValue() & mask;  
+  const Uint32 hv = key.U::hashValue() & mask;  
   
   Uint32 i;
   T * p;
@@ -300,7 +305,7 @@ DLHashTableImpl<P, T, U>::remove(Ptr<T>
   while(i != RNIL)
   {
     p = thePool.getPtr(i);
-    if(key.equal(* p))
+    if(key.U::equal(* p))
     {
       const Uint32 next = p->U::nextHash;
       if(prev.i == RNIL)
@@ -366,7 +371,7 @@ DLHashTableImpl<P, T, U>::remove(Ptr<T>
   } 
   else 
   {
-    const Uint32 hv = ptr.p->hashValue() & mask;  
+    const Uint32 hv = ptr.p->U::hashValue() & mask;  
     if (hashValues[hv] == ptr.i)
     {
       hashValues[hv] = next;
@@ -400,7 +405,7 @@ DLHashTableImpl<P, T, U>::release(Ptr<T>
   } 
   else 
   {
-    const Uint32 hv = ptr.p->hashValue() & mask;  
+    const Uint32 hv = ptr.p->U::hashValue() & mask;  
     if (hashValues[hv] == ptr.i)
     {
       hashValues[hv] = next;
@@ -493,7 +498,7 @@ inline
 bool
 DLHashTableImpl<P, T, U>::find(Ptr<T> & ptr, const T & key) const 
 {
-  const Uint32 hv = key.hashValue() & mask;  
+  const Uint32 hv = key.U::hashValue() & mask;  
   
   Uint32 i;
   T * p;
@@ -502,7 +507,7 @@ DLHashTableImpl<P, T, U>::find(Ptr<T> &
   while(i != RNIL)
   {
     p = thePool.getPtr(i);
-    if(key.equal(* p))
+    if(key.U::equal(* p))
     {
       ptr.i = i;
       ptr.p = p;
@@ -517,11 +522,11 @@ DLHashTableImpl<P, T, U>::find(Ptr<T> &
 
 // Specializations
 
-template <typename T, typename U = T>
-class DLHashTable : public DLHashTableImpl<ArrayPool<T>, T, U>
+template <typename T, typename U = T, typename P = ArrayPool<T> >
+class DLHashTable : public DLHashTableImpl<P, T, U>
 {
 public:
-  DLHashTable(ArrayPool<T> & p) : DLHashTableImpl<ArrayPool<T>, T, U>(p) {}
+  DLHashTable(P & p) : DLHashTableImpl<P, T, U>(p) {}
 };
 
 #endif

=== modified file 'storage/ndb/src/kernel/vm/DataBuffer2.hpp'
--- a/storage/ndb/src/kernel/vm/DataBuffer2.hpp	2010-10-27 06:54:54 +0000
+++ b/storage/ndb/src/kernel/vm/DataBuffer2.hpp	2011-10-07 11:46:40 +0000
@@ -18,6 +18,8 @@
 #ifndef DATA_BUFFER2_HPP
 #define DATA_BUFFER2_HPP
 
+#include <ErrorReporter.hpp>
+
 /**
  * @class  DataBuffer
  * @brief  Buffer of data words

=== modified file 'storage/ndb/src/kernel/vm/Ndbinfo.hpp'
--- a/storage/ndb/src/kernel/vm/Ndbinfo.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/vm/Ndbinfo.hpp	2011-10-11 08:11:15 +0000
@@ -46,7 +46,11 @@ public:
     RESOURCES_TABLEID =          7,
     COUNTERS_TABLEID =           8,
     NODES_TABLEID =              9,
-    DISKPAGEBUFFER_TABLEID =     10
+    DISKPAGEBUFFER_TABLEID =     10,
+    THREADBLOCKS_TABLEID =       11,
+    THREADSTAT_TABLEID =         12,
+    TRANSACTIONS_TABLEID =       13,
+    OPERATIONS_TABLEID =         14
   };
 
   struct Table {

=== modified file 'storage/ndb/src/kernel/vm/NdbinfoTables.cpp'
--- a/storage/ndb/src/kernel/vm/NdbinfoTables.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/vm/NdbinfoTables.cpp	2011-10-17 13:32:49 +0000
@@ -169,6 +169,76 @@ DECLARE_NDBINFO_TABLE(DISKPAGEBUFFER, 9)
   }
 };
 
+DECLARE_NDBINFO_TABLE(THREADBLOCKS, 4) =
+{ { "threadblocks", 4, 0, "which blocks are run in which threads" },
+  {
+    {"node_id",                     Ndbinfo::Number, "node id"},
+    {"thr_no",                      Ndbinfo::Number, "thread number"},
+    {"block_number",                Ndbinfo::Number, "block number"},
+    {"block_instance",              Ndbinfo::Number, "block instance"},
+  }
+};
+
+DECLARE_NDBINFO_TABLE(THREADSTAT, 18) =
+{ { "threadstat", 18, 0, "Statistics on execution threads" },
+  {
+    //{"0123456701234567"}
+    {"node_id",             Ndbinfo::Number, "node id"},
+    {"thr_no",              Ndbinfo::Number, "thread number"},
+    {"thr_nm",              Ndbinfo::String, "thread name"},
+    {"c_loop",              Ndbinfo::Number64,"No of loops in main loop"},
+    {"c_exec",              Ndbinfo::Number64,"No of signals executed"},
+    {"c_wait",              Ndbinfo::Number64,"No of times waited for more input"},
+    {"c_l_sent_prioa",      Ndbinfo::Number64,"No of prio A signals sent to own node"},
+    {"c_l_sent_priob",      Ndbinfo::Number64,"No of prio B signals sent to own node"},
+    {"c_r_sent_prioa",      Ndbinfo::Number64,"No of prio A signals sent to remote node"},
+    {"c_r_sent_priob",      Ndbinfo::Number64,"No of prio B signals sent to remote node"},
+    {"os_tid",              Ndbinfo::Number64,"OS thread id"},
+    {"os_now",              Ndbinfo::Number64,"OS gettimeofday (millis)"},
+    {"os_ru_utime",         Ndbinfo::Number64,"OS user CPU time (micros)"},
+    {"os_ru_stime",         Ndbinfo::Number64,"OS system CPU time (micros)"},
+    {"os_ru_minflt",        Ndbinfo::Number64,"OS page reclaims (soft page faults)"},
+    {"os_ru_majflt",        Ndbinfo::Number64,"OS page faults (hard page faults)"},
+    {"os_ru_nvcsw",         Ndbinfo::Number64,"OS voluntary context switches"},
+    {"os_ru_nivcsw",        Ndbinfo::Number64,"OS involuntary context switches"}
+  }
+};
+
+DECLARE_NDBINFO_TABLE(TRANSACTIONS, 11) =
+{ { "transactions", 11, 0, "transactions" },
+  {
+    {"node_id",             Ndbinfo::Number, "node id"},
+    {"block_instance",      Ndbinfo::Number, "TC instance no"},
+    {"objid",               Ndbinfo::Number, "Object id of transaction object"},
+    {"apiref",              Ndbinfo::Number, "API reference"},
+    {"transid0",            Ndbinfo::Number, "Transaction id"},
+    {"transid1",            Ndbinfo::Number, "Transaction id"},
+    {"state",               Ndbinfo::Number, "Transaction state"},
+    {"flags",               Ndbinfo::Number, "Transaction flags"},
+    {"c_ops",               Ndbinfo::Number, "No of operations in transaction" },
+    {"outstanding",         Ndbinfo::Number, "Currently outstanding request" },
+    {"timer",               Ndbinfo::Number, "Timer (seconds)"},
+  }
+};
+
+DECLARE_NDBINFO_TABLE(OPERATIONS, 12) =
+{ { "operations", 12, 0, "operations" },
+  {
+    {"node_id",             Ndbinfo::Number, "node id"},
+    {"block_instance",      Ndbinfo::Number, "LQH instance no"},
+    {"objid",               Ndbinfo::Number, "Object id of operation object"},
+    {"tcref",               Ndbinfo::Number, "TC reference"},
+    {"apiref",              Ndbinfo::Number, "API reference"},
+    {"transid0",            Ndbinfo::Number, "Transaction id"},
+    {"transid1",            Ndbinfo::Number, "Transaction id"},
+    {"tableid",             Ndbinfo::Number, "Table id"},
+    {"fragmentid",          Ndbinfo::Number, "Fragment id"},
+    {"op",                  Ndbinfo::Number, "Operation type"},
+    {"state",               Ndbinfo::Number, "Operation state"},
+    {"flags",               Ndbinfo::Number, "Operation flags"}
+  }
+};
+
 #define DBINFOTBL(x) { Ndbinfo::x##_TABLEID, (Ndbinfo::Table*)&ndbinfo_##x }
 
 static
@@ -188,7 +258,11 @@ struct ndbinfo_table_list_entry {
   DBINFOTBL(RESOURCES),
   DBINFOTBL(COUNTERS),
   DBINFOTBL(NODES),
-  DBINFOTBL(DISKPAGEBUFFER)
+  DBINFOTBL(DISKPAGEBUFFER),
+  DBINFOTBL(THREADBLOCKS),
+  DBINFOTBL(THREADSTAT),
+  DBINFOTBL(TRANSACTIONS),
+  DBINFOTBL(OPERATIONS)
 };
 
 static int no_ndbinfo_tables =

=== modified file 'storage/ndb/src/kernel/vm/Pool.hpp'
--- a/storage/ndb/src/kernel/vm/Pool.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/kernel/vm/Pool.hpp	2011-10-07 11:46:40 +0000
@@ -33,7 +33,7 @@
  */
 #define RG_BITS 5
 #define RG_MASK ((1 << RG_BITS) - 1)
-#define MAKE_TID(TID,RG) ((TID << RG_BITS) | RG)
+#define MAKE_TID(TID,RG) Uint32((TID << RG_BITS) | RG)
 
 /**
  * Page bits

=== modified file 'storage/ndb/src/kernel/vm/SimulatedBlock.hpp'
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp	2011-08-27 06:06:02 +0000
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp	2011-10-07 08:07:21 +0000
@@ -634,7 +634,9 @@ private:
    * In MT LQH main instance is the LQH proxy and the others ("workers")
    * are real LQHs run by multiple threads.
    */
-  enum { MaxInstances = 1 + MAX_NDBMT_LQH_WORKERS + 1 }; // main+lqh+extra
+protected:
+  enum { MaxInstances = 3 + MAX_NDBMT_TC_THREADS + MAX_NDBMT_LQH_WORKERS + 1 };
+private:
   SimulatedBlock** theInstanceList; // set in main, indexed by instance
   SimulatedBlock* theMainInstance;  // set in all
   /*

=== modified file 'storage/ndb/src/kernel/vm/dummy_nonmt.cpp'
--- a/storage/ndb/src/kernel/vm/dummy_nonmt.cpp	2011-09-15 20:21:59 +0000
+++ b/storage/ndb/src/kernel/vm/dummy_nonmt.cpp	2011-10-07 08:07:21 +0000
@@ -20,49 +20,62 @@
 #include <ndb_types.h>
 
 void
-add_thr_map(Uint32, Uint32, Uint32)
+mt_init_thr_map()
 {
   assert(false);
 }
 
 void
-add_main_thr_map()
+mt_add_thr_map(Uint32, Uint32)
 {
   assert(false);
 }
 
 void
-add_lqh_worker_thr_map(Uint32, Uint32)
+mt_finalize_thr_map()
 {
   assert(false);
 }
 
-void
-add_extra_worker_thr_map(Uint32, Uint32)
+Uint32
+mt_get_instance_count(Uint32 block)
 {
   assert(false);
+  return 0;
 }
 
-void
-add_tc_worker_thr_map(Uint32, Uint32)
+Uint32
+compute_jb_pages(struct EmulatorData*)
 {
-  assert(false);
+  return 0;
 }
 
-void
-finalize_thr_map()
+
+bool
+NdbIsMultiThreaded()
 {
-  assert(false);
+  return false;
 }
 
+#include <BlockNumbers.h>
+
 Uint32
-compute_jb_pages(struct EmulatorData*)
+mt_get_blocklist(class SimulatedBlock * block, Uint32 arr[], Uint32 len)
 {
-  return 0;
+  (void)block;
+  for (Uint32 i = 0; i<NO_OF_BLOCKS; i++)
+  {
+    arr[i] = numberToBlock(MIN_BLOCK_NO + i, 0);
+  }
+  return NO_OF_BLOCKS;
 }
 
-bool
-NdbIsMultiThreaded()
+#include "mt.hpp"
+
+void
+mt_get_thr_stat(class SimulatedBlock *, ndb_thr_stat* dst)
 {
-  return false;
+  bzero(dst, sizeof(* dst));
+  dst->name = "main";
 }
+

=== modified file 'storage/ndb/src/kernel/vm/mt.cpp'
--- a/storage/ndb/src/kernel/vm/mt.cpp	2011-09-15 20:21:59 +0000
+++ b/storage/ndb/src/kernel/vm/mt.cpp	2011-10-07 08:07:21 +0000
@@ -72,11 +72,11 @@ static const Uint32 MAX_SIGNALS_BEFORE_W
 
 //#define NDB_MT_LOCK_TO_CPU
 
-#define MAX_BLOCK_INSTANCES (1 + MAX_NDBMT_LQH_WORKERS + 1) //main+lqh+extra
 #define NUM_MAIN_THREADS 2 // except receiver
 #define MAX_THREADS (NUM_MAIN_THREADS +       \
                      MAX_NDBMT_LQH_THREADS +  \
                      MAX_NDBMT_TC_THREADS + 1)
+#define MAX_BLOCK_INSTANCES (MAX_THREADS)
 
 /* If this is too small it crashes before first signal. */
 #define MAX_INSTANCES_PER_THREAD (16 + 8 * MAX_NDBMT_LQH_THREADS)
@@ -876,10 +876,16 @@ struct thr_data
   /* Watchdog counter for this thread. */
   Uint32 m_watchdog_counter;
   /* Signal delivery statistics. */
-  Uint32 m_prioa_count;
-  Uint32 m_prioa_size;
-  Uint32 m_priob_count;
-  Uint32 m_priob_size;
+  struct
+  {
+    Uint64 m_loop_cnt;
+    Uint64 m_exec_cnt;
+    Uint64 m_wait_cnt;
+    Uint64 m_prioa_count;
+    Uint64 m_prioa_size;
+    Uint64 m_priob_count;
+    Uint64 m_priob_size;
+  } m_stat;
 
   /* Array of node ids with pending remote send data. */
   Uint8 m_pending_send_nodes[MAX_NTRANSPORTERS];
@@ -2592,7 +2598,7 @@ add_thr_map(Uint32 main, Uint32 instance
 
 /* Static assignment of main instances (before first signal). */
 void
-add_main_thr_map()
+mt_init_thr_map()
 {
   /* Keep mt-classic assignments in MT LQH. */
   const Uint32 thr_GLOBAL = 0;
@@ -2620,33 +2626,72 @@ add_main_thr_map()
   add_thr_map(RESTORE, 0, thr_LOCAL);
   add_thr_map(DBINFO, 0, thr_LOCAL);
   add_thr_map(DBSPJ, 0, thr_GLOBAL);
+  add_thr_map(THRMAN, 0, thr_GLOBAL);
 }
 
-/* Workers added by LocalProxy (before first signal). */
-void
-add_lqh_worker_thr_map(Uint32 block, Uint32 instance)
+Uint32
+mt_get_instance_count(Uint32 block)
 {
-  require(instance != 0);
-  Uint32 i = instance - 1;
-  Uint32 thr_no = NUM_MAIN_THREADS + i % num_lqh_threads;
-  add_thr_map(block, instance, thr_no);
+  switch(block){
+  case DBLQH:
+  case DBACC:
+  case DBTUP:
+  case DBTUX:
+  case BACKUP:
+  case RESTORE:
+    return globalData.ndbMtLqhWorkers;
+    break;
+  case PGMAN:
+    return globalData.ndbMtLqhWorkers + 1;
+    break;
+  case DBTC:
+  case DBSPJ:
+    return globalData.ndbMtTcThreads;
+    break;
+  case THRMAN:
+    return num_threads;
+  default:
+    require(false);
+  }
+  return 0;
 }
 
 void
-add_tc_worker_thr_map(Uint32 block, Uint32 instance)
+mt_add_thr_map(Uint32 block, Uint32 instance)
 {
   require(instance != 0);
-  Uint32 i = instance - 1;
-  Uint32 thr_no = NUM_MAIN_THREADS + num_lqh_threads + i;
-  add_thr_map(block, instance, thr_no);
-}
+  Uint32 thr_no = NUM_MAIN_THREADS;
+  switch(block){
+  case DBLQH:
+  case DBACC:
+  case DBTUP:
+  case DBTUX:
+  case BACKUP:
+  case RESTORE:
+    thr_no += (instance - 1) % num_lqh_threads;
+    break;
+  case PGMAN:
+    if (instance == num_lqh_threads + 1)
+    {
+      // Put extra PGMAN together with its Proxy
+      thr_no = block2ThreadId(block, 0);
+    }
+    else
+    {
+      thr_no += (instance - 1) % num_lqh_threads;
+    }
+    break;
+  case DBTC:
+  case DBSPJ:
+    thr_no += num_lqh_threads + (instance - 1);
+    break;
+  case THRMAN:
+    thr_no = instance - 1;
+    break;
+  default:
+    require(false);
+  }
 
-/* Extra workers run`in proxy thread. */
-void
-add_extra_worker_thr_map(Uint32 block, Uint32 instance)
-{
-  require(instance != 0);
-  Uint32 thr_no = block2ThreadId(block, 0);
   add_thr_map(block, instance, thr_no);
 }
 
@@ -2661,7 +2706,7 @@ add_extra_worker_thr_map(Uint32 block, U
  * NOTE: extra pgman worker is instance 5
  */
 void
-finalize_thr_map()
+mt_finalize_thr_map()
 {
   for (Uint32 b = 0; b < NO_OF_BLOCKS; b++)
   {
@@ -2694,60 +2739,6 @@ finalize_thr_map()
   }
 }
 
-static void reportSignalStats(Uint32 self, Uint32 a_count, Uint32 a_size,
-                              Uint32 b_count, Uint32 b_size)
-{
-  SignalT<6> sT;
-  Signal *s= new (&sT) Signal(0);
-
-  memset(&s->header, 0, sizeof(s->header));
-  s->header.theLength = 6;
-  s->header.theSendersSignalId = 0;
-  s->header.theSendersBlockRef = numberToRef(0, 0);
-  s->header.theVerId_signalNumber = GSN_EVENT_REP;
-  s->header.theReceiversBlockNumber = CMVMI;
-  s->theData[0] = NDB_LE_MTSignalStatistics;
-  s->theData[1] = self;
-  s->theData[2] = a_count;
-  s->theData[3] = a_size;
-  s->theData[4] = b_count;
-  s->theData[5] = b_size;
-  /* ToDo: need this really be prio A like in old code? */
-  sendlocal(self, &s->header, s->theData,
-            NULL);
-}
-
-static inline void
-update_sched_stats(thr_data *selfptr)
-{
-  if(selfptr->m_prioa_count + selfptr->m_priob_count >= 2000000)
-  {
-    reportSignalStats(selfptr->m_thr_no,
-                      selfptr->m_prioa_count,
-                      selfptr->m_prioa_size,
-                      selfptr->m_priob_count,
-                      selfptr->m_priob_size);
-    selfptr->m_prioa_count = 0;
-    selfptr->m_prioa_size = 0;
-    selfptr->m_priob_count = 0;
-    selfptr->m_priob_size = 0;
-
-#if 0
-    Uint32 thr_no = selfptr->m_thr_no;
-    ndbout_c("--- %u fifo: %u jba: %u global: %u",
-             thr_no,
-             fifo_used_pages(selfptr),
-             selfptr->m_jba_head.used(),
-             g_thr_repository.m_free_list.m_cnt);
-    for (Uint32 i = 0; i<num_threads; i++)
-    {
-      ndbout_c("  %u-%u : %u",
-               thr_no, i, selfptr->m_in_queue_head[i].used());
-    }
-#endif
-  }
-}
-
 static void
 init_thread(thr_data *selfptr)
 {
@@ -2854,8 +2845,6 @@ mt_receiver_thread_main(void *thr_arg)
   { 
     static int cnt = 0;
 
-    update_sched_stats(selfptr);
-
     if (cnt == 0)
     {
       watchDogCounter = 5;
@@ -2892,6 +2881,8 @@ mt_receiver_thread_main(void *thr_arg)
         has_received = true;
       }
     }
+    selfptr->m_stat.m_loop_cnt++;
+    selfptr->m_stat.m_exec_cnt += sum;
   }
 
   globalEmulatorData.theWatchDog->unregisterWatchedThread(thr_no);
@@ -3028,14 +3019,14 @@ mt_job_thread_main(void *thr_arg)
 
   Uint32 pending_send = 0;
   Uint32 send_sum = 0;
-  int loops = 0;
-  int maxloops = 10;/* Loops before reading clock, fuzzy adapted to 1ms freq. */
+  Uint32 loops = 0;
+  Uint32 maxloops = 10;/* Loops before reading clock, fuzzy adapted to 1ms freq. */
+  Uint32 waits = 0;
   NDB_TICKS now = selfptr->m_time;
 
   while (globalData.theRestartFlag != perform_stop)
   { 
     loops++;
-    update_sched_stats(selfptr);
 
     watchDogCounter = 2;
     scan_time_queues(selfptr, now);
@@ -3080,9 +3071,12 @@ mt_job_thread_main(void *thr_arg)
                             selfptr);
         if (waited)
         {
+          waits++;
           /* Update current time after sleeping */
           now = NdbTick_CurrentMillisecond();
-          loops = 0;
+          selfptr->m_stat.m_wait_cnt += waits;
+          selfptr->m_stat.m_loop_cnt += loops;
+          waits = loops = 0;
         }
       }
     }
@@ -3097,7 +3091,9 @@ mt_job_thread_main(void *thr_arg)
       {
         /* Update current time after sleeping */
         now = NdbTick_CurrentMillisecond();
-        loops = 0;
+        selfptr->m_stat.m_wait_cnt += waits;
+        selfptr->m_stat.m_loop_cnt += loops;
+        waits = loops = 0;
       }
     }
     else
@@ -3120,8 +3116,11 @@ mt_job_thread_main(void *thr_arg)
       else if (diff > 1 && maxloops > 1)
         maxloops -= ((maxloops/10) + 1); /* Overslept: Need more frequent read*/
 
-      loops = 0;
+      selfptr->m_stat.m_wait_cnt += waits;
+      selfptr->m_stat.m_loop_cnt += loops;
+      waits = loops = 0;
     }
+    selfptr->m_stat.m_exec_cnt += sum;
   }
 
   globalEmulatorData.theWatchDog->unregisterWatchedThread(thr_no);
@@ -3150,9 +3149,9 @@ sendlocal(Uint32 self, const SignalHeade
   assert(pthread_equal(selfptr->m_thr_id, pthread_self()));
   struct thr_data * dstptr = rep->m_thread + dst;
 
-  selfptr->m_priob_count++;
+  selfptr->m_stat.m_priob_count++;
   Uint32 siglen = (sizeof(*s) >> 2) + s->theLength + s->m_noOfSections;
-  selfptr->m_priob_size += siglen;
+  selfptr->m_stat.m_priob_size += siglen;
 
   thr_job_queue *q = dstptr->m_in_queue + self;
   thr_jb_write_state *w = selfptr->m_write_states + dst;
@@ -3178,9 +3177,9 @@ sendprioa(Uint32 self, const SignalHeade
          pthread_equal(selfptr->m_thr_id, pthread_self()));
   struct thr_data *dstptr = rep->m_thread + dst;
 
-  selfptr->m_prioa_count++;
+  selfptr->m_stat.m_prioa_count++;
   Uint32 siglen = (sizeof(*s) >> 2) + s->theLength + s->m_noOfSections;
-  selfptr->m_prioa_size += siglen;  
+  selfptr->m_stat.m_prioa_size += siglen;
 
   thr_job_queue *q = &(dstptr->m_jba);
   thr_jb_write_state w;
@@ -3359,10 +3358,7 @@ thr_init(struct thr_repository* rep, str
   }
   queue_init(&selfptr->m_tq);
 
-  selfptr->m_prioa_count = 0;
-  selfptr->m_prioa_size = 0;
-  selfptr->m_priob_count = 0;
-  selfptr->m_priob_size = 0;
+  bzero(&selfptr->m_stat, sizeof(selfptr->m_stat));
 
   selfptr->m_pending_send_count = 0;
   selfptr->m_pending_send_mask.clear();
@@ -4086,6 +4082,40 @@ mt_assert_own_thread(SimulatedBlock* blo
 }
 #endif
 
+
+Uint32
+mt_get_blocklist(SimulatedBlock * block, Uint32 arr[], Uint32 len)
+{
+  Uint32 thr_no = block->getThreadId();
+  thr_data *thr_ptr = g_thr_repository.m_thread + thr_no;
+
+  for (Uint32 i = 0; i < thr_ptr->m_instance_count; i++)
+  {
+    arr[i] = thr_ptr->m_instance_list[i];
+  }
+
+  return thr_ptr->m_instance_count;
+}
+
+void
+mt_get_thr_stat(class SimulatedBlock * block, ndb_thr_stat* dst)
+{
+  bzero(dst, sizeof(* dst));
+  Uint32 thr_no = block->getThreadId();
+  thr_data *selfptr = g_thr_repository.m_thread + thr_no;
+
+  THRConfigApplier & conf = globalEmulatorData.theConfiguration->m_thr_config;
+  dst->thr_no = thr_no;
+  dst->name = conf.getName(selfptr->m_instance_list, selfptr->m_instance_count);
+  dst->os_tid = NdbThread_GetTid(selfptr->m_thread);
+  dst->loop_cnt = selfptr->m_stat.m_loop_cnt;
+  dst->exec_cnt = selfptr->m_stat.m_exec_cnt;
+  dst->wait_cnt = selfptr->m_stat.m_wait_cnt;
+  dst->local_sent_prioa = selfptr->m_stat.m_prioa_count;
+  dst->local_sent_priob = selfptr->m_stat.m_priob_count;
+}
+
+
 /**
  * Global data
  */

=== modified file 'storage/ndb/src/kernel/vm/mt.hpp'
--- a/storage/ndb/src/kernel/vm/mt.hpp	2011-09-15 20:21:59 +0000
+++ b/storage/ndb/src/kernel/vm/mt.hpp	2011-10-07 08:07:21 +0000
@@ -29,13 +29,12 @@
 */
 extern Uint32 receiverThreadId;
 
+Uint32 mt_get_instance_count(Uint32 block);
+
 /* Assign block instances to thread */
-void add_thr_map(Uint32 block, Uint32 instance, Uint32 thr_no);
-void add_main_thr_map();
-void add_lqh_worker_thr_map(Uint32 block, Uint32 instance);
-void add_tc_worker_thr_map(Uint32 block, Uint32 instance);
-void add_extra_worker_thr_map(Uint32 block, Uint32 instance);
-void finalize_thr_map();
+void mt_init_thr_map();
+void mt_add_thr_map(Uint32 block, Uint32 instance);
+void mt_finalize_thr_map();
 
 void sendlocal(Uint32 self, const struct SignalHeader *s,
                const Uint32 *data, const Uint32 secPtr[3]);
@@ -87,4 +86,28 @@ void mt_wakeup(class SimulatedBlock*);
 void mt_assert_own_thread(class SimulatedBlock*);
 #endif
 
+/**
+ * return list of references running in this thread
+ */
+Uint32
+mt_get_blocklist(class SimulatedBlock*, Uint32 dst[], Uint32 len);
+
+
+struct ndb_thr_stat
+{
+  Uint32 thr_no;
+  Uint64 os_tid;
+  const char * name;
+  Uint64 loop_cnt;
+  Uint64 exec_cnt;
+  Uint64 wait_cnt;
+  Uint64 local_sent_prioa;
+  Uint64 local_sent_priob;
+  Uint64 remote_sent_prioa;
+  Uint64 remote_sent_priob;
+};
+
+void
+mt_get_thr_stat(class SimulatedBlock *, ndb_thr_stat* dst);
+
 #endif

=== modified file 'storage/ndb/src/kernel/vm/mt_thr_config.cpp'
--- a/storage/ndb/src/kernel/vm/mt_thr_config.cpp	2011-09-23 09:13:22 +0000
+++ b/storage/ndb/src/kernel/vm/mt_thr_config.cpp	2011-10-07 08:07:21 +0000
@@ -964,6 +964,14 @@ THRConfigApplier::appendInfo(BaseString&
   }
 }
 
+const char *
+THRConfigApplier::getName(const unsigned short list[], unsigned cnt) const
+{
+  const T_Thread* thr = find_thread(list, cnt);
+  assert(thr != 0);
+  return getEntryName(thr->m_type);
+}
+
 int
 THRConfigApplier::create_cpusets()
 {

=== modified file 'storage/ndb/src/kernel/vm/mt_thr_config.hpp'
--- a/storage/ndb/src/kernel/vm/mt_thr_config.hpp	2011-09-23 09:13:22 +0000
+++ b/storage/ndb/src/kernel/vm/mt_thr_config.hpp	2011-10-07 08:07:21 +0000
@@ -124,6 +124,7 @@ class THRConfigApplier : public THRConfi
 public:
   int create_cpusets();
 
+  const char * getName(const unsigned short list[], unsigned cnt) const;
   void appendInfo(BaseString&, const unsigned short list[], unsigned cnt) const;
   int do_bind(NdbThread*, const unsigned short list[], unsigned cnt);
   int do_bind_io(NdbThread*);

=== modified file 'storage/ndb/src/kernel/vm/ndbd_malloc_impl.hpp'
--- a/storage/ndb/src/kernel/vm/ndbd_malloc_impl.hpp	2011-08-16 08:27:14 +0000
+++ b/storage/ndb/src/kernel/vm/ndbd_malloc_impl.hpp	2011-10-07 13:15:08 +0000
@@ -94,7 +94,7 @@ public:
 private:
   void grow(Uint32 start, Uint32 cnt);
 
-#define XX_RL_COUNT 9
+#define XX_RL_COUNT 10
   /**
    * Return pointer to free page data on page
    */

=== modified file 'storage/ndb/src/mgmsrv/ConfigInfo.cpp'
--- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp	2011-09-02 17:24:52 +0000
+++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp	2011-10-07 16:12:13 +0000
@@ -2122,6 +2122,22 @@ const ConfigInfo::ParamInfo ConfigInfo::
     STR_VALUE(MAX_INT_RNIL)
   },
 
+  {
+    CFG_DB_CRASH_ON_CORRUPTED_TUPLE,
+    "CrashOnCorruptedTuple",
+    DB_TOKEN,
+    "To be failfast or not, when checksum indicates corruption.",
+    ConfigInfo::CI_USED,
+    false,
+    ConfigInfo::CI_BOOL,
+#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,1)
+    "false",
+#else
+    "true",
+#endif
+    "false",
+    "true"},
+
   /***************************************************************************
    * API
    ***************************************************************************/

=== modified file 'storage/ndb/src/ndbapi/Ndb.cpp'
--- a/storage/ndb/src/ndbapi/Ndb.cpp	2011-09-07 17:12:12 +0000
+++ b/storage/ndb/src/ndbapi/Ndb.cpp	2011-10-17 12:43:31 +0000
@@ -2254,13 +2254,31 @@ Ndb::getNdbErrorDetail(const NdbError& e
 void
 Ndb::setCustomData(void* _customDataPtr)
 {
-  theImpl->customDataPtr = _customDataPtr;
+  theImpl->customData = Uint64(_customDataPtr);
 }
 
 void*
 Ndb::getCustomData() const
 {
-  return theImpl->customDataPtr;
+  return (void*)theImpl->customData;
+}
+
+void
+Ndb::setCustomData64(Uint64 _customData)
+{
+  theImpl->customData = _customData;
+}
+
+Uint64
+Ndb::getCustomData64() const
+{
+  return theImpl->customData;
+}
+
+Uint64
+Ndb::getNextTransactionId() const
+{
+  return theFirstTransId;
 }
 
 Uint32

=== modified file 'storage/ndb/src/ndbapi/NdbImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbImpl.hpp	2011-09-09 10:48:14 +0000
+++ b/storage/ndb/src/ndbapi/NdbImpl.hpp	2011-10-17 12:43:31 +0000
@@ -129,7 +129,7 @@ public:
 
   BaseString m_systemPrefix; // Buffer for preformatted for <sys>/<def>/
   
-  void* customDataPtr;
+  Uint64 customData;
 
   Uint64 clientStats[ Ndb::NumClientStatistics ];
   

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilder.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-09-14 10:30:08 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-09-29 11:35:02 +0000
@@ -343,7 +343,8 @@ NdbQueryDef::destroy() const
 void
 NdbQueryDef::print() const
 {
-  m_impl.getQueryOperation(0U).printTree(0, Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32>());
+  m_impl.getQueryOperation(0U)
+    .printTree(0, NdbQueryOperationDefImpl::SiblingMask());
 }
 
 /*************************************************************************
@@ -1188,7 +1189,8 @@ NdbQueryBuilderImpl::prepare()
   if (doPrintQueryTree)
   {
     ndbout << "Query tree:" << endl;
-    def->getQueryOperation(0U).printTree(0, Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32>());
+    def->getQueryOperation(0U)
+      .printTree(0, NdbQueryOperationDefImpl::SiblingMask());
   }
 
   return def;
@@ -2159,7 +2161,8 @@ NdbQueryOperationDefImpl::appendChildPro
  * that connect the tree nodes.
  */
 static void printMargin(Uint32 depth, 
-                        Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32> hasMoreSiblingsMask, 
+                        NdbQueryOperationDefImpl::SiblingMask 
+                        hasMoreSiblingsMask, 
                         bool header)
 {
   if (depth > 0)
@@ -2193,11 +2196,10 @@ static void printMargin(Uint32 depth,
 
 void 
 NdbQueryOperationDefImpl::printTree(Uint32 depth, 
-                                    Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32> 
-                                    hasMoreSiblingsMask) const
+                                    SiblingMask hasMoreSiblingsMask) const
 {
   // Print vertical line leading down to this node.
-  Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32> firstLineMask = hasMoreSiblingsMask;
+  SiblingMask firstLineMask = hasMoreSiblingsMask;
   firstLineMask.set(depth);
   printMargin(depth, firstLineMask, false);
   ndbout << endl;
@@ -2214,22 +2216,24 @@ NdbQueryOperationDefImpl::printTree(Uint
     printMargin(depth, hasMoreSiblingsMask, false);
     ndbout << " index: " << getIndex()->getName() << endl; 
   }
-  /* For each child but the last one, use a mask with an extra bit set to
-   * indicate that there are more siblings.
-   */
-  hasMoreSiblingsMask.set(depth+1);
+
   for (int childNo = 0; 
-       childNo < static_cast<int>(getNoOfChildOperations()) - 1; 
+       childNo < static_cast<int>(getNoOfChildOperations()); 
        childNo++)
   {
-    getChildOperation(childNo).printTree(depth+1, hasMoreSiblingsMask);
-  }
-  if (getNoOfChildOperations() > 0)
-  {
-    // The last child has no more siblings.
-    hasMoreSiblingsMask.clear(depth+1);
-    getChildOperation(getNoOfChildOperations() - 1)
-      .printTree(depth+1, hasMoreSiblingsMask);
+    if (childNo == 0)
+    {
+      /* For each child but the last one, use a mask with an extra bit set to
+       * indicate that there are more siblings.
+       */
+      hasMoreSiblingsMask.set(depth+1);
+    }
+    if (childNo == static_cast<int>(getNoOfChildOperations()) - 1)
+    {
+      // The last child has no more siblings.
+      hasMoreSiblingsMask.clear(depth+1);
+    }
+    getChildOperation(childNo).printTree(depth+1, hasMoreSiblingsMask); 
   }
 } // NdbQueryOperationDefImpl::printTree()
 

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-09-14 10:30:08 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-09-29 11:35:02 +0000
@@ -429,6 +429,12 @@ public:
   // Get type of query operation
   virtual NdbQueryOperationDef::Type getType() const = 0;
 
+  /**
+   * Used for telling if parent at depth n has more siblings. (In that case
+   * we need to draw a horizontal line leading to that sibling.)
+   */
+  typedef Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32> SiblingMask;
+
   /** Print query tree graph to trace file (using recursion).
    * @param depth Number of ancestor nodes that this node has.
    * @param hasMoreSiblingsMask The n'th bit should be set if the n'th ancestor
@@ -436,7 +442,7 @@ public:
    */
   void printTree(
            Uint32 depth, 
-           Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32> hasMoreSiblingsMask) const;
+           SiblingMask hasMoreSiblingsMask) const;
 
 protected:
   // QueryTree building:

=== modified file 'storage/ndb/src/ndbapi/Ndbinit.cpp'
--- a/storage/ndb/src/ndbapi/Ndbinit.cpp	2011-09-09 10:48:14 +0000
+++ b/storage/ndb/src/ndbapi/Ndbinit.cpp	2011-10-17 12:43:31 +0000
@@ -207,7 +207,7 @@ NdbImpl::NdbImpl(Ndb_cluster_connection
     wakeHandler(0),
     wakeContext(~Uint32(0)),
     m_ev_op(0),
-    customDataPtr(0)
+    customData(0)
 {
   int i;
   for (i = 0; i < MAX_NDB_NODES; i++) {

=== modified file 'storage/ndb/src/ndbapi/TransporterFacade.cpp'
--- a/storage/ndb/src/ndbapi/TransporterFacade.cpp	2011-09-09 10:48:14 +0000
+++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp	2011-10-14 13:24:26 +0000
@@ -1191,9 +1191,11 @@ TransporterFacade::sendFragmentedSignal(
       /* This section fits whole, move onto next */
       this_chunk_sz+= remaining_sec_sz;
       i++;
+      continue;
     }
     else
     {
+      assert(this_chunk_sz <= CHUNK_SZ);
       /* This section doesn't fit, truncate it */
       unsigned send_sz= CHUNK_SZ - this_chunk_sz;
       if (i != start_i)
@@ -1205,19 +1207,34 @@ TransporterFacade::sendFragmentedSignal(
          * The final piece does not need to be a multiple of
          * NDB_SECTION_SEGMENT_SZ
          * 
-         * Note that this can push this_chunk_sz above CHUNK_SZ
-         * Should probably round-down, but need to be careful of
-         * 'can't fit any' cases.  Instead, CHUNK_SZ is defined
-         * with some slack below MAX_SENT_MESSAGE_BYTESIZE
+         * We round down the available send space to the nearest whole 
+         * number of segments.
+         * If there's not enough space for one segment, then we round up
+         * to one segment.  This can make us send more than CHUNK_SZ, which
+         * is ok as it's defined as less than the maximum message length.
          */
-	send_sz=
-	  NDB_SECTION_SEGMENT_SZ
-	  *((send_sz+NDB_SECTION_SEGMENT_SZ-1)
-            /NDB_SECTION_SEGMENT_SZ);
-        if (send_sz > remaining_sec_sz)
-	  send_sz= remaining_sec_sz;
+        send_sz = (send_sz / NDB_SECTION_SEGMENT_SZ) * 
+          NDB_SECTION_SEGMENT_SZ;                        /* Round down */
+        send_sz = MAX(send_sz, NDB_SECTION_SEGMENT_SZ);  /* At least one */
+        send_sz = MIN(send_sz, remaining_sec_sz);        /* Only actual data */
+        
+        /* If we've squeezed the last bit of data in, jump out of 
+         * here to send the last fragment.
+         * Otherwise, send what we've collected so far.
+         */
+        if ((send_sz == remaining_sec_sz) &&      /* All sent */
+            (i == secs - 1))                      /* No more sections */
+        {
+          this_chunk_sz+=  remaining_sec_sz;
+          i++;
+          continue;
+        }
       }
 
+      /* At this point, there must be data to send in a further signal */
+      assert((send_sz < remaining_sec_sz) ||
+             (i < secs - 1));
+
       /* Modify tmp generic section ptr to describe truncated
        * section
        */
@@ -1256,9 +1273,6 @@ TransporterFacade::sendFragmentedSignal(
                  tmp_signal.readSignalNumber() == GSN_API_REGREQ);
         }
       }
-      // setup variables for next signal
-      start_i= i;
-      this_chunk_sz= 0;
       assert(remaining_sec_sz >= send_sz);
       Uint32 remaining= remaining_sec_sz - send_sz;
       tmp_ptr[i].sz= remaining;
@@ -1271,6 +1285,10 @@ TransporterFacade::sendFragmentedSignal(
       if (remaining == 0)
         /* This section's done, move onto the next */
 	i++;
+      
+      // setup variables for next signal
+      start_i= i;
+      this_chunk_sz= 0;
     }
   }
 

=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2011-09-22 11:48:33 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2011-10-07 13:15:08 +0000
@@ -493,7 +493,8 @@ ErrorBundle ErrorCodes[] = {
   { 786,  DMEC, NR, "Schema transaction aborted due to node-failure" },
   { 792,  DMEC, SE, "Default value for primary key column not supported" },
   { 794,  DMEC, AE, "Schema feature requires data node upgrade" },
-  
+  { 796,  DMEC, SE, "Out of schema transaction memory" },
+
   /**
    * FunctionNotImplemented
    */

=== modified file 'storage/ndb/test/include/HugoCalculator.hpp'
--- a/storage/ndb/test/include/HugoCalculator.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/include/HugoCalculator.hpp	2011-10-12 10:19:08 +0000
@@ -41,6 +41,7 @@ public:
                      const char* valPtr, Uint32 valLen);
   int getIdValue(NDBT_ResultRow* const pRow) const;
   int getUpdatesValue(NDBT_ResultRow* const pRow) const;
+  int getIdColNo() const { return m_idCol;}
   int isIdCol(int colId) { return m_idCol == colId; };
   int isUpdateCol(int colId){ return m_updatesCol == colId; };
 

=== modified file 'storage/ndb/test/include/HugoOperations.hpp'
--- a/storage/ndb/test/include/HugoOperations.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/include/HugoOperations.hpp	2011-10-12 10:19:08 +0000
@@ -117,7 +117,8 @@ public:
   bool getPartIdForRow(const NdbOperation* pOp, int rowid, Uint32& partId);
   
   int setValues(NdbOperation*, int rowId, int updateId);
-  
+  int setNonPkValues(NdbOperation*, int rowId, int updateId);
+
   int verifyUpdatesValue(int updatesValue, int _numRows = 0);
 
   int indexReadRecords(Ndb*, const char * idxName, int recordNo,

=== modified file 'storage/ndb/test/include/HugoQueryBuilder.hpp'
--- a/storage/ndb/test/include/HugoQueryBuilder.hpp	2011-07-04 13:37:56 +0000
+++ b/storage/ndb/test/include/HugoQueryBuilder.hpp	2011-10-04 06:23:21 +0000
@@ -60,15 +60,9 @@ public:
     /**
      * Query might table scan
      */
-    O_TABLE_SCAN = 0x20,
-
-    /**
-     * If not any options set, random query qill be created
-     */
-    O_RANDOM_OPTIONS = (OptionMask)((~(OptionMask)0) & 
-                                    ~(OptionMask)(O_SCAN | O_LOOKUP))
+    O_TABLE_SCAN = 0x20
   };
-  static const OptionMask OM_RANDOM_OPTIONS = (OptionMask)O_RANDOM_OPTIONS;
+  static const OptionMask OM_RANDOM_OPTIONS = (OptionMask)(O_PK_INDEX | O_UNIQUE_INDEX | O_ORDERED_INDEX | O_TABLE_SCAN);
 
   HugoQueryBuilder(Ndb* ndb, const NdbDictionary::Table**tabptr, 
                    OptionMask om = OM_RANDOM_OPTIONS){

=== modified file 'storage/ndb/test/ndbapi/Makefile.am'
--- a/storage/ndb/test/ndbapi/Makefile.am	2011-09-13 09:10:52 +0000
+++ b/storage/ndb/test/ndbapi/Makefile.am	2011-10-14 13:24:26 +0000
@@ -112,6 +112,8 @@ testMgmd_CXXFLAGS = -I$(top_srcdir)/stor
 testSingleUserMode_SOURCES = testSingleUserMode.cpp
 testNativeDefault_SOURCES = testNativeDefault.cpp
 testNdbApi_SOURCES = testNdbApi.cpp
+testNdbApi_CXXFLAGS = -I$(top_srcdir)/storage/ndb/src/ndbapi \
+	-I$(top_srcdir)/storage/ndb/include/transporter
 testNodeRestart_SOURCES = testNodeRestart.cpp
 testUpgrade_SOURCES = testUpgrade.cpp
 testUpgrade_LDADD = $(LDADD) $(top_srcdir)/libmysql_r/libmysqlclient_r.la

=== modified file 'storage/ndb/test/ndbapi/testNdbApi.cpp'
--- a/storage/ndb/test/ndbapi/testNdbApi.cpp	2011-09-29 06:48:39 +0000
+++ b/storage/ndb/test/ndbapi/testNdbApi.cpp	2011-10-14 13:24:26 +0000
@@ -25,6 +25,8 @@
 #include <random.h>
 #include <NdbTick.h>
 #include <my_sys.h>
+#include <SignalSender.hpp>
+#include <GlobalSignalNumbers.h>
 
 #define MAX_NDB_OBJECTS 32678
 
@@ -4972,6 +4974,635 @@ int runNdbClusterConnectionConnect(NDBT_
   return NDBT_OK;
 }
 
+/* Testing fragmented signal send/receive */
+
+/*
+  SectionStore
+
+  Abstraction of long section storage api.
+  Used by FragmentAssembler to assemble received long sections
+*/
+class SectionStore
+{
+public:
+  virtual ~SectionStore() {};
+  virtual int appendToSection(Uint32 secId, LinearSectionPtr ptr) = 0;
+};
+
+/*
+  Basic Section Store
+
+  Naive implementation using malloc.  Real usage might use something better.
+*/
+class BasicSectionStore : public SectionStore
+{
+public:
+  BasicSectionStore()
+  {
+    init();
+  };
+
+  ~BasicSectionStore()
+  {
+    freeStorage();
+  };
+
+  void init()
+  {
+    ptrs[0].p = NULL;
+    ptrs[0].sz = 0;
+
+    ptrs[2] = ptrs[1] = ptrs[0];
+  }
+
+  void freeStorage()
+  {
+    free(ptrs[0].p);
+    free(ptrs[1].p);
+    free(ptrs[2].p);
+  }
+
+  virtual int appendToSection(Uint32 secId, LinearSectionPtr ptr)
+  {
+    /* Potentially expensive re-alloc + copy */
+    assert(secId < 3);
+    
+    Uint32 existingSz = ptrs[secId].sz;
+    Uint32* existingBuff = ptrs[secId].p;
+
+    Uint32 newSize = existingSz + ptr.sz;
+    Uint32* newBuff = (Uint32*) realloc(existingBuff, newSize * 4);
+
+    if (!newBuff)
+      return -1;
+    
+    memcpy(newBuff + existingSz, ptr.p, ptr.sz * 4);
+    
+    ptrs[secId].p = newBuff;
+    ptrs[secId].sz = existingSz + ptr.sz;
+
+    return 0;
+  }
+    
+  LinearSectionPtr ptrs[3];
+};
+
+
+
+/*
+  FragmentAssembler
+
+  Used to assemble sections from multiple fragment signals, and 
+  produce a 'normal' signal.
+  
+  Requires a SectionStore implementation to accumulate the section
+  fragments
+
+  Might be useful generic utility, or not.
+
+  Usage : 
+    FragmentAssembler fa(ss);
+    while (!fa.isComplete())
+    {
+      sig = waitSignal();
+      fa.handleSignal(sig, sections);
+    }
+
+    fa.getSignalHeader();
+    fa.getSignalBody();
+    fa.getSectionStore(); ..
+
+*/
+class FragmentAssembler
+{
+public:
+  enum AssemblyError
+  {
+    NoError = 0,
+    FragmentSequence = 1,
+    FragmentSource = 2,
+    FragmentIdentity = 3,
+    SectionAppend = 4
+  };
+
+  FragmentAssembler(SectionStore* _secStore):
+    secsReceived(0),
+    secStore(_secStore),
+    complete(false),
+    fragId(0),
+    sourceNode(0),
+    error(NoError)
+  {}
+
+  int handleSignal(const SignalHeader* sigHead,
+                   const Uint32* sigBody,
+                   LinearSectionPtr* sections)
+  {
+    Uint32 sigLen = sigHead->theLength;
+    
+    if (fragId == 0)
+    {
+      switch (sigHead->m_fragmentInfo)
+      {
+      case 0:
+      {
+        /* Not fragmented, pass through */
+        sh = *sigHead;
+        memcpy(signalBody, sigBody, sigLen * 4);
+        Uint32 numSecs = sigHead->m_noOfSections;
+        for (Uint32 i=0; i<numSecs; i++)
+        {
+          if (secStore->appendToSection(i, sections[i]) != 0)
+          {
+            error = SectionAppend;
+            return -1;
+          }
+        }
+        complete = true;
+        break;
+      }
+      case 1:
+      {
+        /* Start of fragmented signal */
+        Uint32 incomingFragId;
+        Uint32 incomingSourceNode;
+        Uint32 numSecsInFragment;
+        
+        if (handleFragmentSections(sigHead, sigBody, sections,
+                                   &incomingFragId, &incomingSourceNode,
+                                   &numSecsInFragment) != 0)
+          return -1;
+        
+        assert(incomingFragId != 0);
+        fragId = incomingFragId;
+        sourceNode = incomingSourceNode;
+        assert(numSecsInFragment > 0);
+        
+        break;
+      }
+      default:
+      {
+        /* Error, out of sequence fragment */
+        error = FragmentSequence;
+        return -1;
+        break;
+      }
+      }
+    }
+    else
+    {
+      /* FragId != 0 */
+      switch (sigHead->m_fragmentInfo)
+      {
+      case 0:
+      case 1:
+      {
+        /* Error, out of sequence fragment */
+        error = FragmentSequence;
+        return -1;
+      }
+      case 2:
+        /* Fall through */
+      case 3:
+      {
+        /* Body fragment */
+        Uint32 incomingFragId;
+        Uint32 incomingSourceNode;
+        Uint32 numSecsInFragment;
+        
+        if (handleFragmentSections(sigHead, sigBody, sections,
+                                   &incomingFragId, &incomingSourceNode,
+                                   &numSecsInFragment) != 0)
+          return -1;
+
+        if (incomingSourceNode != sourceNode)
+        {
+          /* Error in source node */
+          error = FragmentSource;
+          return -1;
+        }
+        if (incomingFragId != fragId)
+        {
+          error = FragmentIdentity;
+          return -1;
+        }
+        
+        if (sigHead->m_fragmentInfo == 3)
+        {
+          /* Final fragment, contains actual signal body */
+          memcpy(signalBody,
+                 sigBody,
+                 sigLen * 4);
+          sh = *sigHead;
+          sh.theLength = sigLen - (numSecsInFragment + 1);
+          sh.m_noOfSections = 
+            ((secsReceived & 4)? 1 : 0) +
+            ((secsReceived & 2)? 1 : 0) +
+            ((secsReceived & 1)? 1 : 0);
+          sh.m_fragmentInfo = 0;
+          
+          complete=true;
+        }
+        break;
+      }
+      default:
+      {
+        /* Bad fragmentinfo field */
+        error = FragmentSequence;
+        return -1;
+      }
+      }
+    }
+
+    return 0;
+  }
+
+  int handleSignal(NdbApiSignal* signal,
+                   LinearSectionPtr* sections)
+  {
+    return handleSignal(signal, signal->getDataPtr(), sections);
+  }
+
+  bool isComplete()
+  {
+    return complete;
+  }
+
+  /* Valid if isComplete() */
+  SignalHeader getSignalHeader()
+  {
+    return sh;
+  }
+  
+  /* Valid if isComplete() */
+  Uint32* getSignalBody()
+  {
+    return signalBody;
+  }
+
+  /* Valid if isComplete() */
+  Uint32 getSourceNode()
+  {
+    return sourceNode;
+  }
+
+  SectionStore* getSectionStore()
+  {
+    return secStore;
+  }
+
+  AssemblyError getError() const
+  {
+    return error;
+  }
+  
+private:
+  int handleFragmentSections(const SignalHeader* sigHead,
+                             const Uint32* sigBody,
+                             LinearSectionPtr* sections,
+                             Uint32* incomingFragId,
+                             Uint32* incomingSourceNode,
+                             Uint32* numSecsInFragment)
+  {
+    Uint32 sigLen = sigHead->theLength;
+    
+    *numSecsInFragment = sigHead->m_noOfSections;
+    assert(sigLen >= (1 + *numSecsInFragment));
+           
+    *incomingFragId = sigBody[sigLen - 1];
+    *incomingSourceNode = refToNode(sigHead->theSendersBlockRef);
+    const Uint32* secIds = &sigBody[sigLen - (*numSecsInFragment) - 1];
+    
+    for (Uint32 i=0; i < *numSecsInFragment; i++)
+    {
+      secsReceived |= (1 << secIds[i]);
+      
+      if (secStore->appendToSection(secIds[i], sections[i]) != 0)
+      {
+        error = SectionAppend;
+        return -1;
+      }
+    }
+    
+    return 0;
+  }
+
+  Uint32 secsReceived;
+  SectionStore* secStore;
+  bool complete;
+  Uint32 fragId;
+  Uint32 sourceNode;
+  SignalHeader sh;
+  Uint32 signalBody[NdbApiSignal::MaxSignalWords];
+  AssemblyError error;
+};                 
+
+static const Uint32 MAX_SEND_BYTES=32768; /* Align with TransporterDefinitions.hpp */
+static const Uint32 MAX_SEND_WORDS=MAX_SEND_BYTES/4;
+static const Uint32 SEGMENT_WORDS= 60; /* Align with SSPool etc */
+static const Uint32 SEGMENT_BYTES = SEGMENT_WORDS * 4;
+//static const Uint32 MAX_SEGS_PER_SEND=64; /* 6.3 */
+static const Uint32 MAX_SEGS_PER_SEND = (MAX_SEND_BYTES / SEGMENT_BYTES) - 2; /* Align with TransporterFacade.cpp */
+static const Uint32 MAX_WORDS_PER_SEND = MAX_SEGS_PER_SEND * SEGMENT_WORDS;
+static const Uint32 HALF_MAX_WORDS_PER_SEND = MAX_WORDS_PER_SEND / 2;
+static const Uint32 THIRD_MAX_WORDS_PER_SEND = MAX_WORDS_PER_SEND / 3;
+static const Uint32 MEDIUM_SIZE = 5000;
+
+/* Most problems occurred with sections lengths around the boundary
+ * of the max amount sent - MAX_WORDS_PER_SEND, so we define interesting
+ * sizes so that we test behavior around these boundaries
+ */
+static Uint32 interestingSizes[] = 
+{
+  0,
+  1, 
+  MEDIUM_SIZE,
+  THIRD_MAX_WORDS_PER_SEND -1,
+  THIRD_MAX_WORDS_PER_SEND,
+  THIRD_MAX_WORDS_PER_SEND +1,
+  HALF_MAX_WORDS_PER_SEND -1,
+  HALF_MAX_WORDS_PER_SEND,
+  HALF_MAX_WORDS_PER_SEND + 1,
+  MAX_WORDS_PER_SEND -1, 
+  MAX_WORDS_PER_SEND, 
+  MAX_WORDS_PER_SEND + 1,
+  (2* MAX_SEND_WORDS) + 1,
+  1234 /* Random */
+};
+
+
+/* 
+   FragSignalChecker
+
+   Class for testing fragmented signal send + receive
+*/
+class FragSignalChecker
+{
+public:
+
+  Uint32* buffer;
+
+  FragSignalChecker()
+  {
+    buffer= NULL;
+    init();
+  }
+
+  ~FragSignalChecker()
+  {
+    free(buffer);
+  }
+
+  void init()
+  {
+    buffer = (Uint32*) malloc(getBufferSize());
+
+    if (buffer)
+    {
+      /* Init to a known pattern */
+      for (Uint32 i = 0; i < (getBufferSize()/4); i++)
+      {
+        buffer[i] = i;
+      }
+    }
+  }
+
+  static Uint32 getNumInterestingSizes()
+  {
+    return sizeof(interestingSizes) / sizeof(Uint32);
+  }
+
+  static Uint32 getNumIterationsRequired()
+  {
+    /* To get combinatorial coverage, need each of 3
+     * sections with each of the interesting sizes
+     */
+    Uint32 numSizes = getNumInterestingSizes();
+    return numSizes * numSizes * numSizes;
+  }
+
+  static Uint32 getSecSz(Uint32 secNum, Uint32 iter)
+  {
+    assert(secNum < 3);
+    Uint32 numSizes = getNumInterestingSizes();
+    Uint32 divisor = (secNum == 0 ? 1 : 
+                      secNum == 1 ? numSizes :
+                      numSizes * numSizes);
+    /* offset ensures only end sections are 0 length */
+    Uint32 index = (iter / divisor) % numSizes;
+    if ((index == 0) && (iter >= (divisor * numSizes)))
+      index = 1; /* Avoid lower numbered section being empty */
+    Uint32 value = interestingSizes[index];
+    if(value == 1234)
+    {
+      value = 1 + (rand() % (2* MAX_WORDS_PER_SEND));
+    }
+    return value;
+  }
+
+  static Uint32 getBufferSize()
+  {
+    const Uint32 MaxSectionWords = (2 * MAX_SEND_WORDS) + 1;
+    const Uint32 MaxTotalSectionsWords = MaxSectionWords * 3;
+    return MaxTotalSectionsWords * 4;
+  }
+
+  int sendRequest(SignalSender* ss, 
+                  Uint32* sizes)
+  {
+    /* 
+     * We want to try out various interactions between the
+     * 3 sections and the length of the data sent
+     * - All fit in one 'chunk'
+     * - None fit in one 'chunk'
+     * - Each ends on a chunk boundary
+     *
+     * Max send size is ~ 32kB
+     * Segment size is 60 words / 240 bytes
+     *  -> 136 segments / chunk
+     *  -> 134 segments / chunk 'normally' sent
+     *  -> 32160 bytes
+     */
+    g_err << "Sending "
+          << sizes[0]
+          << " " << sizes[1]
+          << " " << sizes[2]
+          << endl;
+    
+    const Uint32 numSections = 
+      (sizes[0] ? 1 : 0) + 
+      (sizes[1] ? 1 : 0) + 
+      (sizes[2] ? 1 : 0);
+    const Uint32 testType = 40;
+    const Uint32 fragmentLength = 1;
+    const Uint32 print = 1;
+    const Uint32 len = 5 + numSections;
+    SimpleSignal request(false);
+    
+    Uint32* signalBody = request.getDataPtrSend();
+    signalBody[0] = ss->getOwnRef();
+    signalBody[1] = testType;
+    signalBody[2] = fragmentLength;
+    signalBody[3] = print;
+    signalBody[4] = 0; /* Return count */
+    signalBody[5] = sizes[0];
+    signalBody[6] = sizes[1];
+    signalBody[7] = sizes[2];
+    
+    
+    request.ptr[0].sz = sizes[0];
+    request.ptr[0].p = &buffer[0];
+    request.ptr[1].sz = sizes[1];
+    request.ptr[1].p = &buffer[sizes[0]];
+    request.ptr[2].sz = sizes[2];
+    request.ptr[2].p = &buffer[sizes[0] + sizes[1]];
+    
+    request.header.m_noOfSections= numSections;
+    
+    int rc = 0;
+    ss->lock();
+    rc = ss->sendFragmentedSignal(ss->get_an_alive_node(),
+                                  request,
+                                  CMVMI,
+                                  GSN_TESTSIG,
+                                  len);
+    ss->unlock();
+    
+    if (rc != 0)
+    {
+      g_err << "Error sending signal" << endl;
+      return rc;
+    }
+    
+    return 0;
+  }
+
+  int waitResponse(SignalSender* ss,
+                   Uint32* expectedSz)
+  {
+    /* Here we need to wait for all of the signals which
+     * comprise a fragmented send, and check that
+     * the data is as expected
+     */
+    BasicSectionStore bss;
+    FragmentAssembler fa(&bss);
+    
+    while(true)
+    {
+      ss->lock();
+      SimpleSignal* response = ss->waitFor(10000);
+      ss->unlock();
+      
+      if (!response)
+      {
+        g_err << "Timed out waiting for response" << endl;
+        return -1;
+      }
+      
+      //response->print();
+      
+      if (response->header.theVerId_signalNumber == GSN_TESTSIG)
+      {
+        if (fa.handleSignal(&response->header,
+                            response->getDataPtr(),
+                            response->ptr) != 0)
+        {
+          g_err << "Error assembling fragmented signal."
+                << "  Error is "
+                << (Uint32) fa.getError()
+                << endl;
+          return -1;
+        }
+        
+        if (fa.isComplete())
+        {
+          Uint32 expectedWord = 0;
+          for (Uint32 i=0; i < 3; i++)
+          {
+            if (bss.ptrs[i].sz != expectedSz[i])
+            {
+              g_err << "Wrong size for section : "
+                    << i
+                    << " expected " << expectedSz[i]
+                    << " but received " << bss.ptrs[i].sz
+                    << endl;
+              return -1;
+            }
+            
+            for (Uint32 d=0; d < expectedSz[i]; d++)
+            {
+              if (bss.ptrs[i].p[d] != expectedWord)
+              {
+                g_err << "Bad data in section "
+                      << i
+                      << " at word number "
+                      << d
+                      << ".  Expected "
+                      << expectedWord
+                      << " but found "
+                      << bss.ptrs[i].p[d]
+                      << endl;
+                return -1;
+              }
+              expectedWord++;
+            }
+          }
+          
+          break;
+        }
+        
+      }
+    }
+    
+    return 0;
+  }
+  
+  int runTest(SignalSender* ss)
+  {
+    for (Uint32 iter=0; 
+         iter < getNumIterationsRequired(); 
+         iter++)
+    {
+      int rc;
+      Uint32 sizes[3];
+      sizes[0] = getSecSz(0, iter);
+      sizes[1] = getSecSz(1, iter);
+      sizes[2] = getSecSz(2, iter);
+      
+      /* Build request, including sections */
+      rc = sendRequest(ss, sizes);
+      if (rc != 0)
+      {
+        g_err << "Failed sending request on iteration " << iter 
+              << " with rc " << rc << endl;
+        return NDBT_FAILED;
+      }
+      
+      /* Wait for response */
+      rc = waitResponse(ss, sizes);
+      if (rc != 0)
+      {
+        g_err << "Failed waiting for response on iteration " << iter
+              << " with rc " << rc << endl;
+        return NDBT_FAILED;
+      }
+    }
+    
+    return NDBT_OK;
+  }
+};
+
+
+int testFragmentedSend(NDBT_Context* ctx, NDBT_Step* step){
+  Ndb* pNdb= GETNDB(step);
+  Ndb_cluster_connection* conn = &pNdb->get_ndb_cluster_connection();
+  SignalSender ss(conn);
+  FragSignalChecker fsc;
+  
+  return fsc.runTest(&ss);
+}
+
+
 
 NDBT_TESTSUITE(testNdbApi);
 TESTCASE("MaxNdb", 
@@ -5245,6 +5876,10 @@ TESTCASE("NdbClusterConnectSR",
   STEPS(runNdbClusterConnect, MAX_NODES);
   STEP(runRestarts); // Note after runNdbClusterConnect or else counting wrong
 }
+TESTCASE("TestFragmentedSend",
+         "Test fragmented send behaviour"){
+  INITIALIZER(testFragmentedSend);
+}
 NDBT_TESTSUITE_END(testNdbApi);
 
 int main(int argc, const char** argv){

=== modified file 'storage/ndb/test/ndbapi/testNodeRestart.cpp'
--- a/storage/ndb/test/ndbapi/testNodeRestart.cpp	2011-08-19 09:38:29 +0000
+++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp	2011-10-18 12:45:50 +0000
@@ -4728,17 +4728,23 @@ int runSplitLatency25PctFail(NDBT_Contex
   /**
    * Now wait for half of cluster to die...
    */
-  ndbout_c("Waiting for half of cluster to die");
-  int not_started = 0;
   const int node_count = restarter.getNumDbNodes();
+  ndbout_c("Waiting for half of cluster (%u/%u) to die", node_count/2, node_count);
+  int not_started = 0;
   do
   {
     not_started = 0;
     for (int i = 0; i < node_count; i++)
     {
-      if (restarter.getNodeStatus(restarter.getDbNodeId(i)) == NDB_MGM_NODE_STATUS_NOT_STARTED)
+      int nodeId = restarter.getDbNodeId(i);
+      int status = restarter.getNodeStatus(nodeId);
+      ndbout_c("Node %u status %u", nodeId, status);
+      if (status == NDB_MGM_NODE_STATUS_NOT_STARTED)
         not_started++;
     }
+    NdbSleep_MilliSleep(2000);
+    ndbout_c("%u / %u in state NDB_MGM_NODE_STATUS_NOT_STARTED(%u)",
+             not_started, node_count, NDB_MGM_NODE_STATUS_NOT_STARTED);
   } while (2 * not_started != node_count);
 
   ndbout_c("Restarting cluster");

=== modified file 'storage/ndb/test/run-test/atrt.hpp'
--- a/storage/ndb/test/run-test/atrt.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/run-test/atrt.hpp	2011-10-03 11:06:06 +0000
@@ -187,9 +187,23 @@ extern int          g_baseport;
 extern int          g_fqpn;
 extern int          g_fix_nodeid;
 extern int          g_default_ports;
+extern int          g_restart;
 
 extern const char * g_clusters;
 
+/**
+ * Since binaries move location between 5.1 and 5.5
+ *   we keep full path to them here
+ */
+char * find_bin_path(const char * basename);
+extern const char * g_ndb_mgmd_bin_path;
+extern const char * g_ndbd_bin_path;
+extern const char * g_ndbmtd_bin_path;
+extern const char * g_mysqld_bin_path;
+extern const char * g_mysql_install_db_bin_path;
+
+extern const char * g_search_path[];
+
 #ifdef _WIN32
 #include <direct.h>
 

=== added file 'storage/ndb/test/run-test/conf-daily-perf.cnf'
--- a/storage/ndb/test/run-test/conf-daily-perf.cnf	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/test/run-test/conf-daily-perf.cnf	2011-10-16 18:36:16 +0000
@@ -0,0 +1,64 @@
+[atrt]
+basedir = CHOOSE_dir
+baseport = 14000
+clusters = .4node
+fix-nodeid=1
+mt = 2
+
+[ndb_mgmd]
+
+[mysqld]
+skip-innodb
+loose-skip-bdb
+skip-grant-tables
+socket=mysql.sock
+
+ndbcluster=1
+ndb-force-send=1
+ndb-use-exact-count=0
+ndb-extra-logging=1
+ndb-autoincrement-prefetch-sz=256
+engine-condition-pushdown=1
+ndb-cluster-connection-pool=4
+
+key_buffer = 256M
+max_allowed_packet = 16M
+sort_buffer_size = 512K
+read_buffer_size = 256K
+read_rnd_buffer_size = 512K
+myisam_sort_buffer_size = 8M
+max-connections=200
+thread-cache-size=128
+
+query_cache_type = 0
+query_cache_size = 0
+table_open_cache=1024
+table_definition_cache=256
+
+[client]
+protocol=tcp
+
+[cluster_config.4node]
+ndb_mgmd = CHOOSE_host1
+ndbd = CHOOSE_host5,CHOOSE_host6,CHOOSE_host7,CHOOSE_host8
+ndbapi= CHOOSE_host2,CHOOSE_host3,CHOOSE_host4
+mysqld = CHOOSE_host1
+
+NoOfReplicas = 2
+IndexMemory = 250M
+DataMemory = 1500M
+BackupMemory = 64M
+MaxNoOfConcurrentScans = 100
+MaxNoOfSavedMessages= 5
+NoOfFragmentLogFiles = 8
+FragmentLogFileSize = 64M
+ODirect=1
+MaxNoOfExecutionThreads=8
+
+SharedGlobalMemory=256M
+DiskPageBufferMemory=256M
+FileSystemPath=/data0/autotest
+FileSystemPathDataFiles=/data1/autotest
+FileSystemPathUndoFiles=/data2/autotest
+InitialLogfileGroup=undo_buffer_size=64M;undofile01.dat:256M;undofile02.dat:256M
+InitialTablespace=datafile01.dat:256M;datafile02.dat:256M

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2011-09-27 05:37:30 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2011-10-14 13:24:26 +0000
@@ -96,6 +96,10 @@ max-time: 600
 cmd: atrt-testBackup
 args: -n Bug57650 T1
 
+max-time: 1000
+cmd: atrt-testBackup
+args: -n BackupBank T6 
+
 # BASIC FUNCTIONALITY
 max-time: 500
 cmd: testBasic
@@ -356,6 +360,18 @@ max-time: 300
 cmd: testIndex
 args: -n FireTrigOverload T1
 
+max-time: 500
+cmd: testIndex
+args: -n Bug25059 -r 3000 T1
+
+max-time: 2500
+cmd: testIndex
+args: -l 2 -n SR1 T6 T13 
+
+max-time: 2500
+cmd: testIndex
+args: -l 2 -n SR1_O T6 T13 
+
 #
 # SCAN TESTS
 #
@@ -713,6 +729,10 @@ args: -n NFNR1_O T6 T13
 
 max-time: 2500
 cmd: testIndex
+args: -n NFNR2 T6 T13 
+
+max-time: 2500
+cmd: testIndex
 args: -n NFNR2_O T6 T13 
 
 max-time: 2500
@@ -809,6 +829,10 @@ max-time: 500
 cmd: testDict
 args: -n Bug54651 T1
 
+max-time: 1500
+cmd: testDict
+args: -n CreateMaxTables T6 
+
 #
 # TEST NDBAPI
 #
@@ -1034,6 +1058,14 @@ max-time: 5000
 cmd: testSystemRestart
 args: -n SR_UNDO T6 
 
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR3 T6 
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR4 T6 
+
 #
 max-time: 5000
 cmd: testSystemRestart
@@ -1185,6 +1217,26 @@ max-time: 2500
 cmd: testNodeRestart
 args: -n RestartRandomNodeError T6 T13 
 
+max-time: 2500
+cmd: testNodeRestart
+args: -l 1 -n MixedReadUpdateScan 
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n Terror T6 T13 
+
+max-time: 3600
+cmd: testNodeRestart
+args: -l 1 -n RestartNFDuringNR T6 T13 
+
+max-time: 3600
+cmd: testNodeRestart
+args: -n RestartNodeDuringLCP T6 
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n FiftyPercentStopAndWait T6 T13 
+
 #
 # MGMAPI AND MGSRV
 #
@@ -1366,6 +1418,10 @@ max-time: 600
 cmd: testSystemRestart
 args: -n Bug22696 T1
 
+max-time: 1000
+cmd: testSRBank
+args: -n SR -l 300 -r 15 T1
+
 max-time: 600
 cmd: testNodeRestart
 args: -n pnr --nologging T1
@@ -1774,3 +1830,8 @@ max-time: 500
 cmd: testNdbApi
 args: -n NdbClusterConnectSR T1
 
+# Fragmented signal send
+max-time: 1800
+cmd: testNdbApi
+args: -n TestFragmentedSend T1
+

=== modified file 'storage/ndb/test/run-test/daily-devel-tests.txt'
--- a/storage/ndb/test/run-test/daily-devel-tests.txt	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/run-test/daily-devel-tests.txt	2011-10-05 13:18:31 +0000
@@ -15,10 +15,6 @@
 #
 # BACKUP
 #
-max-time: 1000
-cmd: atrt-testBackup
-args: -n BackupBank T6 
-
 max-time: 500
 cmd: testNdbApi
 args: -n MaxOperations T1 T6 T13 
@@ -27,18 +23,6 @@ max-time: 1500
 cmd: testDict
 args: -n CreateTableWhenDbIsFull T6 
 
-max-time: 1500
-cmd: testDict
-args: -n CreateMaxTables T6 
-
-max-time: 500
-cmd: testIndex
-args: -n Bug25059 -r 3000 T1
-
-max-time: 1000
-cmd: testSRBank
-args: -n SR -l 300 -r 15 T1
-
 max-time: 1000
 cmd: testSRBank
 args: -n NR -l 300 -r 15 T1
@@ -51,14 +35,6 @@ args: -n Mix -l 300 -r 15 T1
 #
 # SYSTEM RESTARTS
 #
-max-time: 1500
-cmd: testSystemRestart
-args: -n SR3 T6 
-
-max-time: 1500
-cmd: testSystemRestart
-args: -n SR4 T6 
-
 #
 max-time: 1500
 cmd: testSystemRestart
@@ -73,28 +49,8 @@ args: -l 1 -n MixedPkReadPkUpdate
 
 max-time: 2500
 cmd: testNodeRestart
-args: -l 1 -n MixedReadUpdateScan 
-
-max-time: 2500
-cmd: testNodeRestart
-args: -n Terror T6 T13 
-
-max-time: 2500
-cmd: testNodeRestart
 args: -n FullDb T6 T13 
 
-max-time: 3600
-cmd: testNodeRestart
-args: -l 1 -n RestartNFDuringNR T6 T13 
-
-max-time: 3600
-cmd: testNodeRestart
-args: -n RestartNodeDuringLCP T6 
-
-max-time: 2500
-cmd: testNodeRestart
-args: -n FiftyPercentStopAndWait T6 T13 
-
 max-time: 500
 cmd: testNodeRestart
 args: -n Bug16772 T1
@@ -106,24 +62,12 @@ args: -n Bug16772 T1
 #
 max-time: 2500
 cmd: testIndex
-args: -n NFNR2 T6 T13 
-
-max-time: 2500
-cmd: testIndex
 args: -n NFNR3 T6 T13 
 
 max-time: 2500
 cmd: testIndex
-args: -l 2 -n SR1 T6 T13 
-
-max-time: 2500
-cmd: testIndex
 args: -n NFNR3_O T6 T13 
 
-max-time: 2500
-cmd: testIndex
-args: -l 2 -n SR1_O T6 T13 
-
 # dict trans
 max-time: 1800
 cmd: testDict

=== added file 'storage/ndb/test/run-test/daily-perf-tests.txt'
--- a/storage/ndb/test/run-test/daily-perf-tests.txt	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/test/run-test/daily-perf-tests.txt	2011-10-16 18:36:16 +0000
@@ -0,0 +1,140 @@
+# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+max-time: 300
+cmd: DbCreate
+args:
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 1
+type: bench
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 25
+type: bench
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 100
+type: bench
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 200
+type: bench
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 1 -proc 25
+type: bench
+
+# baseline
+max-time: 600
+cmd: flexAsynch
+args: -temp -con 2 -t 8 -r 2 -p 64 -ndbrecord
+type: bench
+
+# minimal record
+max-time: 600
+cmd: flexAsynch
+args: -temp -con 2 -t 8 -r 2 -p 64 -ndbrecord -a 2
+type: bench
+
+# 4k record
+max-time: 600
+cmd: flexAsynch
+args: -temp -con 2 -t 8 -r 2 -p 64 -ndbrecord -a 25 -s 40
+type: bench
+
+# baseline DD
+max-time: 600
+cmd: flexAsynch
+args: -dd -temp -con 2 -t 8 -r 2 -p 64 -ndbrecord
+type: bench
+
+# minimal record DD
+max-time: 600
+cmd: flexAsynch
+args: -dd -temp -con 2 -t 8 -r 2 -p 64 -ndbrecord -a 2
+type: bench
+
+# 4k record DD
+max-time: 600
+cmd: flexAsynch
+args: -dd -temp -con 2 -t 8 -r 2 -p 64 -ndbrecord -a 25 -s 40
+type: bench
+
+# sql
+max-time: 600
+client: ndb-sql-perf-create-table.sh
+args: t1
+
+max-time: 600
+client: ndb-sql-perf-select.sh
+args: t1 1 64
+mysqld: --ndb-cluster-connection-pool=1
+type: bench
+
+max-time: 600
+client: ndb-sql-perf-select.sh
+args: t1 1 64
+mysqld: --ndb-cluster-connection-pool=4
+type: bench
+
+max-time: 600
+client: ndb-sql-perf-update.sh
+args: t1 1 64
+mysqld: --ndb-cluster-connection-pool=1
+type: bench
+
+max-time: 600
+client: ndb-sql-perf-update.sh
+args: t1 1 64
+mysqld: --ndb-cluster-connection-pool=4
+type: bench
+
+max-time: 600
+client: ndb-sql-perf-drop-table.sh
+args: t1
+mysqld:
+
+# sql join
+max-time: 600
+client: ndb-sql-perf-load-tpcw.sh
+args:
+
+max-time: 600
+client: ndb-sql-perf-tpcw-getBestSeller.sh
+args:
+type: bench
+
+max-time: 600
+client: ndb-sql-perf-drop-tpcw.sh
+args:
+
+max-time: 600
+client: ndb-sql-perf-load-music-store.sh
+args:
+
+max-time: 600
+client: ndb-sql-perf-select-music-store.sh
+args:
+type: bench
+
+max-time: 600
+client: ndb-sql-perf-drop-music-store.sh
+args:
+

=== modified file 'storage/ndb/test/run-test/files.cpp'
--- a/storage/ndb/test/run-test/files.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/run-test/files.cpp	2011-10-03 08:46:52 +0000
@@ -179,8 +179,8 @@ setup_files(atrt_config& config, int set
 	  const char * val;
 	  require(proc.m_options.m_loaded.get("--datadir=", &val));
 	  BaseString tmp;
-	  tmp.assfmt("%s/bin/mysql_install_db --defaults-file=%s/my.cnf --datadir=%s > %s/mysql_install_db.log 2>&1",
-		     g_prefix, g_basedir, val, proc.m_proc.m_cwd.c_str());
+	  tmp.assfmt("%s --defaults-file=%s/my.cnf --datadir=%s > %s/mysql_install_db.log 2>&1",
+		     g_mysql_install_db_bin_path, g_basedir, val, proc.m_proc.m_cwd.c_str());
 
           to_fwd_slashes(tmp);
 	  if (sh(tmp.c_str()) != 0)
@@ -305,8 +305,13 @@ setup_files(atrt_config& config, int set
 	  }
 	  fprintf(fenv, "\"\nexport CMD\n");
 	}
-	
-	fprintf(fenv, "PATH=%s/bin:%s/libexec:$PATH\n", g_prefix, g_prefix);
+
+        fprintf(fenv, "PATH=");
+        for (int i = 0; g_search_path[i] != 0; i++)
+        {
+          fprintf(fenv, "%s/%s:", g_prefix, g_search_path[i]);
+        }
+        fprintf(fenv, "$PATH\n");
 	keys.push_back("PATH");
 	for (size_t k = 0; k<keys.size(); k++)
 	  fprintf(fenv, "export %s\n", keys[k].c_str());
@@ -314,7 +319,7 @@ setup_files(atrt_config& config, int set
 	fclose(fenv);
       }
       free(env);
-      
+
       {
         tmp.assfmt("%s/ssh-login.sh", proc.m_proc.m_cwd.c_str());
         FILE* fenv = fopen(tmp.c_str(), "w+");

=== modified file 'storage/ndb/test/run-test/main.cpp'
--- a/storage/ndb/test/run-test/main.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/run-test/main.cpp	2011-10-03 14:59:24 +0000
@@ -66,6 +66,7 @@ int          g_fix_nodeid= 0;
 int          g_default_ports = 0;
 int          g_mt = 0;
 int          g_mt_rr = 0;
+int          g_restart = 0;
 
 const char * g_cwd = 0;
 const char * g_basedir = 0;
@@ -80,6 +81,37 @@ const char * g_dummy;
 char * g_env_path = 0;
 const char* g_mysqld_host = 0;
 
+const char * g_ndb_mgmd_bin_path = 0;
+const char * g_ndbd_bin_path = 0;
+const char * g_ndbmtd_bin_path = 0;
+const char * g_mysqld_bin_path = 0;
+const char * g_mysql_install_db_bin_path = 0;
+
+static struct
+{
+  bool is_required;
+  const char * exe;
+  const char ** var;
+} g_binaries[] = {
+  { true,  "ndb_mgmd",         &g_ndb_mgmd_bin_path},
+  { true,  "ndbd",             &g_ndbd_bin_path },
+  { false, "ndbmtd",           &g_ndbmtd_bin_path },
+  { true,  "mysqld",           &g_mysqld_bin_path },
+  { true,  "mysql_install_db", &g_mysql_install_db_bin_path },
+  { true, 0, 0 }
+};
+
+const char *
+g_search_path[] =
+{
+  "bin",
+  "libexec",
+  "sbin",
+  "scripts",
+  0
+};
+static bool find_binaries();
+
 static struct my_option g_options[] =
 {
   { "help", '?', "Display this help and exit.", 
@@ -178,6 +210,12 @@ main(int argc, char ** argv)
   }
   
   g_logger.info("Starting...");
+
+  if (!find_binaries())
+  {
+    goto end;
+  }
+
   g_config.m_generated = false;
   g_config.m_replication = g_replicate;
   if (!setup_config(g_config, g_mysqld_host))
@@ -638,6 +676,9 @@ parse_args(int argc, char** argv)
       case 'q':
 	g_do_quit = 1;
 	break;
+      case 'r':
+        g_restart = 1;
+        break;
       default:
 	g_logger.error("Unknown switch '%c'", *arg);
 	return false;
@@ -796,7 +837,11 @@ parse_args(int argc, char** argv)
 
 bool
 connect_hosts(atrt_config& config){
-  for(size_t i = 0; i<config.m_hosts.size(); i++){
+  for(size_t i = 0; i<config.m_hosts.size(); i++)
+  {
+    if (config.m_hosts[i]->m_hostname.length() == 0)
+      continue;
+
     if(config.m_hosts[i]->m_cpcd->connect() != 0){
       g_logger.error("Unable to connect to cpc %s:%d",
 		     config.m_hosts[i]->m_cpcd->getHost(),
@@ -1088,7 +1133,11 @@ update_status(atrt_config& config, int){
   
   Vector<SimpleCpcClient::Process> dummy;
   m_procs.fill(config.m_hosts.size(), dummy);
-  for(size_t i = 0; i<config.m_hosts.size(); i++){
+  for(size_t i = 0; i<config.m_hosts.size(); i++)
+  {
+    if (config.m_hosts[i]->m_hostname.length() == 0)
+      continue;
+
     Properties p;
     config.m_hosts[i]->m_cpcd->list_processes(m_procs[i], p);
   }
@@ -1261,11 +1310,14 @@ setup_test_case(atrt_config& config, con
        proc.m_type == atrt_process::AP_CLIENT)
     {
       BaseString cmd;
-      if (tc.m_command.c_str()[0] != '/')
+      char * p = find_bin_path(tc.m_command.c_str());
+      if (p == 0)
       {
-        cmd.appfmt("%s/bin/", g_prefix);
+        g_logger.critical("Failed to locate '%s'", tc.m_command.c_str());
+        return false;
       }
-      cmd.append(tc.m_command.c_str());
+      cmd.assign(p);
+      free(p);
 
       if (0) // valgrind
       {
@@ -1299,6 +1351,9 @@ gather_result(atrt_config& config, int *
 
   for(size_t i = 0; i<config.m_hosts.size(); i++)
   {
+    if (config.m_hosts[i]->m_hostname.length() == 0)
+      continue;
+
     tmp.appfmt(" %s:%s/*", 
 	       config.m_hosts[i]->m_hostname.c_str(),
 	       config.m_hosts[i]->m_basedir.c_str());
@@ -1333,7 +1388,10 @@ setup_hosts(atrt_config& config){
     return false;
   }
 
-  for(size_t i = 0; i<config.m_hosts.size(); i++){
+  for(size_t i = 0; i<config.m_hosts.size(); i++)
+  {
+    if (config.m_hosts[i]->m_hostname.length() == 0)
+      continue;
     BaseString tmp = g_setup_progname;
     tmp.appfmt(" %s %s/ %s/", 
 	       config.m_hosts[i]->m_hostname.c_str(),
@@ -1375,6 +1433,9 @@ deploy(int d, atrt_config & config)
 {
   for (size_t i = 0; i<config.m_hosts.size(); i++)
   {
+    if (config.m_hosts[i]->m_hostname.length() == 0)
+      continue;
+
     if (d & 1)
     {
       if (!do_rsync(g_basedir, config.m_hosts[i]->m_hostname.c_str()))
@@ -1509,6 +1570,35 @@ reset_config(atrt_config & config)
   return changed;
 }
 
+static
+bool
+find_binaries()
+{
+  g_logger.info("Locating binaries...");
+  bool ok = true;
+  for (int i = 0; g_binaries[i].exe != 0; i++)
+  {
+    const char * p = find_bin_path(g_binaries[i].exe);
+    if (p == 0)
+    {
+      if (g_binaries[i].is_required)
+      {
+        g_logger.critical("Failed to locate '%s'", g_binaries[i].exe);
+        ok = false;
+      }
+      else
+      {
+        g_logger.info("Failed to locate '%s'...ok", g_binaries[i].exe);
+      }
+    }
+    else
+    {
+      * g_binaries[i].var = p;
+    }
+  }
+  return ok;
+}
+
 template class Vector<Vector<SimpleCpcClient::Process> >;
 template class Vector<atrt_host*>;
 template class Vector<atrt_cluster*>;

=== modified file 'storage/ndb/test/run-test/setup.cpp'
--- a/storage/ndb/test/run-test/setup.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/run-test/setup.cpp	2011-10-03 11:06:06 +0000
@@ -20,6 +20,7 @@
 #include <util/ndb_opts.h>
 #include <util/NdbOut.hpp>
 #include <util/BaseString.hpp>
+#include <util/File.hpp>
 
 extern int g_mt;
 extern int g_mt_rr;
@@ -321,7 +322,7 @@ load_process(atrt_config& config, atrt_c
   case atrt_process::AP_NDB_MGMD:
   {
     proc.m_proc.m_name.assfmt("%u-%s", proc_no, "ndb_mgmd");
-    proc.m_proc.m_path.assign(g_prefix).append("/libexec/ndb_mgmd");
+    proc.m_proc.m_path.assign(g_ndb_mgmd_bin_path);
     proc.m_proc.m_args.assfmt("--defaults-file=%s/my.cnf",
 			      proc.m_host->m_basedir.c_str());
     proc.m_proc.m_args.appfmt(" --defaults-group-suffix=%s",
@@ -336,13 +337,15 @@ load_process(atrt_config& config, atrt_c
   } 
   case atrt_process::AP_NDBD:
   {
-    if (g_mt == 0 || (g_mt == 1 && ((g_mt_rr++) & 1) == 0))
+    if (g_mt == 0 ||
+        (g_mt == 1 && ((g_mt_rr++) & 1) == 0) ||
+        g_ndbmtd_bin_path == 0)
     {
-      proc.m_proc.m_path.assign(g_prefix).append("/libexec/ndbd");
+      proc.m_proc.m_path.assign(g_ndbd_bin_path);
     }
     else
     {
-      proc.m_proc.m_path.assign(g_prefix).append("/libexec/ndbmtd");
+      proc.m_proc.m_path.assign(g_ndbmtd_bin_path);
     }
     
     proc.m_proc.m_name.assfmt("%u-%s", proc_no, "ndbd");
@@ -350,7 +353,9 @@ load_process(atrt_config& config, atrt_c
 			      proc.m_host->m_basedir.c_str());
     proc.m_proc.m_args.appfmt(" --defaults-group-suffix=%s",
 			      cluster.m_name.c_str());
-    proc.m_proc.m_args.append(" --nodaemon --initial -n");
+    proc.m_proc.m_args.append(" --nodaemon -n");
+    if (!g_restart)
+      proc.m_proc.m_args.append(" --initial");
     if (g_fix_nodeid)
       proc.m_proc.m_args.appfmt(" --ndb-nodeid=%u", proc.m_nodeid);
     proc.m_proc.m_cwd.assfmt("%sndbd.%u", dir.c_str(), proc.m_index);
@@ -361,7 +366,7 @@ load_process(atrt_config& config, atrt_c
   case atrt_process::AP_MYSQLD:
   {
     proc.m_proc.m_name.assfmt("%u-%s", proc_no, "mysqld");
-    proc.m_proc.m_path.assign(g_prefix).append("/libexec/mysqld");
+    proc.m_proc.m_path.assign(g_mysqld_bin_path);
     proc.m_proc.m_args.assfmt("--defaults-file=%s/my.cnf",
 			      proc.m_host->m_basedir.c_str());
     proc.m_proc.m_args.appfmt(" --defaults-group-suffix=.%d%s",
@@ -1037,3 +1042,28 @@ operator<<(NdbOut& out, const atrt_proce
   return out;
 }
 
+char *
+find_bin_path(const char * exe)
+{
+  if (exe == 0)
+    return 0;
+
+  if (exe[0] == '/')
+  {
+    /**
+     * Trust that path is correct...
+     */
+    return strdup(exe);
+  }
+
+  for (int i = 0; g_search_path[i] != 0; i++)
+  {
+    BaseString p;
+    p.assfmt("%s/%s/%s", g_prefix, g_search_path[i], exe);
+    if (File_class::exists(p.c_str()))
+    {
+      return strdup(p.c_str());
+    }
+  }
+  return 0;
+}

=== modified file 'storage/ndb/test/src/HugoOperations.cpp'
--- a/storage/ndb/test/src/HugoOperations.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/src/HugoOperations.cpp	2011-10-12 10:19:08 +0000
@@ -411,20 +411,30 @@ int
 HugoOperations::setValues(NdbOperation* pOp, int rowId, int updateId)
 {
   // Define primary keys
-  int a;
   if (equalForRow(pOp, rowId) != 0)
     return NDBT_FAILED;
-  
-  for(a = 0; a<tab.getNoOfColumns(); a++){
-    if (tab.getColumn(a)->getPrimaryKey() == false){
-      if(setValueForAttr(pOp, a, rowId, updateId ) != 0){ 
+
+  if (setNonPkValues(pOp, rowId, updateId) != 0)
+    return NDBT_FAILED;
+
+  return NDBT_OK;
+}
+
+int
+HugoOperations::setNonPkValues(NdbOperation* pOp, int rowId, int updateId)
+{
+  for(int a = 0; a<tab.getNoOfColumns(); a++)
+  {
+    if (tab.getColumn(a)->getPrimaryKey() == false)
+    {
+      if(setValueForAttr(pOp, a, rowId, updateId ) != 0)
+      {
 	ERR(pTrans->getNdbError());
         setNdbError(pTrans->getNdbError());
 	return NDBT_FAILED;
       }
     }
   }
-  
   return NDBT_OK;
 }
 

=== modified file 'storage/ndb/test/src/NDBT_Find.cpp'
--- a/storage/ndb/test/src/NDBT_Find.cpp	2011-07-04 13:37:56 +0000
+++ b/storage/ndb/test/src/NDBT_Find.cpp	2011-09-30 20:37:26 +0000
@@ -81,7 +81,10 @@ NDBT_find_ndb_mgmd(BaseString& path)
 {
   NDBT_find_binary(path, "ndb_mgmd",
                    "../../src/mgmsrv",
-                   "../storage/ndb/src/mgmsrv/",
+                   "../storage/ndb/src/mgmsrv",
+                   "../libexec",
+                   "../sbin",
+                   "../bin",
                    NULL);
 }
 

=== modified file 'storage/ndb/tools/CMakeLists.txt'
--- a/storage/ndb/tools/CMakeLists.txt	2011-07-04 13:37:56 +0000
+++ b/storage/ndb/tools/CMakeLists.txt	2011-10-05 11:21:23 +0000
@@ -91,6 +91,11 @@ ADD_EXECUTABLE(ndb_dump_frm_data
   ndb_dump_frm_data.cpp)
 TARGET_LINK_LIBRARIES(ndb_dump_frm_data ndbNDBT ndbgeneral)
 
+MYSQL_ADD_EXECUTABLE(ndbinfo_select_all
+  ndbinfo_select_all.cpp
+  COMPONENT ClusterTools)
+TARGET_LINK_LIBRARIES(ndbinfo_select_all ndbNDBT)
+
 IF (MYSQL_VERSION_ID LESS "50501")
   # Don't build or install this program anymore in 5.5+
   ADD_EXECUTABLE(ndb_test_platform ndb_test_platform.cpp)

=== modified file 'storage/ndb/tools/Makefile.am'
--- a/storage/ndb/tools/Makefile.am	2011-09-02 17:24:52 +0000
+++ b/storage/ndb/tools/Makefile.am	2011-10-05 11:21:23 +0000
@@ -32,7 +32,8 @@ ndbtools_PROGRAMS = \
   ndb_select_all \
   ndb_select_count \
   ndb_restore ndb_config \
-  ndb_index_stat
+  ndb_index_stat \
+  ndbinfo_select_all
 
 tools_common_sources = ../test/src/NDBT_ReturnCodes.cpp \
                        ../test/src/NDBT_Table.cpp \
@@ -81,6 +82,7 @@ ndbinfo.sql: $(ndbinfo_sql_SOURCES)
 
 ndb_index_stat_SOURCES = ndb_index_stat.cpp $(tools_common_sources)
 ndb_dump_frm_data_SOURCES = ndb_dump_frm_data.cpp
+ndbinfo_select_all_SOURCES = ndbinfo_select_all.cpp
 
 include $(top_srcdir)/storage/ndb/config/common.mk.am
 include $(top_srcdir)/storage/ndb/config/type_ndbapitools.mk.am
@@ -99,4 +101,5 @@ ndb_config_LDFLAGS = @ndb_bin_am_ldflags
 ndbinfo_sql_LDFLAGS = @ndb_bin_am_ldflags@
 ndb_index_stat_LDFLAGS = @ndb_bin_am_ldflags@
 ndb_dump_frm_data_LDFLAGS = @ndb_bin_am_ldflags@
+ndbinfo_select_all_LDFLAGS = @ndb_bin_am_ldflags@
 

=== added file 'storage/ndb/tools/ndbinfo_select_all.cpp'
--- a/storage/ndb/tools/ndbinfo_select_all.cpp	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/tools/ndbinfo_select_all.cpp	2011-10-07 09:17:10 +0000
@@ -0,0 +1,189 @@
+/*
+   Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+
+#include <ndb_global.h>
+#include <ndb_opts.h>
+
+#include <NdbApi.hpp>
+#include <NdbOut.hpp>
+#include "../src/ndbapi/NdbInfo.hpp"
+#include <NdbSleep.h>
+
+static int loops = 1;
+static int delay = 5;
+const char *load_default_groups[]= { "mysql_cluster",0 };
+
+static struct my_option my_long_options[] =
+{
+  NDB_STD_OPTS("ndbinfo_select_all"),
+  { "loops", 'l', "Run same select several times",
+    (uchar**) &loops, (uchar**) &loops, 0,
+    GET_INT, REQUIRED_ARG, loops, 0, 0, 0, 0, 0 },
+  { "delay", 256, "Delay between loops (in seconds)",
+    (uchar**) &delay, (uchar**) &delay, 0,
+    GET_INT, REQUIRED_ARG, delay, 0, 0, 0, 0, 0 },
+  { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
+};
+
+static void short_usage_sub(void)
+{
+  ndb_short_usage_sub(NULL);
+}
+
+static void usage()
+{
+  ndb_usage(short_usage_sub, load_default_groups, my_long_options);
+}
+
+int
+main(int argc, char** argv)
+{
+  NDB_INIT(argv[0]);
+  ndb_opt_set_usage_funcs(short_usage_sub, usage);
+  load_defaults("my",load_default_groups,&argc,&argv);
+  int ho_error;
+#ifndef DBUG_OFF
+  opt_debug= "d:t:O,/tmp/ndbinfo_select_all.trace";
+#endif
+  if ((ho_error=handle_options(&argc, &argv, my_long_options,
+			       ndb_std_get_one_option)))
+    return 1;
+
+  if (argv[0] == 0)
+  {
+    return 0;
+  }
+
+  Ndb_cluster_connection con(opt_ndb_connectstring, opt_ndb_nodeid);
+  con.set_name("ndbinfo_select_all");
+  if(con.connect(12, 5, 1) != 0)
+  {
+    ndbout << "Unable to connect to management server." << endl;
+    return 1;
+  }
+
+  if (con.wait_until_ready(30,0) < 0)
+  {
+    ndbout << "Cluster nodes not ready in 30 seconds." << endl;
+    return 1;
+  }
+
+  NdbInfo info(&con, "");
+  if (!info.init())
+  {
+    ndbout << "Failed to init ndbinfo!" << endl;
+    return 1;
+  }
+
+  const Uint32 batchsizerows = 32;
+
+  for (int ll = 0; loops == 0 || ll < loops; ll++)
+  {
+    for (int ii = 0; argv[ii] != 0; ii++)
+    {
+      ndbout << "== " << argv[ii] << " ==" << endl;
+
+      const NdbInfo::Table * pTab = 0;
+      int res = info.openTable(argv[ii], &pTab);
+      if (res != 0)
+      {
+        ndbout << "Failed to open: " << argv[ii] << ", res: " << res << endl;
+        continue;
+      }
+
+      unsigned cols = pTab->columns();
+      for (unsigned i = 0; i<cols; i++)
+      {
+        const NdbInfo::Column * pCol = pTab->getColumn(i);
+        ndbout << pCol->m_name.c_str() << "\t";
+      }
+      ndbout << endl;
+
+      NdbInfoScanOperation * pScan = 0;
+      res= info.createScanOperation(pTab, &pScan, batchsizerows);
+      if (res != 0)
+      {
+        ndbout << "Failed to createScan: " << argv[ii] << ", res: " << res<< endl;
+        info.closeTable(pTab);
+        continue;
+      }
+
+      if (pScan->readTuples() != 0)
+      {
+        ndbout << "scanOp->readTuples failed" << endl;
+        return 1;
+      }
+
+      Vector<const NdbInfoRecAttr*> recAttrs;
+      for (unsigned i = 0; i<cols; i++)
+      {
+        const NdbInfoRecAttr* pRec = pScan->getValue(i);
+        if (pRec == 0)
+        {
+          ndbout << "Failed to getValue(" << i << ")" << endl;
+          return 1;
+        }
+        recAttrs.push_back(pRec);
+      }
+
+      if(pScan->execute() != 0)
+      {
+        ndbout << "scanOp->execute failed" << endl;
+        return 1;
+      }
+
+      while(pScan->nextResult() == 1)
+      {
+        for (unsigned i = 0; i<cols; i++)
+        {
+          if (recAttrs[i]->isNULL())
+          {
+            ndbout << "NULL";
+          }
+          else
+          {
+            switch(pTab->getColumn(i)->m_type){
+            case NdbInfo::Column::String:
+              ndbout << recAttrs[i]->c_str();
+              break;
+            case NdbInfo::Column::Number:
+              ndbout << recAttrs[i]->u_32_value();
+              break;
+            case NdbInfo::Column::Number64:
+              ndbout << recAttrs[i]->u_64_value();
+              break;
+            }
+          }
+          ndbout << "\t";
+        }
+        ndbout << endl;
+      }
+
+      info.releaseScanOperation(pScan);
+      info.closeTable(pTab);
+    }
+
+    if ((loops == 0 || ll + 1 != loops) && delay > 0)
+    {
+      NdbSleep_SecSleep(delay);
+    }
+  }
+  return 0;
+}
+
+template class Vector<const NdbInfoRecAttr*>;

=== modified file 'storage/ndb/tools/ndbinfo_sql.cpp'
--- a/storage/ndb/tools/ndbinfo_sql.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/tools/ndbinfo_sql.cpp	2011-10-07 13:15:08 +0000
@@ -99,6 +99,9 @@ struct view {
     "  WHEN 4 THEN \"JOBBUFFER\""
     "  WHEN 5 THEN \"FILE_BUFFERS\""
     "  WHEN 6 THEN \"TRANSPORTER_BUFFERS\""
+    "  WHEN 7 THEN \"DISK_PAGE_BUFFER\""
+    "  WHEN 8 THEN \"QUERY_MEMORY\""
+    "  WHEN 9 THEN \"SCHEMA_TRANS_MEMORY\""
     "  ELSE \"<unknown>\" "
     " END AS resource_name, "
     "reserved, used, max "

=== modified file 'tests/mysql_client_test.c'
--- a/tests/mysql_client_test.c	2011-06-30 15:55:35 +0000
+++ b/tests/mysql_client_test.c	2011-10-17 11:35:32 +0000
@@ -18399,6 +18399,87 @@ static void test_bug47485()
 }
 
 
+#ifndef MCP_BUG13001491
+/*
+  Bug#13001491: MYSQL_REFRESH CRASHES WHEN STORED ROUTINES ARE RUN CONCURRENTLY.
+*/
+static void test_bug13001491()
+{
+  int rc;
+  char query[MAX_TEST_QUERY_LENGTH];
+  MYSQL *c;
+
+  myheader("test_bug13001491");
+
+  my_snprintf(query, MAX_TEST_QUERY_LENGTH,
+           "GRANT ALL PRIVILEGES ON *.* TO mysqltest_u1@%s",
+           opt_host ? opt_host : "'localhost'");
+           
+  rc= mysql_query(mysql, query);
+  myquery(rc);
+
+  my_snprintf(query, MAX_TEST_QUERY_LENGTH,
+           "GRANT RELOAD ON *.* TO mysqltest_u1@%s",
+           opt_host ? opt_host : "'localhost'");
+           
+  rc= mysql_query(mysql, query);
+  myquery(rc);
+
+  c= mysql_client_init(NULL);
+
+  DIE_UNLESS(mysql_real_connect(c, opt_host, "mysqltest_u1", NULL,
+                                current_db, opt_port, opt_unix_socket,
+                                CLIENT_MULTI_STATEMENTS |
+                                CLIENT_MULTI_RESULTS));
+
+  rc= mysql_query(c, "DROP PROCEDURE IF EXISTS p1");
+  myquery(rc);
+
+  rc= mysql_query(c,
+    "CREATE PROCEDURE p1() "
+    "BEGIN "
+    " DECLARE CONTINUE HANDLER FOR SQLEXCEPTION BEGIN END; "
+    " SELECT COUNT(*) "
+    " FROM INFORMATION_SCHEMA.PROCESSLIST "
+    " GROUP BY user "
+    " ORDER BY NULL "
+    " INTO @a; "
+    "END");
+  myquery(rc);
+
+  rc= mysql_query(c, "CALL p1()");
+  myquery(rc);
+
+  mysql_free_result(mysql_store_result(c));
+
+  /* Check that mysql_refresh() succeeds without REFRESH_LOG. */
+  rc= mysql_refresh(c, REFRESH_GRANT |
+                       REFRESH_TABLES | REFRESH_HOSTS |
+                       REFRESH_STATUS | REFRESH_THREADS);
+  myquery(rc);
+
+  /*
+    Check that mysql_refresh(REFRESH_LOG) does not crash the server even if it
+    fails. mysql_refresh(REFRESH_LOG) fails when error log points to unavailable
+    location.
+  */
+  mysql_refresh(c, REFRESH_LOG);
+
+  rc= mysql_query(c, "DROP PROCEDURE p1");
+  myquery(rc);
+
+  mysql_close(c);
+  c= NULL;
+
+  my_snprintf(query, MAX_TEST_QUERY_LENGTH,
+           "DROP USER mysqltest_u1@%s",
+           opt_host ? opt_host : "'localhost'");
+           
+  rc= mysql_query(mysql, query);
+  myquery(rc);
+}
+
+#endif
 /*
   Read and parse arguments and MySQL options from my.cnf
 */
@@ -18725,6 +18806,9 @@ static struct my_tests_st my_tests[]= {
   { "test_bug42373", test_bug42373 },
   { "test_bug54041", test_bug54041 },
   { "test_bug47485", test_bug47485 },
+#ifndef MCP_BUG13001491
+  { "test_bug13001491", test_bug13001491 },
+#endif
   { 0, 0 }
 };
 

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-5.1-telco-7.0-llcp branch (jonas.oreland:3692 to 3693) jonas oreland19 Oct