List: Commits — « Previous Message | Next Message »
From: jonas oreland    Date: November 3 2011 5:24pm
Subject:bzr push into mysql-5.5-cluster branch (jonas.oreland:3623 to 3624)
View as plain text  
 3624 jonas oreland	2011-11-03 [merge]
      ndb - merge 71 to 72

    added:
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/SchemaChangeTest.java
      storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/SchemaChangeTest.java
      storage/ndb/include/kernel/statedesc.hpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhStateDesc.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcStateDesc.cpp
    modified:
      mysql-test/suite/ndb/r/ndbinfo.result
      mysql-test/suite/ndb/r/ndbinfo_dump.result
      mysql-test/suite/ndb/t/ndbinfo.test
      scripts/mysql_system_tables.sql
      sql/ha_ndbcluster_connection.cc
      storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/Session.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionFactoryImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainTypeHandlerFactoryImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainTypeHandlerImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/Dictionary.java
      storage/ndb/clusterj/clusterj-core/src/main/resources/com/mysql/clusterj/core/Bundle.properties
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractClusterJTest.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DictionaryImpl.java
      storage/ndb/src/kernel/blocks/ERROR_codes.txt
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
      storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
      storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
      storage/ndb/src/kernel/vm/Rope.cpp
      storage/ndb/src/kernel/vm/Rope.hpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
      storage/ndb/test/ndbapi/testNodeRestart.cpp
      storage/ndb/test/run-test/daily-basic-tests.txt
      storage/ndb/test/src/HugoQueries.cpp
      storage/ndb/tools/CMakeLists.txt
      storage/ndb/tools/ndbinfo_sql.cpp
 3623 magnus.blaudd@stripped	2011-11-02 [merge]
      Merge in latest schema dist fixes

    added:
      sql/ndb_schema_dist.cc
      sql/ndb_schema_dist.h
      sql/ndb_schema_object.cc
      sql/ndb_schema_object.h
    modified:
      sql/ha_ndb_index_stat.cc
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster.h
      sql/ha_ndbcluster_binlog.cc
      sql/ha_ndbcluster_binlog.h
      sql/ha_ndbcluster_tables.h
      sql/ndb_share.h
      storage/ndb/CMakeLists.txt
=== modified file 'mysql-test/suite/ndb/r/ndbinfo.result'
--- a/mysql-test/suite/ndb/r/ndbinfo.result	2011-10-13 17:13:02 +0000
+++ b/mysql-test/suite/ndb/r/ndbinfo.result	2011-11-03 17:22:01 +0000
@@ -317,6 +317,147 @@ node_id
 1
 2
 
+desc threadblocks;
+Field	Type	Null	Key	Default	Extra
+node_id	int(10) unsigned	YES		NULL	
+thr_no	int(10) unsigned	YES		NULL	
+block_name	varchar(512)	YES		NULL	
+block_instance	int(10) unsigned	YES		NULL	
+select distinct block_name from threadblocks order by 1;
+block_name
+BACKUP
+CMVMI
+DBACC
+DBDICT
+DBDIH
+DBINFO
+DBLQH
+DBSPJ
+DBTC
+DBTUP
+DBTUX
+DBUTIL
+LGMAN
+NDBCNTR
+NDBFS
+PGMAN
+QMGR
+RESTORE
+SUMA
+THRMAN
+TRIX
+TSMAN
+desc threadstat;
+Field	Type	Null	Key	Default	Extra
+node_id	int(10) unsigned	YES		NULL	
+thr_no	int(10) unsigned	YES		NULL	
+thr_nm	varchar(512)	YES		NULL	
+c_loop	bigint(20) unsigned	YES		NULL	
+c_exec	bigint(20) unsigned	YES		NULL	
+c_wait	bigint(20) unsigned	YES		NULL	
+c_l_sent_prioa	bigint(20) unsigned	YES		NULL	
+c_l_sent_priob	bigint(20) unsigned	YES		NULL	
+c_r_sent_prioa	bigint(20) unsigned	YES		NULL	
+c_r_sent_priob	bigint(20) unsigned	YES		NULL	
+os_tid	bigint(20) unsigned	YES		NULL	
+os_now	bigint(20) unsigned	YES		NULL	
+os_ru_utime	bigint(20) unsigned	YES		NULL	
+os_ru_stime	bigint(20) unsigned	YES		NULL	
+os_ru_minflt	bigint(20) unsigned	YES		NULL	
+os_ru_majflt	bigint(20) unsigned	YES		NULL	
+os_ru_nvcsw	bigint(20) unsigned	YES		NULL	
+os_ru_nivcsw	bigint(20) unsigned	YES		NULL	
+select count(*) > 0 block_name from threadstat;
+block_name
+1
+
+desc cluster_transactions;
+Field	Type	Null	Key	Default	Extra
+node_id	int(10) unsigned	YES		NULL	
+block_instance	int(10) unsigned	YES		NULL	
+transid	bigint(22) unsigned	YES		NULL	
+state	varchar(256)	YES		NULL	
+count_operations	int(10) unsigned	YES		NULL	
+outstanding_operations	int(10) unsigned	YES		NULL	
+inactive_seconds	int(10) unsigned	YES		NULL	
+client_node_id	bigint(21) unsigned	YES		NULL	
+client_block_ref	bigint(21) unsigned	YES		NULL	
+desc server_transactions;
+Field	Type	Null	Key	Default	Extra
+mysql_connection_id	bigint(21) unsigned	NO		0	
+node_id	int(10) unsigned	YES		NULL	
+block_instance	int(10) unsigned	YES		NULL	
+transid	bigint(22) unsigned	YES		NULL	
+state	varchar(256)	YES		NULL	
+count_operations	int(10) unsigned	YES		NULL	
+outstanding_operations	int(10) unsigned	YES		NULL	
+inactive_seconds	int(10) unsigned	YES		NULL	
+client_node_id	bigint(21) unsigned	YES		NULL	
+client_block_ref	bigint(21) unsigned	YES		NULL	
+desc cluster_operations;
+Field	Type	Null	Key	Default	Extra
+node_id	int(10) unsigned	YES		NULL	
+block_instance	int(10) unsigned	YES		NULL	
+transid	bigint(22) unsigned	YES		NULL	
+operation_type	varchar(9)	YES		NULL	
+state	varchar(256)	YES		NULL	
+tableid	int(10) unsigned	YES		NULL	
+fragmentid	int(10) unsigned	YES		NULL	
+client_node_id	bigint(21) unsigned	YES		NULL	
+client_block_ref	bigint(21) unsigned	YES		NULL	
+tc_node_id	bigint(21) unsigned	YES		NULL	
+tc_block_no	bigint(21) unsigned	YES		NULL	
+tc_block_instance	bigint(21) unsigned	YES		NULL	
+desc server_operations;
+Field	Type	Null	Key	Default	Extra
+mysql_connection_id	bigint(21) unsigned	NO		0	
+node_id	int(10) unsigned	YES		NULL	
+block_instance	int(10) unsigned	YES		NULL	
+transid	bigint(22) unsigned	YES		NULL	
+operation_type	varchar(9)	YES		NULL	
+state	varchar(256)	YES		NULL	
+tableid	int(10) unsigned	YES		NULL	
+fragmentid	int(10) unsigned	YES		NULL	
+client_node_id	bigint(21) unsigned	YES		NULL	
+client_block_ref	bigint(21) unsigned	YES		NULL	
+tc_node_id	bigint(21) unsigned	YES		NULL	
+tc_block_no	bigint(21) unsigned	YES		NULL	
+tc_block_instance	bigint(21) unsigned	YES		NULL	
+
+create table t1 (a int primary key) engine = ndb;
+begin;
+insert into t1 values (1);
+select state, count_operations, outstanding_operations,
+IF(client_node_id <= 255, "<client_node_id>", "<incorrect node id>") 
+  client_node_id
+from server_transactions;
+state	count_operations	outstanding_operations	client_node_id
+Started	1	0	<client_node_id>
+select node_id, operation_type, state,
+IF(tc_node_id <= 48, "<tc_node_id>", "<incorrect nodeid>") tc_node_id,
+IF(client_node_id <= 255, "<client_node_id>", "<incorrect node id>") 
+  client_node_id
+from server_operations
+order by 1;
+node_id	operation_type	state	tc_node_id	client_node_id
+1	INSERT	Prepared	<tc_node_id>	<client_node_id>
+2	INSERT	Prepared	<tc_node_id>	<client_node_id>
+
+select st.state, st.count_operations, st.outstanding_operations,
+       so.node_id, so.state, so.operation_type
+from server_transactions st,
+     server_operations so
+where st.transid = so.transid
+  and so.tc_node_id = st.node_id
+  and so.tc_block_instance = st.block_instance
+  and so.client_node_id = st.client_node_id
+  and so.client_block_ref = st.client_block_ref;
+state	count_operations	outstanding_operations	node_id	state	operation_type
+Started	1	0	1	Prepared	INSERT
+Started	1	0	2	Prepared	INSERT
+rollback;
+drop table t1;
+
 set @@global.ndbinfo_offline=TRUE;
 select @@ndbinfo_offline;
 @@ndbinfo_offline

=== modified file 'mysql-test/suite/ndb/r/ndbinfo_dump.result'
--- a/mysql-test/suite/ndb/r/ndbinfo_dump.result	2011-10-20 19:52:11 +0000
+++ b/mysql-test/suite/ndb/r/ndbinfo_dump.result	2011-11-03 17:22:01 +0000
@@ -1,7 +1,7 @@
 USE ndbinfo;
 select count(*) from blocks;
 count(*)
-21
+22
 select count(*) from blocks;
 count(*)
-21
+22

=== modified file 'mysql-test/suite/ndb/t/ndbinfo.test'
--- a/mysql-test/suite/ndb/t/ndbinfo.test	2011-06-15 14:21:47 +0000
+++ b/mysql-test/suite/ndb/t/ndbinfo.test	2011-11-03 17:22:01 +0000
@@ -201,6 +201,44 @@ set @@ndbinfo_offline=1;
 let $q1 = SELECT DISTINCT(node_id) FROM ndbinfo.counters ORDER BY node_id;
 eval $q1;
 
+# new views
+desc threadblocks;
+select distinct block_name from threadblocks order by 1;
+desc threadstat;
+select count(*) > 0 block_name from threadstat;
+
+desc cluster_transactions;
+desc server_transactions;
+desc cluster_operations;
+desc server_operations;
+
+create table t1 (a int primary key) engine = ndb;
+begin;
+insert into t1 values (1);
+select state, count_operations, outstanding_operations,
+IF(client_node_id <= 255, "<client_node_id>", "<incorrect node id>") 
+  client_node_id
+from server_transactions;
+select node_id, operation_type, state,
+IF(tc_node_id <= 48, "<tc_node_id>", "<incorrect nodeid>") tc_node_id,
+IF(client_node_id <= 255, "<client_node_id>", "<incorrect node id>") 
+  client_node_id
+from server_operations
+order by 1;
+
+--sorted_result
+select st.state, st.count_operations, st.outstanding_operations,
+       so.node_id, so.state, so.operation_type
+from server_transactions st,
+     server_operations so
+where st.transid = so.transid
+  and so.tc_node_id = st.node_id
+  and so.tc_block_instance = st.block_instance
+  and so.client_node_id = st.client_node_id
+  and so.client_block_ref = st.client_block_ref; 
+rollback;
+drop table t1;
+
 # Turn on ndbinfo_offline
 set @@global.ndbinfo_offline=TRUE;
 select @@ndbinfo_offline;

=== modified file 'scripts/mysql_system_tables.sql'
--- a/scripts/mysql_system_tables.sql	2011-09-02 09:16:56 +0000
+++ b/scripts/mysql_system_tables.sql	2011-11-03 17:22:01 +0000
@@ -527,53 +527,98 @@ EXECUTE stmt;
 DROP PREPARE stmt;
 
 # Drop any old views in ndbinfo
-SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS ndbinfo.transporters','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`transporters`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS ndbinfo.logspaces','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`logspaces`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS ndbinfo.logbuffers','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`logbuffers`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS ndbinfo.resources','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`resources`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS ndbinfo.counters','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`counters`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS ndbinfo.nodes','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`nodes`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS ndbinfo.memoryusage','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`memoryusage`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS ndbinfo.diskpagebuffer','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`diskpagebuffer`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`diskpagebuffer`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`threadblocks`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`threadstat`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`cluster_transactions`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`server_transactions`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`cluster_operations`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'DROP VIEW IF EXISTS `ndbinfo`.`server_operations`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
 # Drop any old lookup tables in ndbinfo
-SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS ndbinfo.blocks','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS `ndbinfo`.`blocks`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS ndbinfo.config_params','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS `ndbinfo`.`config_params`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS `ndbinfo`.`ndb$dbtc_apiconnect_state`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS `ndbinfo`.`ndb$dblqh_tcconnect_state`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
@@ -699,13 +744,57 @@ PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
+# ndbinfo.ndb$threadblocks
+SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS `ndbinfo`.`ndb$threadblocks`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'CREATE TABLE `ndbinfo`.`ndb$threadblocks` (`node_id` INT UNSIGNED COMMENT "node id",`thr_no` INT UNSIGNED COMMENT "thread number",`block_number` INT UNSIGNED COMMENT "block number",`block_instance` INT UNSIGNED COMMENT "block instance") COMMENT="which blocks are run in which threads" ENGINE=NDBINFO','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.ndb$threadstat
+SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS `ndbinfo`.`ndb$threadstat`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'CREATE TABLE `ndbinfo`.`ndb$threadstat` (`node_id` INT UNSIGNED COMMENT "node id",`thr_no` INT UNSIGNED COMMENT "thread number",`thr_nm` VARCHAR(512) COMMENT "thread name",`c_loop` BIGINT UNSIGNED COMMENT "No of loops in main loop",`c_exec` BIGINT UNSIGNED COMMENT "No of signals executed",`c_wait` BIGINT UNSIGNED COMMENT "No of times waited for more input",`c_l_sent_prioa` BIGINT UNSIGNED COMMENT "No of prio A signals sent to own node",`c_l_sent_priob` BIGINT UNSIGNED COMMENT "No of prio B signals sent to own node",`c_r_sent_prioa` BIGINT UNSIGNED COMMENT "No of prio A signals sent to remote node",`c_r_sent_priob` BIGINT UNSIGNED COMMENT "No of prio B signals sent to remote node",`os_tid` BIGINT UNSIGNED COMMENT "OS thread id",`os_now` BIGINT UNSIGNED COMMENT "OS gettimeofday (millis)",`os_ru_utime` BIGINT UNSIGNED COMMENT "OS user CPU time (micros)",`os_ru_stime` BIGINT UNSIGNED COMMENT "OS system CPU time (micros)",`os_ru_minflt` BIGINT UNSIGNED COMMENT "OS page reclaims (soft page faults",`os_ru_majflt` BIGINT UNSIGNED COMMENT "OS page faults (hard page faults)",`os_ru_nvcsw` BIGINT UNSIGNED COMMENT "OS voluntary context switches",`os_ru_nivcsw` BIGINT UNSIGNED COMMENT "OS involuntary context switches") COMMENT="Statistics on execution threads" ENGINE=NDBINFO','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.ndb$transactions
+SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS `ndbinfo`.`ndb$transactions`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'CREATE TABLE `ndbinfo`.`ndb$transactions` (`node_id` INT UNSIGNED COMMENT "node id",`block_instance` INT UNSIGNED COMMENT "TC instance no",`objid` INT UNSIGNED COMMENT "Object id of transaction object",`apiref` INT UNSIGNED COMMENT "API reference",`transid0` INT UNSIGNED COMMENT "Transaction id",`transid1` INT UNSIGNED COMMENT "Transaction id",`state` INT UNSIGNED COMMENT "Transaction state",`flags` INT UNSIGNED COMMENT "Transaction flags",`c_ops` INT UNSIGNED COMMENT "No of operations in transaction",`outstanding` INT UNSIGNED COMMENT "Currently outstanding request",`timer` INT UNSIGNED COMMENT "Timer (seconds)") COMMENT="transactions" ENGINE=NDBINFO','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.ndb$operations
+SET @str=IF(@have_ndbinfo,'DROP TABLE IF EXISTS `ndbinfo`.`ndb$operations`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'CREATE TABLE `ndbinfo`.`ndb$operations` (`node_id` INT UNSIGNED COMMENT "node id",`block_instance` INT UNSIGNED COMMENT "LQH instance no",`objid` INT UNSIGNED COMMENT "Object id of operation object",`tcref` INT UNSIGNED COMMENT "TC reference",`apiref` INT UNSIGNED COMMENT "API reference",`transid0` INT UNSIGNED COMMENT "Transaction id",`transid1` INT UNSIGNED COMMENT "Transaction id",`tableid` INT UNSIGNED COMMENT "Table id",`fragmentid` INT UNSIGNED COMMENT "Fragment id",`op` INT UNSIGNED COMMENT "Operation type",`state` INT UNSIGNED COMMENT "Operation state",`flags` INT UNSIGNED COMMENT "Operation flags") COMMENT="operations" ENGINE=NDBINFO','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
 # ndbinfo.blocks
 SET @str=IF(@have_ndbinfo,'CREATE TABLE `ndbinfo`.`blocks` (block_number INT UNSIGNED PRIMARY KEY, block_name VARCHAR(512))','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'INSERT INTO `ndbinfo`.`blocks` VALUES (254, "CMVMI"), (248, "DBACC"), (250, "DBDICT"), (246, "DBDIH"), (247, "DBLQH"), (245, "DBTC"), (249, "DBTUP"), (253, "NDBFS"), (251, "NDBCNTR"), (252, "QMGR"), (255, "TRIX"), (244, "BACKUP"), (256, "DBUTIL"), (257, "SUMA"), (258, "DBTUX"), (259, "TSMAN"), (260, "LGMAN"), (261, "PGMAN"), (262, "RESTORE"), (263, "DBINFO"), (264, "DBSPJ")','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'INSERT INTO `ndbinfo`.`blocks` VALUES (254, "CMVMI"), (248, "DBACC"), (250, "DBDICT"), (246, "DBDIH"), (247, "DBLQH"), (245, "DBTC"), (249, "DBTUP"), (253, "NDBFS"), (251, "NDBCNTR"), (252, "QMGR"), (255, "TRIX"), (244, "BACKUP"), (256, "DBUTIL"), (257, "SUMA"), (258, "DBTUX"), (259, "TSMAN"), (260, "LGMAN"), (261, "PGMAN"), (262, "RESTORE"), (263, "DBINFO"), (264, "DBSPJ"), (265, "THRMAN")','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
@@ -716,55 +805,119 @@ PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
-SET @str=IF(@have_ndbinfo,'INSERT INTO `ndbinfo`.`config_params` VALUES (179, "MaxNoOfSubscriptions"), (180, "MaxNoOfSubscribers"), (181, "MaxNoOfConcurrentSubOperations"), (5, "HostName"), (3, "NodeId"), (101, "NoOfReplicas"), (103, "MaxNoOfAttributes"), (102, "MaxNoOfTables"), (149, "MaxNoOfOrderedIndexes"), (150, "MaxNoOfUniqueHashIndexes"), (110, "MaxNoOfConcurrentIndexOperations"), (105, "MaxNoOfTriggers"), (109, "MaxNoOfFiredTriggers"), (100, "MaxNoOfSavedMessages"), (177, "LockExecuteThreadToCPU"), (178, "LockMaintThreadsToCPU"), (176, "RealtimeScheduler"), (114, "LockPagesInMainMemory"), (123, "TimeBetweenWatchDogCheck"), (174, "SchedulerExecutionTimer"), (175, "SchedulerSpinTimer"), (141, "TimeBetweenWatchDogCheckInitial"), (124, "StopOnError"), (107, "MaxNoOfConcurrentOperations"), (151, "MaxNoOfLocalOperations"), (152, "MaxNoOfLocalScans"), (153, "BatchSizePerLocalScan"), (106, "MaxNoOfConcurrentTransactions"), (108, "MaxNoOfConcurrentScans"), (111, "TransactionBu
 fferMemory"), (113, "IndexMemory"), (112, "DataMemory"), (154, "UndoIndexBuffer"), (155, "UndoDataBuffer"), (156, "RedoBuffer"), (157, "LongMessageBuffer"), (160, "DiskPageBufferMemory"), (198, "SharedGlobalMemory"), (115, "StartPartialTimeout"), (116, "StartPartitionedTimeout"), (117, "StartFailureTimeout"), (118, "HeartbeatIntervalDbDb"), (119, "HeartbeatIntervalDbApi"), (120, "TimeBetweenLocalCheckpoints"), (121, "TimeBetweenGlobalCheckpoints"), (170, "TimeBetweenEpochs"), (171, "TimeBetweenEpochsTimeout"), (182, "MaxBufferedEpochs"), (126, "NoOfFragmentLogFiles"), (140, "FragmentLogFileSize"), (189, "InitFragmentLogFiles"), (190, "DiskIOThreadPool"), (159, "MaxNoOfOpenFiles"), (162, "InitialNoOfOpenFiles"), (129, "TimeBetweenInactiveTransactionAbortCheck"), (130, "TransactionInactiveTimeout"), (131, "TransactionDeadlockDetectionTimeout"), (148, "Diskless"), (122, "ArbitrationTimeout"), (142, "Arbitration"), (7, "DataDir"), (125, "FileSystemPath"), (250, "LogLevelStartup"
 ), (251, "LogLevelShutdown"), (252, "LogLevelStatistic"), (253, "LogLevelCheckpoint"), (254, "LogLevelNodeRestart"), (255, "LogLevelConnection"), (259, "LogLevelCongestion"), (258, "LogLevelError"), (256, "LogLevelInfo"), (158, "BackupDataDir"), (163, "DiskSyncSize"), (164, "DiskCheckpointSpeed"), (165, "DiskCheckpointSpeedInRestart"), (133, "BackupMemory"), (134, "BackupDataBufferSize"), (135, "BackupLogBufferSize"), (136, "BackupWriteSize"), (139, "BackupMaxWriteSize"), (161, "StringMemory"), (169, "MaxAllocate"), (166, "MemReportFrequency"), (167, "BackupReportFrequency"), (184, "StartupStatusReportFrequency"), (168, "ODirect"), (172, "CompressedBackup"), (173, "CompressedLCP"), (9, "TotalSendBufferMemory"), (202, "ReservedSendBufferMemory"), (185, "Nodegroup"), (186, "MaxNoOfExecutionThreads"), (188, "__ndbmt_lqh_workers"), (187, "__ndbmt_lqh_threads"), (191, "__ndbmt_classic"), (193, "FileSystemPathDD"), (194, "FileSystemPathDataFiles"), (195, "FileSystemPathUndoFiles")
 , (196, "InitialLogfileGroup"), (197, "InitialTablespace"), (605, "MaxLCPStartDelay"), (606, "BuildIndexThreads"), (607, "HeartbeatOrder"), (608, "DictTrace"), (609, "MaxStartFailRetries"), (610, "StartFailRetryDelay")','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'INSERT INTO `ndbinfo`.`config_params` VALUES (179, "MaxNoOfSubscriptions"), (180, "MaxNoOfSubscribers"), (181, "MaxNoOfConcurrentSubOperations"), (5, "HostName"), (3, "NodeId"), (101, "NoOfReplicas"), (103, "MaxNoOfAttributes"), (102, "MaxNoOfTables"), (149, "MaxNoOfOrderedIndexes"), (150, "MaxNoOfUniqueHashIndexes"), (110, "MaxNoOfConcurrentIndexOperations"), (105, "MaxNoOfTriggers"), (109, "MaxNoOfFiredTriggers"), (100, "MaxNoOfSavedMessages"), (177, "LockExecuteThreadToCPU"), (178, "LockMaintThreadsToCPU"), (176, "RealtimeScheduler"), (114, "LockPagesInMainMemory"), (123, "TimeBetweenWatchDogCheck"), (174, "SchedulerExecutionTimer"), (175, "SchedulerSpinTimer"), (141, "TimeBetweenWatchDogCheckInitial"), (124, "StopOnError"), (107, "MaxNoOfConcurrentOperations"), (627, "MaxDMLOperationsPerTransaction"), (151, "MaxNoOfLocalOperations"), (152, "MaxNoOfLocalScans"), (153, "BatchSizePerLocalScan"), (106, "MaxNoOfConcurrentTransactions"), (108, "MaxNo
 OfConcurrentScans"), (111, "TransactionBufferMemory"), (113, "IndexMemory"), (112, "DataMemory"), (154, "UndoIndexBuffer"), (155, "UndoDataBuffer"), (156, "RedoBuffer"), (157, "LongMessageBuffer"), (160, "DiskPageBufferMemory"), (198, "SharedGlobalMemory"), (115, "StartPartialTimeout"), (116, "StartPartitionedTimeout"), (117, "StartFailureTimeout"), (619, "StartNoNodegroupTimeout"), (118, "HeartbeatIntervalDbDb"), (618, "ConnectCheckIntervalDelay"), (119, "HeartbeatIntervalDbApi"), (120, "TimeBetweenLocalCheckpoints"), (121, "TimeBetweenGlobalCheckpoints"), (170, "TimeBetweenEpochs"), (171, "TimeBetweenEpochsTimeout"), (182, "MaxBufferedEpochs"), (126, "NoOfFragmentLogFiles"), (140, "FragmentLogFileSize"), (189, "InitFragmentLogFiles"), (190, "DiskIOThreadPool"), (159, "MaxNoOfOpenFiles"), (162, "InitialNoOfOpenFiles"), (129, "TimeBetweenInactiveTransactionAbortCheck"), (130, "TransactionInactiveTimeout"), (131, "TransactionDeadlockDetectionTimeout"), (148, "Diskless"), (122
 , "ArbitrationTimeout"), (142, "Arbitration"), (7, "DataDir"), (125, "FileSystemPath"), (250, "LogLevelStartup"), (251, "LogLevelShutdown"), (252, "LogLevelStatistic"), (253, "LogLevelCheckpoint"), (254, "LogLevelNodeRestart"), (255, "LogLevelConnection"), (259, "LogLevelCongestion"), (258, "LogLevelError"), (256, "LogLevelInfo"), (158, "BackupDataDir"), (163, "DiskSyncSize"), (164, "DiskCheckpointSpeed"), (165, "DiskCheckpointSpeedInRestart"), (133, "BackupMemory"), (134, "BackupDataBufferSize"), (135, "BackupLogBufferSize"), (136, "BackupWriteSize"), (139, "BackupMaxWriteSize"), (161, "StringMemory"), (169, "MaxAllocate"), (166, "MemReportFrequency"), (167, "BackupReportFrequency"), (184, "StartupStatusReportFrequency"), (168, "ODirect"), (172, "CompressedBackup"), (173, "CompressedLCP"), (9, "TotalSendBufferMemory"), (202, "ReservedSendBufferMemory"), (185, "Nodegroup"), (186, "MaxNoOfExecutionThreads"), (188, "__ndbmt_lqh_workers"), (187, "__ndbmt_lqh_threads"), (191, "_
 _ndbmt_classic"), (628, "ThreadConfig"), (193, "FileSystemPathDD"), (194, "FileSystemPathDataFiles"), (195, "FileSystemPathUndoFiles"), (196, "InitialLogfileGroup"), (197, "InitialTablespace"), (605, "MaxLCPStartDelay"), (606, "BuildIndexThreads"), (607, "HeartbeatOrder"), (608, "DictTrace"), (609, "MaxStartFailRetries"), (610, "StartFailRetryDelay"), (613, "EventLogBufferSize"), (614, "Numa"), (611, "RedoOverCommitLimit"), (612, "RedoOverCommitCounter"), (615, "LateAlloc"), (616, "TwoPassInitialNodeRestartCopy"), (617, "MaxParallelScansPerFragment"), (620, "IndexStatAutoCreate"), (621, "IndexStatAutoUpdate"), (622, "IndexStatSaveSize"), (623, "IndexStatSaveScale"), (624, "IndexStatTriggerPct"), (625, "IndexStatTriggerScale"), (626, "IndexStatUpdateDelay"), (629, "CrashOnCorruptedTuple")','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.ndb$dbtc_apiconnect_state
+SET @str=IF(@have_ndbinfo,'CREATE TABLE `ndbinfo`.`ndb$dbtc_apiconnect_state` (state_int_value  INT UNSIGNED PRIMARY KEY, state_name VARCHAR(256), state_friendly_name VARCHAR(256), state_description VARCHAR(256))','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'INSERT INTO `ndbinfo`.`ndb$dbtc_apiconnect_state` VALUES (0, "CS_CONNECTED", "Connected", "An allocated idle transaction object"), (1, "CS_DISCONNECTED", "Disconnected", "An unallocated connection object"), (2, "CS_STARTED", "Started", "A started transaction"), (3, "CS_RECEIVING", "Receiving", "A transaction receiving operations"), (7, "CS_RESTART", "", ""), (8, "CS_ABORTING", "Aborting", "A transaction aborting"), (9, "CS_COMPLETING", "Completing", "A transaction completing"), (10, "CS_COMPLETE_SENT", "Completing", "A transaction completing"), (11, "CS_PREPARE_TO_COMMIT", "", ""), (12, "CS_COMMIT_SENT", "Committing", "A transaction committing"), (13, "CS_START_COMMITTING", "", ""), (14, "CS_COMMITTING", "Committing", "A transaction committing"), (15, "CS_REC_COMMITTING", "", ""), (16, "CS_WAIT_ABORT_CONF", "Aborting", ""), (17, "CS_WAIT_COMPLETE_CONF", "Completing", ""), (18, "CS_WAIT_COMMIT_CONF", "Committing", ""), (19, "CS_FAIL_ABORTING", "Take
 OverAborting", ""), (20, "CS_FAIL_ABORTED", "TakeOverAborting", ""), (21, "CS_FAIL_PREPARED", "", ""), (22, "CS_FAIL_COMMITTING", "TakeOverCommitting", ""), (23, "CS_FAIL_COMMITTED", "TakeOverCommitting", ""), (24, "CS_FAIL_COMPLETED", "TakeOverCompleting", ""), (25, "CS_START_SCAN", "Scanning", ""), (26, "CS_SEND_FIRE_TRIG_REQ", "Precomitting", ""), (27, "CS_WAIT_FIRE_TRIG_REQ", "Precomitting", "")','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.ndb$dblqh_tcconnect_state
+SET @str=IF(@have_ndbinfo,'CREATE TABLE `ndbinfo`.`ndb$dblqh_tcconnect_state` (state_int_value  INT UNSIGNED PRIMARY KEY, state_name VARCHAR(256), state_friendly_name VARCHAR(256), state_description VARCHAR(256))','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+SET @str=IF(@have_ndbinfo,'INSERT INTO `ndbinfo`.`ndb$dblqh_tcconnect_state` VALUES (0, "IDLE", "Idle", ""), (1, "WAIT_ACC", "WaitLock", ""), (2, "WAIT_TUPKEYINFO", "", ""), (3, "WAIT_ATTR", "WaitData", ""), (4, "WAIT_TUP", "WaitTup", ""), (5, "STOPPED", "Stopped", ""), (6, "LOG_QUEUED", "LogPrepare", ""), (7, "PREPARED", "Prepared", ""), (8, "LOG_COMMIT_WRITTEN_WAIT_SIGNAL", "", ""), (9, "LOG_COMMIT_QUEUED_WAIT_SIGNAL", "", ""), (10, "COMMIT_STOPPED", "CommittingStopped", ""), (11, "LOG_COMMIT_QUEUED", "Committing", ""), (12, "COMMIT_QUEUED", "Committing", ""), (13, "COMMITTED", "Committed", ""), (35, "WAIT_TUP_COMMIT", "Committing", ""), (14, "WAIT_ACC_ABORT", "Aborting", ""), (15, "ABORT_QUEUED", "Aborting", ""), (16, "ABORT_STOPPED", "AbortingStopped", ""), (17, "WAIT_AI_AFTER_ABORT", "Aborting", ""), (18, "LOG_ABORT_QUEUED", "Aborting", ""), (19, "WAIT_TUP_TO_ABORT", "Aborting", ""), (20, "WAIT_SCAN_AI", "Scanning", ""), (21, "SCAN_STATE_USED", "Scanning", ""), (22, "SC
 AN_FIRST_STOPPED", "Scanning", ""), (23, "SCAN_CHECK_STOPPED", "Scanning", ""), (24, "SCAN_STOPPED", "ScanningStopped", ""), (25, "SCAN_RELEASE_STOPPED", "ScanningStopped", ""), (26, "SCAN_CLOSE_STOPPED", "ScanningStopped", ""), (27, "COPY_CLOSE_STOPPED", "ScanningStopped", ""), (28, "COPY_FIRST_STOPPED", "ScanningStopped", ""), (29, "COPY_STOPPED", "ScanningStopped", ""), (30, "SCAN_TUPKEY", "Scanning", ""), (31, "COPY_TUPKEY", "NodeRecoveryScanning", ""), (32, "TC_NOT_CONNECTED", "Idle", ""), (33, "PREPARED_RECEIVED_COMMIT", "Committing", ""), (34, "LOG_COMMIT_WRITTEN", "Committing", "")','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
 # ndbinfo.transporters
-SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`transporters` AS SELECT node_id, remote_node_id,  CASE connection_status  WHEN 0 THEN "CONNECTED"  WHEN 1 THEN "CONNECTING"  WHEN 2 THEN "DISCONNECTED"  WHEN 3 THEN "DISCONNECTING"  ELSE NULL  END AS status FROM ndbinfo.ndb$transporters','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`transporters` AS SELECT node_id, remote_node_id,  CASE connection_status  WHEN 0 THEN "CONNECTED"  WHEN 1 THEN "CONNECTING"  WHEN 2 THEN "DISCONNECTED"  WHEN 3 THEN "DISCONNECTING"  ELSE NULL  END AS status FROM `ndbinfo`.`ndb$transporters`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
 # ndbinfo.logspaces
-SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`logspaces` AS SELECT node_id,  CASE log_type  WHEN 0 THEN "REDO"  WHEN 1 THEN "DD-UNDO"  ELSE NULL  END AS log_type, log_id, log_part, total, used FROM ndbinfo.ndb$logspaces','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`logspaces` AS SELECT node_id,  CASE log_type  WHEN 0 THEN "REDO"  WHEN 1 THEN "DD-UNDO"  ELSE NULL  END AS log_type, log_id, log_part, total, used FROM `ndbinfo`.`ndb$logspaces`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
 # ndbinfo.logbuffers
-SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`logbuffers` AS SELECT node_id,  CASE log_type  WHEN 0 THEN "REDO"  WHEN 1 THEN "DD-UNDO"  ELSE "<unknown>"  END AS log_type, log_id, log_part, total, used FROM ndbinfo.ndb$logbuffers','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`logbuffers` AS SELECT node_id,  CASE log_type  WHEN 0 THEN "REDO"  WHEN 1 THEN "DD-UNDO"  ELSE "<unknown>"  END AS log_type, log_id, log_part, total, used FROM `ndbinfo`.`ndb$logbuffers`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
 # ndbinfo.resources
-SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`resources` AS SELECT node_id,  CASE resource_id  WHEN 0 THEN "RESERVED"  WHEN 1 THEN "DISK_OPERATIONS"  WHEN 2 THEN "DISK_RECORDS"  WHEN 3 THEN "DATA_MEMORY"  WHEN 4 THEN "JOBBUFFER"  WHEN 5 THEN "FILE_BUFFERS"  WHEN 6 THEN "TRANSPORTER_BUFFERS"  ELSE "<unknown>"  END AS resource_name, reserved, used, max FROM ndbinfo.ndb$resources','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`resources` AS SELECT node_id,  CASE resource_id  WHEN 0 THEN "RESERVED"  WHEN 1 THEN "DISK_OPERATIONS"  WHEN 2 THEN "DISK_RECORDS"  WHEN 3 THEN "DATA_MEMORY"  WHEN 4 THEN "JOBBUFFER"  WHEN 5 THEN "FILE_BUFFERS"  WHEN 6 THEN "TRANSPORTER_BUFFERS"  WHEN 7 THEN "DISK_PAGE_BUFFER"  WHEN 8 THEN "QUERY_MEMORY"  WHEN 9 THEN "SCHEMA_TRANS_MEMORY"  ELSE "<unknown>"  END AS resource_name, reserved, used, max FROM `ndbinfo`.`ndb$resources`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
 # ndbinfo.counters
-SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`counters` AS SELECT node_id, b.block_name, block_instance, counter_id, CASE counter_id  WHEN 1 THEN "ATTRINFO"  WHEN 2 THEN "TRANSACTIONS"  WHEN 3 THEN "COMMITS"  WHEN 4 THEN "READS"  WHEN 5 THEN "SIMPLE_READS"  WHEN 6 THEN "WRITES"  WHEN 7 THEN "ABORTS"  WHEN 8 THEN "TABLE_SCANS"  WHEN 9 THEN "RANGE_SCANS"  WHEN 10 THEN "OPERATIONS"  WHEN 11 THEN "READS_RECEIVED"  WHEN 12 THEN "LOCAL_READS_SENT"  WHEN 13 THEN "REMOTE_READS_SENT"  WHEN 14 THEN "READS_NOT_FOUND"  WHEN 15 THEN "TABLE_SCANS_RECEIVED"  WHEN 16 THEN "LOCAL_TABLE_SCANS_SENT"  WHEN 17 THEN "RANGE_SCANS_RECEIVED"  WHEN 18 THEN "LOCAL_RANGE_SCANS_SENT"  WHEN 19 THEN "REMOTE_RANGE_SCANS_SENT"  WHEN 20 THEN "SCAN_BATCHES_RETURNED"  WHEN 21 THEN "SCAN_ROWS_RETURNED"  WHEN 22 THEN "PRUNED_RANGE_SCANS_RECEIVED"  WHEN 23 THEN "CONST_PRUNED_RANGE_SCANS_RECEIVED"  ELSE "<unknown>"  END AS counter_name, val FROM ndbinfo.
 ndb$counters c, ndbinfo.blocks b WHERE c.block_number = b.block_number','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`counters` AS SELECT node_id, b.block_name, block_instance, counter_id, CASE counter_id  WHEN 1 THEN "ATTRINFO"  WHEN 2 THEN "TRANSACTIONS"  WHEN 3 THEN "COMMITS"  WHEN 4 THEN "READS"  WHEN 5 THEN "SIMPLE_READS"  WHEN 6 THEN "WRITES"  WHEN 7 THEN "ABORTS"  WHEN 8 THEN "TABLE_SCANS"  WHEN 9 THEN "RANGE_SCANS"  WHEN 10 THEN "OPERATIONS"  WHEN 11 THEN "READS_RECEIVED"  WHEN 12 THEN "LOCAL_READS_SENT"  WHEN 13 THEN "REMOTE_READS_SENT"  WHEN 14 THEN "READS_NOT_FOUND"  WHEN 15 THEN "TABLE_SCANS_RECEIVED"  WHEN 16 THEN "LOCAL_TABLE_SCANS_SENT"  WHEN 17 THEN "RANGE_SCANS_RECEIVED"  WHEN 18 THEN "LOCAL_RANGE_SCANS_SENT"  WHEN 19 THEN "REMOTE_RANGE_SCANS_SENT"  WHEN 20 THEN "SCAN_BATCHES_RETURNED"  WHEN 21 THEN "SCAN_ROWS_RETURNED"  WHEN 22 THEN "PRUNED_RANGE_SCANS_RECEIVED"  WHEN 23 THEN "CONST_PRUNED_RANGE_SCANS_RECEIVED"  ELSE "<unknown>"  END AS counter_name, val FROM `ndbinfo
 `.`ndb$counters` c LEFT JOIN `ndbinfo`.blocks b ON c.block_number = b.block_number','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
 # ndbinfo.nodes
-SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`nodes` AS SELECT node_id, uptime, CASE status  WHEN 0 THEN "NOTHING"  WHEN 1 THEN "CMVMI"  WHEN 2 THEN "STARTING"  WHEN 3 THEN "STARTED"  WHEN 4 THEN "SINGLEUSER"  WHEN 5 THEN "STOPPING_1"  WHEN 6 THEN "STOPPING_2"  WHEN 7 THEN "STOPPING_3"  WHEN 8 THEN "STOPPING_4"  ELSE "<unknown>"  END AS status, start_phase, config_generation FROM ndbinfo.ndb$nodes','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`nodes` AS SELECT node_id, uptime, CASE status  WHEN 0 THEN "NOTHING"  WHEN 1 THEN "CMVMI"  WHEN 2 THEN "STARTING"  WHEN 3 THEN "STARTED"  WHEN 4 THEN "SINGLEUSER"  WHEN 5 THEN "STOPPING_1"  WHEN 6 THEN "STOPPING_2"  WHEN 7 THEN "STOPPING_3"  WHEN 8 THEN "STOPPING_4"  ELSE "<unknown>"  END AS status, start_phase, config_generation FROM `ndbinfo`.`ndb$nodes`','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
 # ndbinfo.memoryusage
-SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`memoryusage` AS SELECT node_id,  pool_name AS memory_type,  SUM(used*entry_size) AS used,  SUM(used) AS used_pages,  SUM(total*entry_size) AS total,  SUM(total) AS total_pages FROM ndbinfo.ndb$pools WHERE block_number IN (248, 254) AND   (pool_name = "Index memory" OR pool_name = "Data memory") GROUP BY node_id, memory_type','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`memoryusage` AS SELECT node_id,  pool_name AS memory_type,  SUM(used*entry_size) AS used,  SUM(used) AS used_pages,  SUM(total*entry_size) AS total,  SUM(total) AS total_pages FROM `ndbinfo`.`ndb$pools` WHERE block_number IN (248, 254) AND   (pool_name = "Index memory" OR pool_name = "Data memory") GROUP BY node_id, memory_type','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
 
 # ndbinfo.diskpagebuffer
-SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE SQL SECURITY INVOKER VIEW `ndbinfo`.`diskpagebuffer` AS SELECT node_id, block_instance, pages_written, pages_written_lcp, pages_read, log_waits, page_requests_direct_return, page_requests_wait_queue, page_requests_wait_io FROM ndbinfo.ndb$diskpagebuffer','SET @dummy = 0');
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`diskpagebuffer` AS SELECT node_id, block_instance, pages_written, pages_written_lcp, pages_read, log_waits, page_requests_direct_return, page_requests_wait_queue, page_requests_wait_io FROM `ndbinfo`.`ndb$diskpagebuffer`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.diskpagebuffer
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`diskpagebuffer` AS SELECT node_id, block_instance, pages_written, pages_written_lcp, pages_read, log_waits, page_requests_direct_return, page_requests_wait_queue, page_requests_wait_io FROM `ndbinfo`.`ndb$diskpagebuffer`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.threadblocks
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`threadblocks` AS SELECT t.node_id, t.thr_no, b.block_name, t.block_instance FROM `ndbinfo`.`ndb$threadblocks` t LEFT JOIN `ndbinfo`.blocks b ON t.block_number = b.block_number','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.threadstat
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`threadstat` AS SELECT * from `ndbinfo`.`ndb$threadstat`','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.cluster_transactions
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`cluster_transactions` AS SELECT t.node_id, t.block_instance, t.transid0 + (t.transid1 << 32) as transid, s.state_friendly_name as state,  t.c_ops as count_operations,  t.outstanding as outstanding_operations,  t.timer as inactive_seconds,  (t.apiref & 65535) as client_node_id,  (t.apiref >> 16) as client_block_ref FROM `ndbinfo`.`ndb$transactions` t LEFT JOIN `ndbinfo`.`ndb$dbtc_apiconnect_state` s        ON s.state_int_value = t.state','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.server_transactions
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`server_transactions` AS SELECT map.mysql_connection_id, t.*FROM information_schema.ndb_transid_mysql_connection_map map JOIN `ndbinfo`.cluster_transactions t   ON (map.ndb_transid >> 32) = (t.transid >> 32)','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.cluster_operations
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`cluster_operations` AS SELECT o.node_id, o.block_instance, o.transid0 + (o.transid1 << 32) as transid, case o.op  when 1 then "READ" when 2 then "READ-SH" when 3 then "READ-EX" when 4 then "INSERT" when 5 then "UPDATE" when 6 then "DELETE" when 7 then "WRITE" when 8 then "UNLOCK" when 9 then "REFRESH" when 257 then "SCAN" when 258 then "SCAN-SH" when 259 then "SCAN-EX" ELSE "<unknown>" END as operation_type,  s.state_friendly_name as state,  o.tableid,  o.fragmentid,  (o.apiref & 65535) as client_node_id,  (o.apiref >> 16) as client_block_ref,  (o.tcref & 65535) as tc_node_id,  ((o.tcref >> 16) & 511) as tc_block_no,  ((o.tcref >> (16 + 9)) & 127) as tc_block_instance FROM `ndbinfo`.`ndb$operations` o LEFT JOIN `ndbinfo`.`ndb$dblqh_tcconnect_state` s        ON s.state_int_value = o.state','SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+# ndbinfo.server_operations
+SET @str=IF(@have_ndbinfo,'CREATE OR REPLACE DEFINER=`root@localhost` SQL SECURITY INVOKER VIEW `ndbinfo`.`server_operations` AS SELECT map.mysql_connection_id, o.* FROM `ndbinfo`.cluster_operations o JOIN information_schema.ndb_transid_mysql_connection_map map  ON (map.ndb_transid >> 32) = (o.transid >> 32)','SET @dummy = 0');
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
@@ -774,3 +927,4 @@ SET @str=IF(@have_ndbinfo,'SET @@global.
 PREPARE stmt FROM @str;
 EXECUTE stmt;
 DROP PREPARE stmt;
+

=== modified file 'sql/ha_ndbcluster_connection.cc'
--- a/sql/ha_ndbcluster_connection.cc	2011-10-27 12:04:20 +0000
+++ b/sql/ha_ndbcluster_connection.cc	2011-11-03 17:22:01 +0000
@@ -346,7 +346,7 @@ ndb_transid_mysql_connection_map_fill_ta
 {
   DBUG_ENTER("ndb_transid_mysql_connection_map_fill_table");
 
-  const bool all = check_global_access(thd, PROCESS_ACL);
+  const bool all = (check_global_access(thd, PROCESS_ACL) == 0);
   const ulonglong self = thd_get_thread_id(thd);
 
   TABLE* table= tables->table;

=== modified file 'storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/Session.java'
--- a/storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/Session.java	2011-01-31 09:07:01 +0000
+++ b/storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/Session.java	2011-10-27 23:43:25 +0000
@@ -229,4 +229,12 @@ public interface Session {
      */
     void markModified(Object instance, String fieldName);
 
+    /** Unload the schema definition for a class. This must be done after the schema
+     * definition has changed in the database due to an alter table command.
+     * The next time the class is used the schema will be reloaded.
+     * @param cls the class for which the schema is unloaded
+     * @return the name of the schema that was unloaded
+     */
+    String unloadSchema(Class<?> cls);
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionFactoryImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionFactoryImpl.java	2011-08-03 01:00:56 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionFactoryImpl.java	2011-10-27 23:43:25 +0000
@@ -489,4 +489,19 @@ public class SessionFactoryImpl implemen
         return result;
     }
 
+    public String unloadSchema(Class<?> cls, Dictionary dictionary) {
+        synchronized(typeToHandlerMap) {
+            String tableName = null;
+            DomainTypeHandler<?> domainTypeHandler = typeToHandlerMap.remove(cls);
+            if (domainTypeHandler != null) {
+                // remove the ndb dictionary cached table definition
+                tableName = domainTypeHandler.getTableName();
+                if (tableName != null) {
+                    dictionary.removeCachedTable(tableName);
+                }
+            }
+            return tableName;
+        }
+    }
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java	2011-10-22 00:40:34 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java	2011-10-27 23:43:25 +0000
@@ -1384,4 +1384,8 @@ public class SessionImpl implements Sess
         }
     }
 
+    public String unloadSchema(Class<?> cls) {
+        return factory.unloadSchema(cls, dictionary);
+    }
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainTypeHandlerFactoryImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainTypeHandlerFactoryImpl.java	2011-02-02 09:52:33 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainTypeHandlerFactoryImpl.java	2011-10-27 23:43:25 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -53,7 +53,7 @@ public class DomainTypeHandlerFactoryImp
     }
 
     public <T> DomainTypeHandler<T> createDomainTypeHandler(Class<T> domainClass, Dictionary dictionary) {
-        DomainTypeHandler<T> handler;
+        DomainTypeHandler<T> handler = null;
         StringBuffer errorMessages = new StringBuffer();
         for (DomainTypeHandlerFactory factory: domainTypeHandlerFactories) {
             try {
@@ -82,6 +82,15 @@ public class DomainTypeHandlerFactoryImp
         } catch (Exception e) {
             errorMessages.append(e.toString());
             throw new ClusterJUserException(errorMessages.toString(), e);
+        } finally {
+            // if handler is null, there may be a problem with the schema, so remove it from the local dictionary
+            if (handler == null) {
+                String tableName = DomainTypeHandlerImpl.getTableName(domainClass);
+                if (tableName != null) {
+                    logger.info(local.message("MSG_Removing_Schema", tableName, domainClass.getName()));
+                    dictionary.removeCachedTable(tableName);                    
+                }
+            }
         }
     }
 

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainTypeHandlerImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainTypeHandlerImpl.java	2011-02-02 09:52:33 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainTypeHandlerImpl.java	2011-10-27 23:43:25 +0000
@@ -244,7 +244,29 @@ public class DomainTypeHandlerImpl<T> ex
         }
     }
 
-    protected <O extends DynamicObject> String getTableNameForDynamicObject(Class<O> cls) {
+    /** Get the table name mapped to the domain class.
+     * @param cls the domain class
+     * @return the table name for the domain class
+     */
+    protected static String getTableName(Class<?> cls) {
+        String tableName = null;
+        if (DynamicObject.class.isAssignableFrom(cls)) {
+            tableName = getTableNameForDynamicObject((Class<DynamicObject>)cls);
+        } else {
+            PersistenceCapable persistenceCapable = cls.getAnnotation(PersistenceCapable.class);
+            if (persistenceCapable != null) {
+                tableName = persistenceCapable.table();            
+            }
+        }
+        return tableName;
+    }
+
+    /** Get the table name for a dynamic object. The table name is available either from
+     * the PersistenceCapable annotation or via the table() method.
+     * @param cls the dynamic object class
+     * @return the table name for the dynamic object class
+     */
+    protected static <O extends DynamicObject> String getTableNameForDynamicObject(Class<O> cls) {
         DynamicObject dynamicObject;
         PersistenceCapable persistenceCapable = cls.getAnnotation(PersistenceCapable.class);
         String tableName = null;

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/Dictionary.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/Dictionary.java	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/Dictionary.java	2011-11-03 17:22:01 +0000
@@ -1,6 +1,5 @@
 /*
-   Copyright 2010 Sun Microsystems, Inc.
-   All rights reserved. Use is subject to license terms.
+   Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -27,4 +26,6 @@ public interface Dictionary {
 
     public Table getTable(String tableName);
 
+    public void removeCachedTable(String tableName);
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/resources/com/mysql/clusterj/core/Bundle.properties'
--- a/storage/ndb/clusterj/clusterj-core/src/main/resources/com/mysql/clusterj/core/Bundle.properties	2011-08-29 08:17:26 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/resources/com/mysql/clusterj/core/Bundle.properties	2011-10-27 23:43:25 +0000
@@ -133,3 +133,4 @@ ERR_Wrong_Parameter_Type_For_In:For fiel
 either an array of Object types or a List.
 ERR_Parameter_Too_Big_For_In:For field ''{0}'', the parameter of length {1} for query operator ''in'' \
 is too big; it must contain fewer than 4097 items.
+MSG_Removing_Schema:Removing schema {0} after failure to initialize domain type handler for class {1}.

=== modified file 'storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractClusterJTest.java'
--- a/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractClusterJTest.java	2011-05-10 01:19:27 +0000
+++ b/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractClusterJTest.java	2011-10-28 23:29:26 +0000
@@ -37,6 +37,7 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
+import java.sql.SQLWarning;
 import java.sql.Statement;
 
 import java.util.ArrayList;
@@ -319,6 +320,77 @@ public abstract class AbstractClusterJTe
         }
     }
 
+    /** Execute the sql in its own statement. If the connection is not
+     * currently autocommit, set autocommit to true and restore it after
+     * the statement is executed.
+     * @param sql the sql to execute
+     */
+    protected void executeSQL(String sql) {
+        Statement statement = null;
+        try {
+            boolean autoCommit = connection.getAutoCommit();
+            if (!autoCommit) {
+                connection.setAutoCommit(true);
+            }
+            statement = connection.createStatement();
+            statement.execute(sql);
+            if (!autoCommit) {
+                connection.setAutoCommit(autoCommit);
+            }
+        } catch (SQLException e) {
+            error("Caught " + e.getClass() + " trying: " + sql);
+            if (statement == null) {
+                error(analyzeWarnings(connection));
+            } else {
+                error(analyzeWarnings(statement));
+            }
+        } finally {
+            if (statement != null) {
+                try {
+                    statement.close();
+                } catch (SQLException e) {
+                    // nothing can be done here
+                    error("Error closing statement " + sql);
+                }
+            }
+        }
+    }
+
+    protected String analyzeWarnings(Connection connection) {
+        SQLWarning warning = null;
+        StringBuilder builder = new StringBuilder();
+        try {
+            warning = connection.getWarnings();
+            analyzeWarnings(warning, builder);
+        } catch (SQLException e) {
+            builder.append("Error getting warnings from connection:\n");
+            builder.append(e.getMessage());
+        }
+        return builder.toString();
+    }
+
+    protected String analyzeWarnings(Statement statement) {
+        SQLWarning warning = null;
+        StringBuilder builder = new StringBuilder();
+        try {
+            warning = statement.getWarnings();
+            analyzeWarnings(warning, builder);
+        } catch (SQLException e) {
+            builder.append("Error getting warnings from statement:\n");
+            builder.append(e.getMessage());
+        }
+        return builder.toString();
+    }
+
+    protected StringBuilder analyzeWarnings(SQLWarning warning, StringBuilder builder) {
+        if (warning != null) {
+            builder.append(warning.getMessage());
+            builder.append("\n");
+            analyzeWarnings(warning.getNextWarning(), builder);
+        }
+        return builder;
+    }
+
     Properties getProperties(String fileName) {
         Properties result = null;
         try {

=== added file 'storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/SchemaChangeTest.java'
--- a/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/SchemaChangeTest.java	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/SchemaChangeTest.java	2011-10-28 23:29:26 +0000
@@ -0,0 +1,92 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+package testsuite.clusterj;
+
+import com.mysql.clusterj.ClusterJDatastoreException;
+import com.mysql.clusterj.ClusterJUserException;
+import com.mysql.clusterj.ColumnMetadata;
+import com.mysql.clusterj.DynamicObject;
+import com.mysql.clusterj.annotation.PersistenceCapable;
+
+import testsuite.clusterj.model.StringTypes;
+
+public class SchemaChangeTest extends AbstractClusterJModelTest {
+
+    private static final String modifyTableStatement = 
+        "alter table stringtypes drop column string_not_null_none";
+
+    private static final String restoreTableStatement = 
+        "alter table stringtypes add string_not_null_none varchar(20) DEFAULT NULL";
+
+    @Override
+    public void localSetUp() {
+        createSessionFactory();
+        session = sessionFactory.getSession();
+        // create will cache the schema
+        session.deletePersistentAll(StringTypes.class);
+        session.makePersistent(session.newInstance(StringTypes.class, 0));
+        addTearDownClasses(StringTypes.class);
+    }
+
+    public void testFind() {
+        // change the schema (drop a column)
+        executeSQL(modifyTableStatement);
+        try {
+            // find the row (with a different schema) which will fail
+            session.find(StringTypes.class, 0);
+        } catch (ClusterJDatastoreException dex) {
+            // make sure it's the right exception
+            if (!dex.getMessage().contains("code 284")) {
+                error("ClusterJDatastoreException must contain code 284 but contains only " + dex.getMessage());
+            }
+            // unload the schema for StringTypes which also clears the cached dictionary table
+            String tableName = session.unloadSchema(StringTypes.class);
+            // make sure we unloaded the right table
+            errorIfNotEqual("Table name mismatch", "stringtypes", tableName);
+            // it should work with a different schema that doesn't include the dropped column
+            StringTypes2 zero = session.find(StringTypes2.class, 0);
+            // verify that column string_not_null_none does not exist
+            ColumnMetadata[] metadatas = zero.columnMetadata();
+            for (ColumnMetadata metadata: metadatas) {
+                if ("string_not_null_none".equals(metadata.name())) {
+                    error("Column string_not_null_none should not exist after schema change.");
+                }
+            }
+            try {
+                // find the row (with a different schema) which will fail with a user exception
+                session.find(StringTypes.class, 0);
+                error("Unexpected success using StringTypes class without column string_not_null_none defined");
+            } catch (ClusterJUserException uex) {
+                // StringTypes can't be loaded because of the missing column, but
+                // the cached dictionary table was removed when the domain type handler couldn't be created
+                executeSQL(restoreTableStatement);
+                // after restoreTableDefinition, string_not_null_none is defined again
+                // find the row (with a different schema) which will now work
+                session.find(StringTypes.class, 0);
+            }
+        }
+        failOnError();
+    }
+
+    /** StringTypes dynamic class to map stringtypes after column string_not_null_none is removed.
+     */
+    @PersistenceCapable(table="stringtypes")
+    public static class StringTypes2 extends DynamicObject {
+        public StringTypes2() {}
+    }
+}

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DictionaryImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DictionaryImpl.java	2011-02-02 09:52:33 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DictionaryImpl.java	2011-10-27 23:43:25 +0000
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ *  Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -17,11 +17,12 @@
 
 package com.mysql.clusterj.tie;
 
+import com.mysql.ndbjtie.ndbapi.NdbDictionary.Dictionary;
 import com.mysql.ndbjtie.ndbapi.NdbDictionary.DictionaryConst;
-import com.mysql.ndbjtie.ndbapi.NdbDictionary.IndexConst;
-import com.mysql.ndbjtie.ndbapi.NdbDictionary.TableConst;
 import com.mysql.ndbjtie.ndbapi.NdbDictionary.DictionaryConst.ListConst.Element;
 import com.mysql.ndbjtie.ndbapi.NdbDictionary.DictionaryConst.ListConst.ElementArray;
+import com.mysql.ndbjtie.ndbapi.NdbDictionary.IndexConst;
+import com.mysql.ndbjtie.ndbapi.NdbDictionary.TableConst;
 
 import com.mysql.clusterj.core.store.Index;
 import com.mysql.clusterj.core.store.Table;
@@ -43,9 +44,9 @@ class DictionaryImpl implements com.mysq
     static final Logger logger = LoggerFactoryService.getFactory()
             .getInstance(DictionaryImpl.class);
 
-    private DictionaryConst ndbDictionary;
+    private Dictionary ndbDictionary;
 
-    public DictionaryImpl(DictionaryConst ndbDictionary) {
+    public DictionaryImpl(Dictionary ndbDictionary) {
         this.ndbDictionary = ndbDictionary;
     }
 
@@ -122,4 +123,8 @@ class DictionaryImpl implements com.mysq
         }
     }
 
+    public void removeCachedTable(String tableName) {
+        ndbDictionary.removeCachedTable(tableName);
+    }
+
 }

=== added file 'storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/SchemaChangeTest.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/SchemaChangeTest.java	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/SchemaChangeTest.java	2011-10-27 23:43:25 +0000
@@ -0,0 +1,22 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+package testsuite.clusterj.tie;
+
+public class SchemaChangeTest extends testsuite.clusterj.SchemaChangeTest {
+
+}

=== added file 'storage/ndb/include/kernel/statedesc.hpp'
--- a/storage/ndb/include/kernel/statedesc.hpp	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/include/kernel/statedesc.hpp	2011-10-28 10:16:23 +0000
@@ -0,0 +1,32 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#ifndef NDB_STATE_DESC_H
+#define NDB_STATE_DESC_H
+
+struct ndbkernel_state_desc
+{
+  unsigned value;
+  const char * name;
+  const char * friendly_name;
+  const char * description;
+};
+
+extern struct ndbkernel_state_desc g_dbtc_apiconnect_state_desc[];
+extern struct ndbkernel_state_desc g_dblqh_tcconnect_state_desc[];
+
+#endif

=== modified file 'storage/ndb/src/kernel/blocks/ERROR_codes.txt'
--- a/storage/ndb/src/kernel/blocks/ERROR_codes.txt	2011-06-07 12:19:47 +0000
+++ b/storage/ndb/src/kernel/blocks/ERROR_codes.txt	2011-11-03 17:22:01 +0000
@@ -18,7 +18,7 @@ Next NDBCNTR 1002
 Next NDBFS 2000
 Next DBACC 3002
 Next DBTUP 4035
-Next DBLQH 5072
+Next DBLQH 5074
 Next DBDICT 6026
 Next DBDIH 7229
 Next DBTC 8092

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-10-21 12:36:44 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-11-03 17:22:01 +0000
@@ -65,7 +65,7 @@
 #include <signaldata/UtilPrepare.hpp>
 #include <signaldata/UtilExecute.hpp>
 #include <signaldata/UtilRelease.hpp>
-#include <signaldata/SumaImpl.hpp> 
+#include <signaldata/SumaImpl.hpp>
 
 #include <signaldata/LqhFrag.hpp>
 #include <signaldata/DictStart.hpp>
@@ -216,7 +216,7 @@ Dbdict::execDUMP_STATE_ORD(Signal* signa
     sendSignal(DBDICT_REF, GSN_DROP_TABLE_REQ, signal,
 	       DropTableReq::SignalLength, JBB);
   }
-#endif  
+#endif
 #define MEMINFO(x, y) infoEvent(x ": %d %d", y.getSize(), y.getNoOfFree())
   if(signal->theData[0] == 1226){
     MEMINFO("c_obj_pool", c_obj_pool);
@@ -226,20 +226,20 @@ Dbdict::execDUMP_STATE_ORD(Signal* signa
 
   if (signal->theData[0] == 1227)
   {
-    DLHashTable<DictObject>::Iterator iter;
+    DictObject_hash::Iterator iter;
     bool ok = c_obj_hash.first(iter);
     for(; ok; ok = c_obj_hash.next(iter))
     {
-      Rope name(c_rope_pool, iter.curr.p->m_name);
+      LocalRope name(c_rope_pool, iter.curr.p->m_name);
       char buf[1024];
       name.copy(buf);
-      ndbout_c("%s m_ref_count: %d", buf, iter.curr.p->m_ref_count); 
+      ndbout_c("%s m_ref_count: %d", buf, iter.curr.p->m_ref_count);
       if (iter.curr.p->m_trans_key != 0)
         ndbout_c("- m_trans_key: %u m_op_ref_count: %u",
                  iter.curr.p->m_trans_key, iter.curr.p->m_op_ref_count);
     }
-  }    
-  
+  }
+
   if (signal->theData[0] == 8004)
   {
     infoEvent("DICT: c_counterMgr size: %u free: %u",
@@ -380,7 +380,7 @@ void Dbdict::execDBINFO_SCANREQ(Signal *
 // processes.
 /* ---------------------------------------------------------------- */
 /* ---------------------------------------------------------------- */
-void Dbdict::execCONTINUEB(Signal* signal) 
+void Dbdict::execCONTINUEB(Signal* signal)
 {
   jamEntry();
   switch (signal->theData[0]) {
@@ -500,7 +500,7 @@ void Dbdict::packTableIntoPages(Signal*
   c_pageRecordArray.getPtr(pagePtr, pageId);
 
   memset(&pagePtr.p->word[0], 0, 4 * ZPAGE_HEADER_SIZE);
-  LinearWriter w(&pagePtr.p->word[ZPAGE_HEADER_SIZE], 
+  LinearWriter w(&pagePtr.p->word[ZPAGE_HEADER_SIZE],
 		 ZMAX_PAGES_OF_TABLE_DEFINITION * ZSIZE_OF_PAGES_IN_WORDS);
   w.first();
   switch((DictTabInfo::TableType)type) {
@@ -533,7 +533,7 @@ void Dbdict::packTableIntoPages(Signal*
       ref->senderData = c_retrieveRecord.m_senderData;
       ref->errorCode = err;
       Uint32 dstRef = c_retrieveRecord.blockRef;
-      sendSignal(dstRef, GSN_GET_TABINFOREF, signal, 
+      sendSignal(dstRef, GSN_GET_TABINFOREF, signal,
                  GetTabInfoRef::SignalLength, JBB);
       initRetrieveRecord(0,0,0);
       return;
@@ -563,7 +563,7 @@ void Dbdict::packTableIntoPages(Signal*
     break;
   }
   case DictTabInfo::HashMap:{
-    Ptr<HashMapRecord> hm_ptr;
+    HashMapRecordPtr hm_ptr;
     ndbrequire(c_hash_map_hash.find(hm_ptr, tableId));
     packHashMapIntoPages(w, hm_ptr);
     break;
@@ -577,12 +577,12 @@ void Dbdict::packTableIntoPages(Signal*
   case DictTabInfo::ReorgTrigger:
     ndbrequire(false);
   }
-  
+
   Uint32 wordsOfTable = w.getWordsUsed();
   Uint32 pagesUsed = WORDS2PAGES(wordsOfTable);
-  pagePtr.p->word[ZPOS_CHECKSUM] = 
+  pagePtr.p->word[ZPOS_CHECKSUM] =
     computeChecksum(&pagePtr.p->word[0], pagesUsed * ZSIZE_OF_PAGES_IN_WORDS);
-  
+
   switch (c_packTable.m_state) {
   case PackTable::PTS_IDLE:
     ndbrequire(false);
@@ -603,7 +603,7 @@ void
 Dbdict::packTableIntoPages(SimpleProperties::Writer & w,
 			       TableRecordPtr tablePtr,
 			       Signal* signal){
-  
+
   union {
     char tableName[MAX_TAB_NAME_SIZE];
     char frmData[MAX_FRM_DATA_SIZE];
@@ -622,18 +622,18 @@ Dbdict::packTableIntoPages(SimplePropert
   w.add(DictTabInfo::NoOfNullable, tablePtr.p->noOfNullAttr);
   w.add(DictTabInfo::NoOfVariable, (Uint32)0);
   w.add(DictTabInfo::KeyLength, tablePtr.p->tupKeyLength);
-  
-  w.add(DictTabInfo::TableLoggedFlag, 
+
+  w.add(DictTabInfo::TableLoggedFlag,
 	!!(tablePtr.p->m_bits & TableRecord::TR_Logged));
-  w.add(DictTabInfo::RowGCIFlag, 
+  w.add(DictTabInfo::RowGCIFlag,
 	!!(tablePtr.p->m_bits & TableRecord::TR_RowGCI));
-  w.add(DictTabInfo::RowChecksumFlag, 
+  w.add(DictTabInfo::RowChecksumFlag,
 	!!(tablePtr.p->m_bits & TableRecord::TR_RowChecksum));
-  w.add(DictTabInfo::TableTemporaryFlag, 
+  w.add(DictTabInfo::TableTemporaryFlag,
 	!!(tablePtr.p->m_bits & TableRecord::TR_Temporary));
   w.add(DictTabInfo::ForceVarPartFlag,
 	!!(tablePtr.p->m_bits & TableRecord::TR_ForceVarPart));
-  
+
   w.add(DictTabInfo::MinLoadFactor, tablePtr.p->minLoadFactor);
   w.add(DictTabInfo::MaxLoadFactor, tablePtr.p->maxLoadFactor);
   w.add(DictTabInfo::TableKValue, tablePtr.p->kValue);
@@ -655,7 +655,7 @@ Dbdict::packTableIntoPages(SimplePropert
 
   if (tablePtr.p->hashMapObjectId != RNIL)
   {
-    HashMapPtr hm_ptr;
+    HashMapRecordPtr hm_ptr;
     ndbrequire(c_hash_map_hash.find(hm_ptr, tablePtr.p->hashMapObjectId));
     w.add(DictTabInfo::HashMapVersion, hm_ptr.p->m_object_version);
   }
@@ -674,7 +674,7 @@ Dbdict::packTableIntoPages(SimplePropert
       err = CreateFragmentationRef::InvalidPrimaryTable;
     }
     if (unlikely(err != 0))
-    { 
+    {
       jam();
       signal->theData[0] = err;
       return;
@@ -693,7 +693,7 @@ Dbdict::packTableIntoPages(SimplePropert
     /* This part is run at CREATE_TABLEREQ, ALTER_TABLEREQ */
     ;
   }
-  
+
   if (tablePtr.p->primaryTableId != RNIL)
   {
     jam();
@@ -736,7 +736,7 @@ Dbdict::packTableIntoPages(SimplePropert
   }
 
   AttributeRecordPtr attrPtr;
-  LocalDLFifoList<AttributeRecord> list(c_attributeRecordPool, 
+  LocalAttributeRecord_list list(c_attributeRecordPool,
 				    tablePtr.p->m_attributes);
   for(list.first(attrPtr); !attrPtr.isNull(); list.next(attrPtr)){
     jam();
@@ -747,7 +747,7 @@ Dbdict::packTableIntoPages(SimplePropert
     w.add(DictTabInfo::AttributeName, attributeName);
     w.add(DictTabInfo::AttributeId, attrPtr.p->attributeId);
     w.add(DictTabInfo::AttributeKeyFlag, attrPtr.p->tupleKey > 0);
-    
+
     const Uint32 desc = attrPtr.p->attributeDescriptor;
     const Uint32 attrType = AttributeDescriptor::getType(desc);
     const Uint32 attrSize = AttributeDescriptor::getSize(desc);
@@ -757,7 +757,7 @@ Dbdict::packTableIntoPages(SimplePropert
     const Uint32 DKey = AttributeDescriptor::getDKey(desc);
     const Uint32 disk= AttributeDescriptor::getDiskBased(desc);
     const Uint32 dynamic= AttributeDescriptor::getDynamic(desc);
-    
+
 
     // AttributeType deprecated
     w.add(DictTabInfo::AttributeSize, attrSize);
@@ -770,14 +770,14 @@ Dbdict::packTableIntoPages(SimplePropert
     w.add(DictTabInfo::AttributeExtPrecision, attrPtr.p->extPrecision);
     w.add(DictTabInfo::AttributeExtScale, attrPtr.p->extScale);
     w.add(DictTabInfo::AttributeExtLength, attrPtr.p->extLength);
-    w.add(DictTabInfo::AttributeAutoIncrement, 
+    w.add(DictTabInfo::AttributeAutoIncrement,
 	  (Uint32)attrPtr.p->autoIncrement);
 
     if(disk)
       w.add(DictTabInfo::AttributeStorageType, (Uint32)NDB_STORAGETYPE_DISK);
     else
       w.add(DictTabInfo::AttributeStorageType, (Uint32)NDB_STORAGETYPE_MEMORY);
-    
+
     ConstRope def(c_rope_pool, attrPtr.p->defaultValue);
     def.copy(defaultValue);
 
@@ -790,9 +790,9 @@ Dbdict::packTableIntoPages(SimplePropert
       memcpy(&a, defaultValue, sizeof(Uint32));
       a = htonl(a);
       memcpy(defaultValue, &a, sizeof(Uint32));
-      
+
       Uint32 remainBytes = def.size() - sizeof(Uint32);
-      
+
       if (remainBytes)
         NdbSqlUtil::convertByteOrder(attrType,
                                      attrSize,
@@ -806,7 +806,7 @@ Dbdict::packTableIntoPages(SimplePropert
     w.add(DictTabInfo::AttributeDefaultValue, defaultValue, def.size());
     w.add(DictTabInfo::AttributeEnd, 1);
   }
-  
+
   w.add(DictTabInfo::TableEnd, 1);
 }
 
@@ -815,7 +815,7 @@ Dbdict::packFilegroupIntoPages(SimplePro
 			       FilegroupPtr fg_ptr,
 			       const Uint32 undo_free_hi,
 			       const Uint32 undo_free_lo){
-  
+
   DictFilegroupInfo::Filegroup fg; fg.init();
   ConstRope r(c_rope_pool, fg_ptr.p->m_name);
   r.copy(fg.FilegroupName);
@@ -842,20 +842,20 @@ Dbdict::packFilegroupIntoPages(SimplePro
   default:
     ndbrequire(false);
   }
-  
+
   SimpleProperties::UnpackStatus s;
-  s = SimpleProperties::pack(w, 
+  s = SimpleProperties::pack(w,
 			     &fg,
-			     DictFilegroupInfo::Mapping, 
+			     DictFilegroupInfo::Mapping,
 			     DictFilegroupInfo::MappingSize, true);
-  
+
   ndbrequire(s == SimpleProperties::Eof);
 }
 
 void
 Dbdict::packFileIntoPages(SimpleProperties::Writer & w,
 			  FilePtr f_ptr, const Uint32 free_extents){
-  
+
   DictFilegroupInfo::File f; f.init();
   ConstRope r(c_rope_pool, f_ptr.p->m_path);
   r.copy(f.FileName);
@@ -873,11 +873,11 @@ Dbdict::packFileIntoPages(SimpleProperti
   f.FilegroupVersion = lfg_ptr.p->m_version;
 
   SimpleProperties::UnpackStatus s;
-  s = SimpleProperties::pack(w, 
+  s = SimpleProperties::pack(w,
 			     &f,
-			     DictFilegroupInfo::FileMapping, 
+			     DictFilegroupInfo::FileMapping,
 			     DictFilegroupInfo::FileMappingSize, true);
-  
+
   ndbrequire(s == SimpleProperties::Eof);
 }
 
@@ -936,7 +936,7 @@ Dbdict::execCREATE_FRAGMENTATION_REQ(Sig
 /* ---------------------------------------------------------------- */
 // A file was successfully closed.
 /* ---------------------------------------------------------------- */
-void Dbdict::execFSCLOSECONF(Signal* signal) 
+void Dbdict::execFSCLOSECONF(Signal* signal)
 {
   FsConnectRecordPtr fsPtr;
   FsConf * const fsConf = (FsConf *)&signal->theData[0];
@@ -976,7 +976,7 @@ void Dbdict::execFSCLOSECONF(Signal* sig
 /* ---------------------------------------------------------------- */
 // A file was successfully opened.
 /* ---------------------------------------------------------------- */
-void Dbdict::execFSOPENCONF(Signal* signal) 
+void Dbdict::execFSOPENCONF(Signal* signal)
 {
   FsConnectRecordPtr fsPtr;
   jamEntry();
@@ -1026,7 +1026,7 @@ void Dbdict::execFSOPENCONF(Signal* sign
 /* ---------------------------------------------------------------- */
 // An open file was refused.
 /* ---------------------------------------------------------------- */
-void Dbdict::execFSOPENREF(Signal* signal) 
+void Dbdict::execFSOPENREF(Signal* signal)
 {
   jamEntry();
   FsRef * const fsRef = (FsRef *)&signal->theData[0];
@@ -1054,7 +1054,7 @@ void Dbdict::execFSOPENREF(Signal* signa
 /* ---------------------------------------------------------------- */
 // A file was successfully read.
 /* ---------------------------------------------------------------- */
-void Dbdict::execFSREADCONF(Signal* signal) 
+void Dbdict::execFSREADCONF(Signal* signal)
 {
   jamEntry();
   FsConf * const fsConf = (FsConf *)&signal->theData[0];
@@ -1091,7 +1091,7 @@ void Dbdict::execFSREADCONF(Signal* sign
 /* ---------------------------------------------------------------- */
 // A read file was refused.
 /* ---------------------------------------------------------------- */
-void Dbdict::execFSREADREF(Signal* signal) 
+void Dbdict::execFSREADREF(Signal* signal)
 {
   jamEntry();
   FsRef * const fsRef = (FsRef *)&signal->theData[0];
@@ -1119,7 +1119,7 @@ void Dbdict::execFSREADREF(Signal* signa
 /* ---------------------------------------------------------------- */
 // A file was successfully written.
 /* ---------------------------------------------------------------- */
-void Dbdict::execFSWRITECONF(Signal* signal) 
+void Dbdict::execFSWRITECONF(Signal* signal)
 {
   FsConf * const fsConf = (FsConf *)&signal->theData[0];
   FsConnectRecordPtr fsPtr;
@@ -1145,11 +1145,11 @@ void Dbdict::execFSWRITECONF(Signal* sig
 // Routines to handle Read/Write of Table Files
 /* ---------------------------------------------------------------- */
 void
-Dbdict::writeTableFile(Signal* signal, Uint32 tableId, 
+Dbdict::writeTableFile(Signal* signal, Uint32 tableId,
 		       SegmentedSectionPtr tabInfoPtr, Callback* callback){
-  
+
   ndbrequire(c_writeTableRecord.tableWriteState == WriteTableRecord::IDLE);
-  
+
   Uint32 pages = WORDS2PAGES(tabInfoPtr.sz);
   c_writeTableRecord.no_of_words = tabInfoPtr.sz;
   c_writeTableRecord.tableWriteState = WriteTableRecord::TWR_CALLBACK;
@@ -1157,16 +1157,16 @@ Dbdict::writeTableFile(Signal* signal, U
 
   c_writeTableRecord.pageId = 0;
   ndbrequire(pages == 1);
-  
+
   PageRecordPtr pageRecPtr;
   c_pageRecordArray.getPtr(pageRecPtr, c_writeTableRecord.pageId);
   copy(&pageRecPtr.p->word[ZPAGE_HEADER_SIZE], tabInfoPtr);
-  
+
   memset(&pageRecPtr.p->word[0], 0, 4 * ZPAGE_HEADER_SIZE);
-  pageRecPtr.p->word[ZPOS_CHECKSUM] = 
-    computeChecksum(&pageRecPtr.p->word[0], 
+  pageRecPtr.p->word[ZPOS_CHECKSUM] =
+    computeChecksum(&pageRecPtr.p->word[0],
 		    pages * ZSIZE_OF_PAGES_IN_WORDS);
-  
+
   startWriteTableFile(signal, tableId);
 }
 
@@ -1220,7 +1220,7 @@ void Dbdict::openTableFile(Signal* signa
                            Uint32 fileNo,
                            Uint32 fsConPtr,
                            Uint32 tableId,
-                           bool   writeFlag) 
+                           bool   writeFlag)
 {
   FsOpenReq * const fsOpenReq = (FsOpenReq *)&signal->theData[0];
 
@@ -1228,10 +1228,10 @@ void Dbdict::openTableFile(Signal* signa
   fsOpenReq->userPointer = fsConPtr;
   if (writeFlag) {
     jam();
-    fsOpenReq->fileFlags = 
-      FsOpenReq::OM_WRITEONLY | 
-      FsOpenReq::OM_TRUNCATE | 
-      FsOpenReq::OM_CREATE | 
+    fsOpenReq->fileFlags =
+      FsOpenReq::OM_WRITEONLY |
+      FsOpenReq::OM_TRUNCATE |
+      FsOpenReq::OM_CREATE |
       FsOpenReq::OM_SYNC;
   } else {
     jam();
@@ -1255,7 +1255,7 @@ void Dbdict::openTableFile(Signal* signa
   sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA);
 }//openTableFile()
 
-void Dbdict::writeTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr) 
+void Dbdict::writeTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
 {
   FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
 
@@ -1264,7 +1264,7 @@ void Dbdict::writeTableFile(Signal* sign
   fsRWReq->userPointer = fsConPtr;
   fsRWReq->operationFlag = 0; // Initialise before bit changes
   FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 1);
-  FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag, 
+  FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
                                 FsReadWriteReq::fsFormatArrayOfPages);
   fsRWReq->varIndex = ZBAT_TABLE_FILE;
   fsRWReq->numberOfPages = WORDS2PAGES(c_writeTableRecord.no_of_words);
@@ -1290,7 +1290,7 @@ void Dbdict::closeWriteTableConf(Signal*
     fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_TAB_FILE;
     openTableFile(signal, 1, fsPtr.i, c_writeTableRecord.tableId, true);
     return;
-  } 
+  }
   ndbrequire(c_writeTableRecord.noOfTableFilesHandled == 2);
   c_fsConnectRecordPool.release(fsPtr);
   WriteTableRecord::TableWriteState state = c_writeTableRecord.tableWriteState;
@@ -1315,7 +1315,7 @@ void Dbdict::startReadTableFile(Signal*
 {
   //globalSignalLoggers.log(number(), "startReadTableFile");
   ndbrequire(!c_readTableRecord.inUse);
-  
+
   FsConnectRecordPtr fsPtr;
   c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord());
   c_readTableRecord.inUse = true;
@@ -1325,14 +1325,14 @@ void Dbdict::startReadTableFile(Signal*
 }//Dbdict::startReadTableFile()
 
 void Dbdict::openReadTableRef(Signal* signal,
-                              FsConnectRecordPtr fsPtr) 
+                              FsConnectRecordPtr fsPtr)
 {
   fsPtr.p->fsState = FsConnectRecord::OPEN_READ_TAB_FILE2;
   openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false);
   return;
 }//Dbdict::openReadTableConf()
 
-void Dbdict::readTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr) 
+void Dbdict::readTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
 {
   FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
 
@@ -1341,7 +1341,7 @@ void Dbdict::readTableFile(Signal* signa
   fsRWReq->userPointer = fsConPtr;
   fsRWReq->operationFlag = 0; // Initialise before bit changes
   FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 0);
-  FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag, 
+  FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
                                 FsReadWriteReq::fsFormatArrayOfPages);
   fsRWReq->varIndex = ZBAT_TABLE_FILE;
   fsRWReq->numberOfPages = WORDS2PAGES(c_readTableRecord.no_of_words);
@@ -1367,10 +1367,10 @@ void Dbdict::readTableConf(Signal* signa
 
   PageRecordPtr tmpPagePtr;
   c_pageRecordArray.getPtr(tmpPagePtr, c_readTableRecord.pageId);
-  Uint32 sz = 
+  Uint32 sz =
     WORDS2PAGES(c_readTableRecord.no_of_words)*ZSIZE_OF_PAGES_IN_WORDS;
   Uint32 chk = computeChecksum((const Uint32*)tmpPagePtr.p, sz);
-  
+
   ndbrequire((chk == 0) || !crashInd);
   if(chk != 0){
     jam();
@@ -1378,7 +1378,7 @@ void Dbdict::readTableConf(Signal* signa
     readTableRef(signal, fsPtr);
     return;
   }//if
-  
+
   fsPtr.p->fsState = FsConnectRecord::CLOSE_READ_TAB_FILE;
   closeFile(signal, fsPtr.p->filePtr, fsPtr.i);
   return;
@@ -1400,7 +1400,7 @@ void Dbdict::closeReadTableConf(Signal*
 {
   c_fsConnectRecordPool.release(fsPtr);
   c_readTableRecord.inUse = false;
-  
+
   execute(signal, c_readTableRecord.m_callback, 0);
   return;
 }//Dbdict::closeReadTableConf()
@@ -1411,7 +1411,7 @@ void Dbdict::closeReadTableConf(Signal*
 NdbOut& operator<<(NdbOut& out, const SchemaFile::TableEntry entry);
 
 void
-Dbdict::updateSchemaState(Signal* signal, Uint32 tableId, 
+Dbdict::updateSchemaState(Signal* signal, Uint32 tableId,
 			  SchemaFile::TableEntry* te, Callback* callback,
                           bool savetodisk, bool dicttrans)
 {
@@ -1435,11 +1435,11 @@ Dbdict::updateSchemaState(Signal* signal
   * tableEntry = * te;
   computeChecksum(xsf, tableId / NDB_SF_PAGE_ENTRIES);
 #else
-  SchemaFile::TableState newState = 
+  SchemaFile::TableState newState =
     (SchemaFile::TableState)te->m_tableState;
-  SchemaFile::TableState oldState = 
+  SchemaFile::TableState oldState =
     (SchemaFile::TableState)tableEntry->m_tableState;
-  
+
   Uint32 newVersion = te->m_tableVersion;
   Uint32 oldVersion = tableEntry->m_tableVersion;
 
@@ -1507,7 +1507,7 @@ Dbdict::updateSchemaState(Signal* signal
     ndbrequire((oldState == SchemaFile::ADD_STARTED));
   }//if
   ndbrequire(ok);
-  
+
   * tableEntry = * te;
   computeChecksum(xsf, tableId / NDB_SF_PAGE_ENTRIES);
 
@@ -1515,13 +1515,13 @@ Dbdict::updateSchemaState(Signal* signal
   {
     ndbrequire(c_writeSchemaRecord.inUse == false);
     c_writeSchemaRecord.inUse = true;
-    
+
     c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
     c_writeSchemaRecord.newFile = false;
     c_writeSchemaRecord.firstPage = tableId / NDB_SF_PAGE_ENTRIES;
     c_writeSchemaRecord.noOfPages = 1;
     c_writeSchemaRecord.m_callback = * callback;
-    
+
     startWriteSchemaFile(signal);
   }
   else
@@ -1555,12 +1555,12 @@ void Dbdict::openSchemaFile(Signal* sign
   fsOpenReq->userPointer = fsConPtr;
   if (writeFlag) {
     jam();
-    fsOpenReq->fileFlags = 
-      FsOpenReq::OM_WRITEONLY | 
+    fsOpenReq->fileFlags =
+      FsOpenReq::OM_WRITEONLY |
       FsOpenReq::OM_SYNC;
     if (newFile)
       fsOpenReq->fileFlags |=
-        FsOpenReq::OM_TRUNCATE | 
+        FsOpenReq::OM_TRUNCATE |
         FsOpenReq::OM_CREATE;
   } else {
     jam();
@@ -1582,7 +1582,7 @@ void Dbdict::openSchemaFile(Signal* sign
   sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA);
 }//openSchemaFile()
 
-void Dbdict::writeSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr) 
+void Dbdict::writeSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
 {
   FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
 
@@ -1597,7 +1597,7 @@ void Dbdict::writeSchemaFile(Signal* sig
   fsRWReq->userPointer = fsConPtr;
   fsRWReq->operationFlag = 0; // Initialise before bit changes
   FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 1);
-  FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag, 
+  FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
                                 FsReadWriteReq::fsFormatArrayOfPages);
   fsRWReq->varIndex = ZBAT_SCHEMA_FILE;
   fsRWReq->numberOfPages = wr.noOfPages;
@@ -1615,7 +1615,7 @@ void Dbdict::writeSchemaConf(Signal* sig
   return;
 }//Dbdict::writeSchemaConf()
 
-void Dbdict::closeFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr) 
+void Dbdict::closeFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
 {
   FsCloseReq * const fsCloseReq = (FsCloseReq *)&signal->theData[0];
   fsCloseReq->filePointer = filePtr;
@@ -1635,9 +1635,9 @@ void Dbdict::closeWriteSchemaConf(Signal
     fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_SCHEMA;
     openSchemaFile(signal, 1, fsPtr.i, true, c_writeSchemaRecord.newFile);
     return;
-  } 
+  }
   ndbrequire(c_writeSchemaRecord.noOfSchemaFilesHandled == 2);
-  
+
   c_fsConnectRecordPool.release(fsPtr);
 
   c_writeSchemaRecord.inUse = false;
@@ -1655,13 +1655,13 @@ void Dbdict::startReadSchemaFile(Signal*
 }//Dbdict::startReadSchemaFile()
 
 void Dbdict::openReadSchemaRef(Signal* signal,
-                               FsConnectRecordPtr fsPtr) 
+                               FsConnectRecordPtr fsPtr)
 {
   fsPtr.p->fsState = FsConnectRecord::OPEN_READ_SCHEMA2;
   openSchemaFile(signal, 1, fsPtr.i, false, false);
 }//Dbdict::openReadSchemaRef()
 
-void Dbdict::readSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr) 
+void Dbdict::readSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
 {
   FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
 
@@ -1676,7 +1676,7 @@ void Dbdict::readSchemaFile(Signal* sign
   fsRWReq->userPointer = fsConPtr;
   fsRWReq->operationFlag = 0; // Initialise before bit changes
   FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 0);
-  FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag, 
+  FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
                                 FsReadWriteReq::fsFormatArrayOfPages);
   fsRWReq->varIndex = ZBAT_SCHEMA_FILE;
   fsRWReq->numberOfPages = rr.noOfPages;
@@ -1736,8 +1736,8 @@ void Dbdict::readSchemaConf(Signal* sign
     return;
   }
 
-  if (sf0->NdbVersion < NDB_MAKE_VERSION(6,4,0) && 
-      ! convertSchemaFileTo_6_4(xsf)) 
+  if (sf0->NdbVersion < NDB_MAKE_VERSION(6,4,0) &&
+      ! convertSchemaFileTo_6_4(xsf))
   {
     jam();
     ndbrequire(! crashInd);
@@ -1812,7 +1812,7 @@ void Dbdict::closeReadSchemaConf(Signal*
     jam();
     {
       // write back both copies
-      
+
       ndbrequire(c_writeSchemaRecord.inUse == false);
       XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0 ];
       Uint32 noOfPages =
@@ -1826,7 +1826,7 @@ void Dbdict::closeReadSchemaConf(Signal*
       c_writeSchemaRecord.firstPage = 0;
       c_writeSchemaRecord.noOfPages = xsf->noOfPages;
 
-      c_writeSchemaRecord.m_callback.m_callbackFunction = 
+      c_writeSchemaRecord.m_callback.m_callbackFunction =
         safe_cast(&Dbdict::initSchemaFile_conf);
 
       startWriteSchemaFile(signal);
@@ -1901,7 +1901,7 @@ Dbdict::convertSchemaFileTo_6_4(XSchemaF
     {
       Uint32 n = i * NDB_SF_PAGE_ENTRIES + j;
       SchemaFile::TableEntry * transEntry = getTableEntry(xsf, n);
-      
+
       switch(SchemaFile::Old::TableState(transEntry->m_tableState)) {
       case SchemaFile::Old::INIT:
         transEntry->m_tableState = SchemaFile::SF_UNUSED;
@@ -1962,7 +1962,7 @@ Dbdict::Dbdict(Block_context& ctx):
   c_opRecordSequence(0)
 {
   BLOCK_CONSTRUCTOR(Dbdict);
-  
+
   // Transit signals
   addRecSignal(GSN_DUMP_STATE_ORD, &Dbdict::execDUMP_STATE_ORD);
   addRecSignal(GSN_GET_TABINFOREQ, &Dbdict::execGET_TABINFOREQ);
@@ -2095,11 +2095,11 @@ Dbdict::Dbdict(Block_context& ctx):
   addRecSignal(GSN_LIST_TABLES_REQ, &Dbdict::execLIST_TABLES_REQ);
 
   addRecSignal(GSN_DROP_TABLE_REQ, &Dbdict::execDROP_TABLE_REQ);
-  
+
   addRecSignal(GSN_PREP_DROP_TAB_REQ, &Dbdict::execPREP_DROP_TAB_REQ);
   addRecSignal(GSN_PREP_DROP_TAB_REF, &Dbdict::execPREP_DROP_TAB_REF);
   addRecSignal(GSN_PREP_DROP_TAB_CONF, &Dbdict::execPREP_DROP_TAB_CONF);
-  
+
   addRecSignal(GSN_DROP_TAB_REF, &Dbdict::execDROP_TAB_REF);
   addRecSignal(GSN_DROP_TAB_CONF, &Dbdict::execDROP_TAB_CONF);
 
@@ -2173,7 +2173,7 @@ Dbdict::Dbdict(Block_context& ctx):
   addRecSignal(GSN_INDEX_STAT_REP, &Dbdict::execINDEX_STAT_REP);
 }//Dbdict::Dbdict()
 
-Dbdict::~Dbdict() 
+Dbdict::~Dbdict()
 {
 }//Dbdict::~Dbdict()
 
@@ -2193,7 +2193,7 @@ Dbdict::getParam(const char * name, Uint
   return false;
 }
 
-void Dbdict::initCommonData() 
+void Dbdict::initCommonData()
 {
 /* ---------------------------------------------------------------- */
 // Initialise all common variables.
@@ -2231,7 +2231,7 @@ void Dbdict::initCommonData()
 
 }//Dbdict::initCommonData()
 
-void Dbdict::initRecords() 
+void Dbdict::initRecords()
 {
   initNodeRecords();
   initPageRecords();
@@ -2239,7 +2239,7 @@ void Dbdict::initRecords()
   initTriggerRecords();
 }//Dbdict::initRecords()
 
-void Dbdict::initSendSchemaRecord() 
+void Dbdict::initSendSchemaRecord()
 {
   c_sendSchemaRecord.noOfWords = (Uint32)-1;
   c_sendSchemaRecord.pageId = RNIL;
@@ -2249,7 +2249,7 @@ void Dbdict::initSendSchemaRecord()
   //c_sendSchemaRecord.sendSchemaState = SendSchemaRecord::IDLE;
 }//initSendSchemaRecord()
 
-void Dbdict::initReadTableRecord() 
+void Dbdict::initReadTableRecord()
 {
   c_readTableRecord.no_of_words= 0;
   c_readTableRecord.pageId = RNIL;
@@ -2257,7 +2257,7 @@ void Dbdict::initReadTableRecord()
   c_readTableRecord.inUse = false;
 }//initReadTableRecord()
 
-void Dbdict::initWriteTableRecord() 
+void Dbdict::initWriteTableRecord()
 {
   c_writeTableRecord.no_of_words= 0;
   c_writeTableRecord.pageId = RNIL;
@@ -2266,20 +2266,20 @@ void Dbdict::initWriteTableRecord()
   c_writeTableRecord.tableWriteState = WriteTableRecord::IDLE;
 }//initWriteTableRecord()
 
-void Dbdict::initReadSchemaRecord() 
+void Dbdict::initReadSchemaRecord()
 {
   c_readSchemaRecord.pageId = RNIL;
   c_readSchemaRecord.schemaReadState = ReadSchemaRecord::IDLE;
 }//initReadSchemaRecord()
 
-void Dbdict::initWriteSchemaRecord() 
+void Dbdict::initWriteSchemaRecord()
 {
   c_writeSchemaRecord.inUse = false;
   c_writeSchemaRecord.pageId = RNIL;
   c_writeSchemaRecord.noOfSchemaFilesHandled = 3;
 }//initWriteSchemaRecord()
 
-void Dbdict::initRetrieveRecord(Signal* signal, Uint32 i, Uint32 returnCode) 
+void Dbdict::initRetrieveRecord(Signal* signal, Uint32 i, Uint32 returnCode)
 {
   c_retrieveRecord.busyState = false;
   c_retrieveRecord.blockRef = 0;
@@ -2291,13 +2291,13 @@ void Dbdict::initRetrieveRecord(Signal*
   c_retrieveRecord.m_useLongSig = false;
 }//initRetrieveRecord()
 
-void Dbdict::initSchemaRecord() 
+void Dbdict::initSchemaRecord()
 {
   c_schemaRecord.schemaPage = RNIL;
   c_schemaRecord.oldSchemaPage = RNIL;
 }//Dbdict::initSchemaRecord()
 
-void Dbdict::initNodeRecords() 
+void Dbdict::initNodeRecords()
 {
   jam();
   for (unsigned i = 1; i < MAX_NDB_NODES; i++) {
@@ -2309,7 +2309,7 @@ void Dbdict::initNodeRecords()
   }//for
 }//Dbdict::initNodeRecords()
 
-void Dbdict::initPageRecords() 
+void Dbdict::initPageRecords()
 {
   c_retrieveRecord.retrievePage =  ZMAX_PAGES_OF_TABLE_DEFINITION;
   ndbrequire(ZNUMBER_OF_PAGES >= (ZMAX_PAGES_OF_TABLE_DEFINITION + 1));
@@ -2317,7 +2317,7 @@ void Dbdict::initPageRecords()
   c_schemaRecord.oldSchemaPage = NDB_SF_MAX_PAGES;
 }//Dbdict::initPageRecords()
 
-void Dbdict::initTableRecords() 
+void Dbdict::initTableRecords()
 {
   TableRecordPtr tablePtr;
   while (1) {
@@ -2332,7 +2332,7 @@ void Dbdict::initTableRecords()
   }//while
 }//Dbdict::initTableRecords()
 
-void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr) 
+void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr)
 {
   new (tablePtr.p) TableRecord();
   tablePtr.p->filePtr[0] = RNIL;
@@ -2401,7 +2401,7 @@ void Dbdict::initialiseTriggerRecord(Tri
   triggerPtr.p->indexId = RNIL;
 }
 
-Uint32 Dbdict::getFsConnRecord() 
+Uint32 Dbdict::getFsConnRecord()
 {
   FsConnectRecordPtr fsPtr;
   c_fsConnectRecordPool.seize(fsPtr);
@@ -2439,13 +2439,8 @@ Uint32 Dbdict::getFreeObjId(Uint32 minId
   return RNIL;
 }
 
-Uint32 Dbdict::getFreeTableRecord(Uint32 primaryTableId) 
+Uint32 Dbdict::getFreeTableRecord()
 {
-  Uint32 minId = (primaryTableId == RNIL ? 0 : primaryTableId + 1);
-  if (ERROR_INSERTED(6012) && minId < 4096){
-    minId = 4096;
-    CLEAR_ERROR_INSERT_VALUE;
-  }
   Uint32 i = getFreeObjId(0);
   if (i == RNIL) {
     jam();
@@ -2541,7 +2536,7 @@ Dbdict::check_write_obj(Uint32 objId, Ui
       jam();
       return GetTabInfoRef::TableNotDefined;
     }
-    
+
     if (te->m_transId == 0 || te->m_transId == transId)
     {
       jam();
@@ -2583,7 +2578,7 @@ Dbdict::check_write_obj(Uint32 objId, Ui
 /* ---------------------------------------------------------------- */
 // This is sent as the first signal during start/restart.
 /* ---------------------------------------------------------------- */
-void Dbdict::execSTTOR(Signal* signal) 
+void Dbdict::execSTTOR(Signal* signal)
 {
   jamEntry();
   c_startPhase = signal->theData[1];
@@ -2624,7 +2619,7 @@ void Dbdict::sendSTTORRY(Signal* signal)
 /* ---------------------------------------------------------------- */
 // We receive information about sizes of records.
 /* ---------------------------------------------------------------- */
-void Dbdict::execREAD_CONFIG_REQ(Signal* signal) 
+void Dbdict::execREAD_CONFIG_REQ(Signal* signal)
 {
   const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
   Uint32 ref = req->senderRef;
@@ -2632,13 +2627,13 @@ void Dbdict::execREAD_CONFIG_REQ(Signal*
   ndbrequire(req->noOfParameters == 0);
 
   jamEntry();
- 
-  const ndb_mgm_configuration_iterator * p = 
+
+  const ndb_mgm_configuration_iterator * p =
     m_ctx.m_config.getOwnConfigIterator();
   ndbrequire(p != 0);
-  
+
   Uint32 attributesize, tablerecSize;
-  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TRIGGERS, 
+  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TRIGGERS,
 					&c_maxNoOfTriggers));
   ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_ATTRIBUTE,&attributesize));
   ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_TABLE, &tablerecSize));
@@ -2740,7 +2735,7 @@ void Dbdict::execREAD_CONFIG_REQ(Signal*
   ndb_mgm_get_int_parameter(p, CFG_DB_STRING_MEMORY, &sm);
   if (sm == 0)
     sm = 25;
-  
+
   Uint64 sb = 0;
   if (sm <= 100)
   {
@@ -2750,12 +2745,12 @@ void Dbdict::execREAD_CONFIG_REQ(Signal*
   {
     sb = sm;
   }
-  
-  sb /= (Rope::getSegmentSize() * sizeof(Uint32));
+
+  sb /= (LocalRope::getSegmentSize() * sizeof(Uint32));
   sb += 100; // more safty
   ndbrequire(sb < (Uint64(1) << 32));
   c_rope_pool.setSize(Uint32(sb));
-  
+
   // Initialize BAT for interface to file system
   NewVARIABLE* bat = allocateBat(2);
   bat[0].WA = &c_schemaPageRecordArray.getPtr(0)->word[0];
@@ -2775,12 +2770,12 @@ void Dbdict::execREAD_CONFIG_REQ(Signal*
   ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
   conf->senderRef = reference();
   conf->senderData = senderData;
-  sendSignal(ref, GSN_READ_CONFIG_CONF, signal, 
+  sendSignal(ref, GSN_READ_CONFIG_CONF, signal,
 	     ReadConfigConf::SignalLength, JBB);
 
   {
-    Ptr<DictObject> ptr;
-    SLList<DictObject> objs(c_obj_pool);
+    DictObjectPtr ptr;
+    DictObject_list objs(c_obj_pool);
     while(objs.seize(ptr))
       new (ptr.p) DictObject();
     objs.release();
@@ -2803,7 +2798,7 @@ void Dbdict::execREAD_CONFIG_REQ(Signal*
 // Start phase signals sent by CNTR. We reply with NDB_STTORRY when
 // we completed this phase.
 /* ---------------------------------------------------------------- */
-void Dbdict::execNDB_STTOR(Signal* signal) 
+void Dbdict::execNDB_STTOR(Signal* signal)
 {
   jamEntry();
   c_startPhase = signal->theData[2];
@@ -2851,7 +2846,7 @@ void Dbdict::execNDB_STTOR(Signal* signa
   }//switch
 }//execNDB_STTOR()
 
-void Dbdict::sendNDB_STTORRY(Signal* signal) 
+void Dbdict::sendNDB_STTORRY(Signal* signal)
 {
   signal->theData[0] = reference();
   sendSignal(NDBCNTR_REF, GSN_NDB_STTORRY, signal, 1, JBB);
@@ -2861,7 +2856,7 @@ void Dbdict::sendNDB_STTORRY(Signal* sig
 /* ---------------------------------------------------------------- */
 // We receive the information about which nodes that are up and down.
 /* ---------------------------------------------------------------- */
-void Dbdict::execREAD_NODESCONF(Signal* signal) 
+void Dbdict::execREAD_NODESCONF(Signal* signal)
 {
   jamEntry();
 
@@ -2898,7 +2893,7 @@ void Dbdict::execREAD_NODESCONF(Signal*
   sendNDB_STTORRY(signal);
 }//execREAD_NODESCONF()
 
-void Dbdict::initSchemaFile(Signal* signal) 
+void Dbdict::initSchemaFile(Signal* signal)
 {
   XSchemaFile * xsf = &c_schemaFile[SchemaRecord::NEW_SCHEMA_FILE];
   xsf->noOfPages = (c_tableRecordPool.getSize() + NDB_SF_PAGE_ENTRIES - 1)
@@ -2908,8 +2903,8 @@ void Dbdict::initSchemaFile(Signal* sign
   XSchemaFile * oldxsf = &c_schemaFile[SchemaRecord::OLD_SCHEMA_FILE];
   oldxsf->noOfPages = xsf->noOfPages;
   memcpy(&oldxsf->schemaPage[0], &xsf->schemaPage[0], xsf->schemaPage[0].FileSize);
-  
-  if (c_initialStart || c_initialNodeRestart) {    
+
+  if (c_initialStart || c_initialNodeRestart) {
     jam();
     ndbrequire(c_writeSchemaRecord.inUse == false);
     c_writeSchemaRecord.inUse = true;
@@ -2918,9 +2913,9 @@ void Dbdict::initSchemaFile(Signal* sign
     c_writeSchemaRecord.firstPage = 0;
     c_writeSchemaRecord.noOfPages = xsf->noOfPages;
 
-    c_writeSchemaRecord.m_callback.m_callbackFunction = 
+    c_writeSchemaRecord.m_callback.m_callbackFunction =
       safe_cast(&Dbdict::initSchemaFile_conf);
-    
+
     startWriteSchemaFile(signal);
   } else if (c_systemRestart || c_nodeRestart) {
     jam();
@@ -3106,7 +3101,7 @@ Dbdict::activateIndex_fromEndTrans(Signa
   {
     DictObjectPtr obj_ptr;
     c_obj_pool.getPtr(obj_ptr, indexPtr.p->m_obj_ptr_i);
-    Rope name(c_rope_pool, obj_ptr.p->m_name);
+    LocalRope name(c_rope_pool, obj_ptr.p->m_name);
     name.copy(indexName);
   }
 
@@ -3267,7 +3262,7 @@ Dbdict::rebuildIndex_fromEndTrans(Signal
   {
     DictObjectPtr obj_ptr;
     c_obj_pool.getPtr(obj_ptr, indexPtr.p->m_obj_ptr_i);
-    Rope name(c_rope_pool, obj_ptr.p->m_name);
+    LocalRope name(c_rope_pool, obj_ptr.p->m_name);
     name.copy(indexName);
   }
 
@@ -3307,7 +3302,7 @@ Dbdict::rebuildIndex_fromEndTrans(Signal
 // tables that should be started as part of this system restart.
 // DICT will also activate the tables in TC as part of this process.
 /* ---------------------------------------------------------------- */
-void Dbdict::execDICTSTARTREQ(Signal* signal) 
+void Dbdict::execDICTSTARTREQ(Signal* signal)
 {
   jamEntry();
   c_restartRecord.gciToRestart = signal->theData[0];
@@ -3319,7 +3314,7 @@ void Dbdict::execDICTSTARTREQ(Signal* si
     c_restartRecord.m_senderData = 0;
   }
   if (c_nodeRestart || c_initialNodeRestart) {
-    jam();   
+    jam();
 
     CRASH_INSERTION(6000);
 
@@ -3332,7 +3327,7 @@ void Dbdict::execDICTSTARTREQ(Signal* si
   ndbrequire(c_masterNodeId == getOwnNodeId());
 
   c_schemaRecord.m_callback.m_callbackData = 0;
-  c_schemaRecord.m_callback.m_callbackFunction = 
+  c_schemaRecord.m_callback.m_callbackFunction =
     safe_cast(&Dbdict::masterRestart_checkSchemaStatusComplete);
 
   /**
@@ -3373,7 +3368,7 @@ Dbdict::masterRestart_checkSchemaStatusC
   Callback c = { 0, 0 };
   sendFragmentedSignal(rg,
 		       GSN_SCHEMA_INFO,
-		       signal, 
+		       signal,
 		       1, //SchemaInfo::SignalLength,
 		       JBB,
 		       ptr,
@@ -3389,27 +3384,27 @@ Dbdict::masterRestart_checkSchemaStatusC
   sendSignal(reference(), GSN_SCHEMA_INFOCONF, signal, 1, JBB);
 }
 
-void 
+void
 Dbdict::execGET_SCHEMA_INFOREQ(Signal* signal){
 
   const Uint32 ref = signal->getSendersBlockRef();
   //const Uint32 senderData = signal->theData[0];
-  
+
   ndbrequire(c_sendSchemaRecord.inUse == false);
   c_sendSchemaRecord.inUse = true;
 
   LinearSectionPtr ptr[3];
-  
+
   XSchemaFile * xsf = &c_schemaFile[SchemaRecord::NEW_SCHEMA_FILE];
   ndbrequire(xsf->noOfPages != 0);
-  
+
   ptr[0].p = (Uint32*)&xsf->schemaPage[0];
   ptr[0].sz = xsf->noOfPages * NDB_SF_PAGE_SIZE_IN_WORDS;
 
   Callback c = { safe_cast(&Dbdict::sendSchemaComplete), 0 };
   sendFragmentedSignal(ref,
 		       GSN_SCHEMA_INFO,
-		       signal, 
+		       signal,
 		       1, //GetSchemaInfoConf::SignalLength,
 		       JBB,
 		       ptr,
@@ -3418,7 +3413,7 @@ Dbdict::execGET_SCHEMA_INFOREQ(Signal* s
 }//Dbdict::execGET_SCHEMA_INFOREQ()
 
 void
-Dbdict::sendSchemaComplete(Signal * signal, 
+Dbdict::sendSchemaComplete(Signal * signal,
 			   Uint32 callbackData,
 			   Uint32 returnCode){
   ndbrequire(c_sendSchemaRecord.inUse == true);
@@ -3431,7 +3426,7 @@ Dbdict::sendSchemaComplete(Signal * sign
 // We receive the schema info from master as part of all restarts
 // except the initial start where no tables exists.
 /* ---------------------------------------------------------------- */
-void Dbdict::execSCHEMA_INFO(Signal* signal) 
+void Dbdict::execSCHEMA_INFO(Signal* signal)
 {
   jamEntry();
   if(!assembleFragments(signal)){
@@ -3463,13 +3458,13 @@ void Dbdict::execSCHEMA_INFO(Signal* sig
   xsf->noOfPages = schemaDataPtr.sz / NDB_SF_PAGE_SIZE_IN_WORDS;
   copy((Uint32*)&xsf->schemaPage[0], schemaDataPtr);
   releaseSections(handle);
-  
+
   SchemaFile * sf0 = &xsf->schemaPage[0];
   if (sf0->NdbVersion < NDB_SF_VERSION_5_0_6) {
     bool ok = convertSchemaFileTo_5_0_6(xsf);
     ndbrequire(ok);
   }
-    
+
   if (sf0->NdbVersion < NDB_MAKE_VERSION(6,4,0))
   {
     jam();
@@ -3484,7 +3479,7 @@ void Dbdict::execSCHEMA_INFO(Signal* sig
   resizeSchemaFile(xsf, ownxsf->noOfPages);
 
   ndbrequire(signal->getSendersBlockRef() != reference());
-    
+
   /* ---------------------------------------------------------------- */
   // Synchronise our view on data with other nodes in the cluster.
   // This is an important part of restart handling where we will handle
@@ -3498,7 +3493,7 @@ void Dbdict::execSCHEMA_INFO(Signal* sig
 }//execSCHEMA_INFO()
 
 void
-Dbdict::restart_checkSchemaStatusComplete(Signal * signal, 
+Dbdict::restart_checkSchemaStatusComplete(Signal * signal,
 					  Uint32 callbackData,
 					  Uint32 returnCode)
 {
@@ -3512,14 +3507,14 @@ Dbdict::restart_checkSchemaStatusComplet
 	       signal, 1, JBB);
     return;
   }
-  
+
   ndbrequire(c_restartRecord.m_op_cnt == 0);
   ndbrequire(c_nodeRestart || c_initialNodeRestart);
   activateIndexes(signal, 0);
   return;
 }
 
-void Dbdict::execSCHEMA_INFOCONF(Signal* signal) 
+void Dbdict::execSCHEMA_INFOCONF(Signal* signal)
 {
   jamEntry();
   ndbrequire(signal->getNoOfSections() == 0);
@@ -3539,7 +3534,7 @@ void Dbdict::execSCHEMA_INFOCONF(Signal*
   activateIndexes(signal, 0);
 }//execSCHEMA_INFOCONF()
 
-static bool 
+static bool
 checkSchemaStatus(Uint32 tableType, Uint32 pass)
 {
   switch(tableType){
@@ -3622,14 +3617,14 @@ void Dbdict::initRestartRecord(Uint32 st
  * Pass 3 Create old HashMap
  * Pass 4 Create old Table           // NOT DONE DUE TO DIH
  * Pass 5 Create old Index           // NOT DONE DUE TO DIH
- 
+
  * Pass 6 Drop old Index             // NOT DONE DUE TO DIH
  * Pass 7 Drop old Table             // NOT DONE DUE TO DIH
  * Pass 8 Drop old HashMap
  * Pass 9 Drop old Datafile/Undofile
  * Pass 10 Drop old Tablespace
  * Pass 11 Drop old Logfilegroup
- 
+
  * Pass 12 Create new LogfileGroup
  * Pass 13 Create new Tablespace
  * Pass 14 Create new Datafile/Undofile
@@ -3638,7 +3633,7 @@ void Dbdict::initRestartRecord(Uint32 st
  * Pass 17 Create new Index
  */
 
-void Dbdict::checkSchemaStatus(Signal* signal) 
+void Dbdict::checkSchemaStatus(Signal* signal)
 {
   // masterxsf == schema file of master (i.e what's currently in cluster)
   // ownxsf = schema file read from disk
@@ -3754,7 +3749,7 @@ void Dbdict::checkSchemaStatus(Signal* s
     jam();
 
     c_restartRecord.m_op_cnt = 0;
-    
+
     TxHandlePtr tx_ptr;
     c_txHandleHash.getPtr(tx_ptr, c_restartRecord.m_tx_ptr_i);
 
@@ -3781,7 +3776,7 @@ Dbdict::checkPendingSchemaTrans(XSchemaF
         transEntry->m_transId != 0)
     {
       jam();
-  
+
       bool commit = false;
       switch(transEntry->m_tableState){
       case SchemaFile::SF_STARTED:
@@ -3896,7 +3891,7 @@ void
 Dbdict::restart_fromBeginTrans(Signal* signal, Uint32 tx_key, Uint32 ret)
 {
   ndbrequire(ret == 0);
-  
+
   TxHandlePtr tx_ptr;
   findTxHandle(tx_ptr, tx_key);
   ndbrequire(!tx_ptr.isNull());
@@ -3928,7 +3923,7 @@ Dbdict::restart_nextOp(Signal* signal, b
     jam();
     c_restartRecord.m_op_cnt = 0;
 
-    Ptr<TxHandle> tx_ptr;
+    TxHandlePtr tx_ptr;
     c_txHandleHash.getPtr(tx_ptr, c_restartRecord.m_tx_ptr_i);
 
     Callback c = {
@@ -4061,19 +4056,19 @@ Dbdict::restartNextPass(Signal* signal)
      * Complete last trans
      */
     jam();
-    
-    c_restartRecord.m_pass--;    
+
+    c_restartRecord.m_pass--;
     c_restartRecord.m_op_cnt = 0;
 
-    Ptr<TxHandle> tx_ptr;
+    TxHandlePtr tx_ptr;
     c_txHandleHash.getPtr(tx_ptr, c_restartRecord.m_tx_ptr_i);
-    
-    Callback c = { 
+
+    Callback c = {
       safe_cast(&Dbdict::restartEndPass_fromEndTrans),
       tx_ptr.p->tx_key
     };
     tx_ptr.p->m_callback = c;
-    
+
     Uint32 flags = 0;
     endSchemaTrans(signal, tx_ptr, flags);
     return;
@@ -4128,7 +4123,7 @@ Dbdict::execGET_TABINFO_CONF(Signal* sig
     jam();
     return;
   }
-  
+
   GetTabInfoConf * const conf = (GetTabInfoConf*)signal->getDataPtr();
 
   switch(conf->tableType){
@@ -4202,12 +4197,12 @@ Dbdict::execGET_TABINFO_CONF(Signal* sig
  * Create Obj during NR/SR
  */
 void
-Dbdict::restartCreateObj(Signal* signal, 
-			 Uint32 tableId, 
+Dbdict::restartCreateObj(Signal* signal,
+			 Uint32 tableId,
 			 const SchemaFile::TableEntry * new_entry,
 			 bool file){
   jam();
-  
+
 
 #ifdef PRINT_SCHEMA_RESTART
   ndbout_c("restartCreateObj table: %u file: %u", tableId, Uint32(file));
@@ -4219,9 +4214,9 @@ Dbdict::restartCreateObj(Signal* signal,
     c_readTableRecord.no_of_words = new_entry->m_info_words;
     c_readTableRecord.pageId = 0;
     c_readTableRecord.m_callback.m_callbackData = tableId;
-    c_readTableRecord.m_callback.m_callbackFunction = 
+    c_readTableRecord.m_callback.m_callbackFunction =
       safe_cast(&Dbdict::restartCreateObj_readConf);
-    
+
     ndbout_c("restartCreateObj(%u) file: %u", tableId, file);
     startReadTableFile(signal, tableId);
   }
@@ -4256,7 +4251,7 @@ Dbdict::restartCreateObj_getTabInfoConf(
 
 void
 Dbdict::restartCreateObj_readConf(Signal* signal,
-				  Uint32 callbackData, 
+				  Uint32 callbackData,
 				  Uint32 returnCode)
 {
   jam();
@@ -4264,7 +4259,7 @@ Dbdict::restartCreateObj_readConf(Signal
 
   PageRecordPtr pageRecPtr;
   c_pageRecordArray.getPtr(pageRecPtr, c_readTableRecord.pageId);
-  
+
   Uint32 sz = c_readTableRecord.no_of_words;
 
   Ptr<SectionSegment> ptr;
@@ -4279,12 +4274,12 @@ Dbdict::restartCreateObj_parse(Signal* s
                                bool file)
 {
   jam();
-  Ptr<SchemaOp> op_ptr;
-  
-  Ptr<TxHandle> tx_ptr;
+  SchemaOpPtr op_ptr;
+
+  TxHandlePtr tx_ptr;
   c_txHandleHash.getPtr(tx_ptr, c_restartRecord.m_tx_ptr_i);
 
-  Ptr<SchemaTrans> trans_ptr;
+  SchemaTransPtr trans_ptr;
   findSchemaTrans(trans_ptr, tx_ptr.p->m_transKey);
 
   switch(c_restartRecord.m_entry.m_tableType){
@@ -4295,27 +4290,27 @@ Dbdict::restartCreateObj_parse(Signal* s
   case DictTabInfo::UniqueOrderedIndex:
   case DictTabInfo::OrderedIndex:
   {
-    Ptr<CreateTableRec> opRecPtr;
+    CreateTableRecPtr opRecPtr;
     seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     break;
   }
   case DictTabInfo::Undofile:
   case DictTabInfo::Datafile:
   {
-    Ptr<CreateFileRec> opRecPtr;
+    CreateFileRecPtr opRecPtr;
     seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     break;
   }
   case DictTabInfo::Tablespace:
   case DictTabInfo::LogfileGroup:
   {
-    Ptr<CreateFilegroupRec> opRecPtr;
+    CreateFilegroupRecPtr opRecPtr;
     seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     break;
   }
   case DictTabInfo::HashMap:
   {
-    Ptr<CreateHashMapRec> opRecPtr;
+    CreateHashMapRecPtr opRecPtr;
     seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     break;
   }
@@ -4323,7 +4318,7 @@ Dbdict::restartCreateObj_parse(Signal* s
 
   op_ptr.p->m_restart = file ? 1 : 2;
   op_ptr.p->m_state = SchemaOp::OS_PARSE_MASTER;
-  
+
   SectionHandle handle(this, ptr.i);
   ErrorInfo error;
   const OpInfo& info = getOpInfo(op_ptr);
@@ -4355,20 +4350,20 @@ Dbdict::restartCreateObj_parse(Signal* s
  * Drop object during NR/SR
  */
 void
-Dbdict::restartDropObj(Signal* signal, 
-                       Uint32 tableId, 
+Dbdict::restartDropObj(Signal* signal,
+                       Uint32 tableId,
                        const SchemaFile::TableEntry * entry)
 {
   jam();
   c_restartRecord.m_entry = *entry;
 
   jam();
-  Ptr<SchemaOp> op_ptr;
+  SchemaOpPtr op_ptr;
 
-  Ptr<TxHandle> tx_ptr;
+  TxHandlePtr tx_ptr;
   c_txHandleHash.getPtr(tx_ptr, c_restartRecord.m_tx_ptr_i);
 
-  Ptr<SchemaTrans> trans_ptr;
+  SchemaTransPtr trans_ptr;
   findSchemaTrans(trans_ptr, tx_ptr.p->m_transKey);
 
   switch(c_restartRecord.m_entry.m_tableType){
@@ -4378,14 +4373,14 @@ Dbdict::restartDropObj(Signal* signal,
   case DictTabInfo::HashIndex:
   case DictTabInfo::UniqueOrderedIndex:
   case DictTabInfo::OrderedIndex:
-    Ptr<DropTableRec> opRecPtr;
+    DropTableRecPtr opRecPtr;
     seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     ndbrequire(false);
     break;
   case DictTabInfo::Undofile:
   case DictTabInfo::Datafile:
   {
-    Ptr<DropFileRec> opRecPtr;
+    DropFileRecPtr opRecPtr;
     seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     opRecPtr.p->m_request.file_id = tableId;
     opRecPtr.p->m_request.file_version = entry->m_tableVersion;
@@ -4394,7 +4389,7 @@ Dbdict::restartDropObj(Signal* signal,
   case DictTabInfo::Tablespace:
   case DictTabInfo::LogfileGroup:
   {
-    Ptr<DropFilegroupRec> opRecPtr;
+    DropFilegroupRecPtr opRecPtr;
     seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     opRecPtr.p->m_request.filegroup_id = tableId;
     opRecPtr.p->m_request.filegroup_version = entry->m_tableVersion;
@@ -4403,10 +4398,10 @@ Dbdict::restartDropObj(Signal* signal,
   }
 
   ndbout_c("restartDropObj(%u)", tableId);
-  
+
   op_ptr.p->m_restart = 1; //
   op_ptr.p->m_state = SchemaOp::OS_PARSE_MASTER;
-  
+
   SectionHandle handle(this);
   ErrorInfo error;
   const OpInfo& info = getOpInfo(op_ptr);
@@ -4443,12 +4438,12 @@ Dbdict::restartDropObj(Signal* signal,
 /* ---------------------------------------------------------------- */
 /* **************************************************************** */
 
-void Dbdict::handleApiFailureCallback(Signal* signal, 
+void Dbdict::handleApiFailureCallback(Signal* signal,
                                       Uint32 failedNodeId,
                                       Uint32 ignoredRc)
 {
   jamEntry();
-  
+
   signal->theData[0] = failedNodeId;
   signal->theData[1] = reference();
   sendSignal(QMGR_REF, GSN_API_FAILCONF, signal, 2, JBB);
@@ -4457,7 +4452,7 @@ void Dbdict::handleApiFailureCallback(Si
 /* ---------------------------------------------------------------- */
 // We receive a report of an API that failed.
 /* ---------------------------------------------------------------- */
-void Dbdict::execAPI_FAILREQ(Signal* signal) 
+void Dbdict::execAPI_FAILREQ(Signal* signal)
 {
   jamEntry();
   Uint32 failedApiNode = signal->theData[0];
@@ -4476,8 +4471,8 @@ void Dbdict::execAPI_FAILREQ(Signal* sig
   handleApiFail(signal, failedApiNode);
 }//execAPI_FAILREQ()
 
-void Dbdict::handleNdbdFailureCallback(Signal* signal, 
-                                       Uint32 failedNodeId, 
+void Dbdict::handleNdbdFailureCallback(Signal* signal,
+                                       Uint32 failedNodeId,
                                        Uint32 ignoredRc)
 {
   jamEntry();
@@ -4487,14 +4482,14 @@ void Dbdict::handleNdbdFailureCallback(S
   nfCompRep->blockNo      = DBDICT;
   nfCompRep->nodeId       = getOwnNodeId();
   nfCompRep->failedNodeId = failedNodeId;
-  sendSignal(DBDIH_REF, GSN_NF_COMPLETEREP, signal, 
+  sendSignal(DBDIH_REF, GSN_NF_COMPLETEREP, signal,
              NFCompleteRep::SignalLength, JBB);
 }
 
 /* ---------------------------------------------------------------- */
 // We receive a report of one or more node failures of kernel nodes.
 /* ---------------------------------------------------------------- */
-void Dbdict::execNODE_FAILREP(Signal* signal) 
+void Dbdict::execNODE_FAILREP(Signal* signal)
 {
   jamEntry();
   NodeFailRep nodeFailRep = *(NodeFailRep *)&signal->theData[0];
@@ -4583,11 +4578,11 @@ void Dbdict::handle_master_takeover(Sign
     pending schema transactions.
     Ask all slave nodes about state of any pending
     transactions
-  */      
+  */
   jam();
   NodeRecordPtr masterNodePtr;
   c_nodes.getPtr(masterNodePtr, c_masterNodeId);
-  
+
   masterNodePtr.p->m_nodes = c_aliveNodes;
   NodeReceiverGroup rg(DBDICT, masterNodePtr.p->m_nodes);
   {
@@ -4616,7 +4611,7 @@ void Dbdict::handle_master_takeover(Sign
 // Include a starting node in list of nodes to be part of adding
 // and dropping tables.
 /* ---------------------------------------------------------------- */
-void Dbdict::execINCL_NODEREQ(Signal* signal) 
+void Dbdict::execINCL_NODEREQ(Signal* signal)
 {
   jamEntry();
   NodeRecordPtr nodePtr;
@@ -4648,15 +4643,15 @@ void Dbdict::execINCL_NODEREQ(Signal* si
 inline
 void Dbdict::printTables()
 {
-  DLHashTable<DictObject>::Iterator iter;
+  DictObject_hash::Iterator iter;
   bool moreTables = c_obj_hash.first(iter);
   printf("OBJECTS IN DICT:\n");
   char name[PATH_MAX];
   while (moreTables) {
-    Ptr<DictObject> tablePtr = iter.curr;
+    DictObjectPtr tablePtr = iter.curr;
     ConstRope r(c_rope_pool, tablePtr.p->m_name);
     r.copy(name);
-    printf("%s ", name); 
+    printf("%s ", name);
     moreTables = c_obj_hash.next(iter);
   }
   printf("\n");
@@ -4674,7 +4669,7 @@ void Dbdict::printTables()
 
 Dbdict::DictObject *
 Dbdict::get_object(const char * name, Uint32 len, Uint32 hash){
-  Ptr<DictObject> old_ptr;
+  DictObjectPtr old_ptr;
   if (get_object(old_ptr, name, len, hash))
   {
     return old_ptr.p;
@@ -4695,10 +4690,10 @@ Dbdict::get_object(DictObjectPtr& obj_pt
 
 void
 Dbdict::release_object(Uint32 obj_ptr_i, DictObject* obj_ptr_p){
-  Rope name(c_rope_pool, obj_ptr_p->m_name);
+  LocalRope name(c_rope_pool, obj_ptr_p->m_name);
   name.erase();
 
-  Ptr<DictObject> ptr = { obj_ptr_p, obj_ptr_i };
+  DictObjectPtr ptr = { obj_ptr_p, obj_ptr_i };
   c_obj_hash.release(ptr);
 }
 
@@ -4706,7 +4701,7 @@ void
 Dbdict::increase_ref_count(Uint32 obj_ptr_i)
 {
   DictObject* ptr = c_obj_pool.getPtr(obj_ptr_i);
-  ptr->m_ref_count++;  
+  ptr->m_ref_count++;
 }
 
 void
@@ -4714,13 +4709,13 @@ Dbdict::decrease_ref_count(Uint32 obj_pt
 {
   DictObject* ptr = c_obj_pool.getPtr(obj_ptr_i);
   ndbrequire(ptr->m_ref_count);
-  ptr->m_ref_count--;  
+  ptr->m_ref_count--;
 }
 
 void Dbdict::handleTabInfoInit(Signal * signal, SchemaTransPtr & trans_ptr,
                                SimpleProperties::Reader & it,
 			       ParseDictTabInfoRecord * parseP,
-			       bool checkExist) 
+			       bool checkExist)
 {
 /* ---------------------------------------------------------------- */
 // We always start by handling table name since this must be the first
@@ -4732,11 +4727,11 @@ void Dbdict::handleTabInfoInit(Signal *
 
   SimpleProperties::UnpackStatus status;
   c_tableDesc.init();
-  status = SimpleProperties::unpack(it, &c_tableDesc, 
-				    DictTabInfo::TableMapping, 
-				    DictTabInfo::TableMappingSize, 
+  status = SimpleProperties::unpack(it, &c_tableDesc,
+				    DictTabInfo::TableMapping,
+				    DictTabInfo::TableMappingSize,
 				    true, true);
-  
+
   if(status != SimpleProperties::Break){
     parseP->errorCode = CreateTableRef::InvalidFormat;
     parseP->status    = status;
@@ -4746,24 +4741,24 @@ void Dbdict::handleTabInfoInit(Signal *
   }
 
   if(parseP->requestType == DictTabInfo::AlterTableFromAPI)
-  {  
+  {
     ndbrequire(!checkExist);
   }
   if(!checkExist)
   {
     ndbrequire(parseP->requestType == DictTabInfo::AlterTableFromAPI);
   }
-  
+
   /* ---------------------------------------------------------------- */
   // Verify that table name is an allowed table name.
   // TODO
   /* ---------------------------------------------------------------- */
   const Uint32 tableNameLength = Uint32(strlen(c_tableDesc.TableName) + 1);
-  const Uint32 name_hash = Rope::hash(c_tableDesc.TableName, tableNameLength);
+  const Uint32 name_hash = LocalRope::hash(c_tableDesc.TableName, tableNameLength);
 
   if(checkExist){
     jam();
-    tabRequire(get_object(c_tableDesc.TableName, tableNameLength) == 0, 
+    tabRequire(get_object(c_tableDesc.TableName, tableNameLength) == 0,
 	       CreateTableRef::TableAlreadyExist);
   }
 
@@ -4772,7 +4767,7 @@ void Dbdict::handleTabInfoInit(Signal *
     jam();
     parseP->requestType = DictTabInfo::AddTableFromDict;
   }
-  
+
   TableRecordPtr tablePtr;
   switch (parseP->requestType) {
   case DictTabInfo::CreateTableFromAPI: {
@@ -4780,12 +4775,12 @@ void Dbdict::handleTabInfoInit(Signal *
   }
   case DictTabInfo::AlterTableFromAPI:{
     jam();
-    tablePtr.i = getFreeTableRecord(c_tableDesc.PrimaryTableId);
+    tablePtr.i = getFreeTableRecord();
     /* ---------------------------------------------------------------- */
     // Check if no free tables existed.
     /* ---------------------------------------------------------------- */
     tabRequire(tablePtr.i != RNIL, CreateTableRef::NoMoreTableRecords);
-    
+
     c_tableRecordPool.getPtr(tablePtr);
     break;
   }
@@ -4797,16 +4792,16 @@ void Dbdict::handleTabInfoInit(Signal *
 // Get table id and check that table doesn't already exist
 /* ---------------------------------------------------------------- */
     tablePtr.i = c_tableDesc.TableId;
-    
+
     if (parseP->requestType == DictTabInfo::ReadTableFromDiskSR) {
       ndbrequire(tablePtr.i == c_restartRecord.activeTable);
     }//if
     if (parseP->requestType == DictTabInfo::GetTabInfoConf) {
       ndbrequire(tablePtr.i == c_restartRecord.activeTable);
     }//if
-    
+
     c_tableRecordPool.getPtr(tablePtr);
-    
+
     //Uint32 oldTableVersion = tablePtr.p->tableVersion;
     initialiseTableRecord(tablePtr);
 
@@ -4815,21 +4810,21 @@ void Dbdict::handleTabInfoInit(Signal *
 /* ---------------------------------------------------------------- */
     Uint32 tableVersion = c_tableDesc.TableVersion;
     tablePtr.p->tableVersion = tableVersion;
-    
+
     break;
   }
   default:
     ndbrequire(false);
     break;
   }//switch
-  
-  { 
-    Rope name(c_rope_pool, tablePtr.p->tableName);
+
+  {
+    LocalRope name(c_rope_pool, tablePtr.p->tableName);
     tabRequire(name.assign(c_tableDesc.TableName, tableNameLength, name_hash),
 	       CreateTableRef::OutOfStringBuffer);
   }
 
-  Ptr<DictObject> obj_ptr;
+  DictObjectPtr obj_ptr;
   if (parseP->requestType != DictTabInfo::AlterTableFromAPI) {
     jam();
     ndbrequire(c_obj_hash.seize(obj_ptr));
@@ -4843,8 +4838,8 @@ void Dbdict::handleTabInfoInit(Signal *
 
     if (g_trace)
     {
-      g_eventLogger->info("Dbdict: create name=%s,id=%u,obj_ptr_i=%d", 
-                          c_tableDesc.TableName, 
+      g_eventLogger->info("Dbdict: create name=%s,id=%u,obj_ptr_i=%d",
+                          c_tableDesc.TableName,
                           tablePtr.i, tablePtr.p->m_obj_ptr_i);
     }
     send_event(signal, trans_ptr,
@@ -4854,20 +4849,20 @@ void Dbdict::handleTabInfoInit(Signal *
                c_tableDesc.TableType);
   }
   parseP->tablePtr = tablePtr;
-  
+
   // Disallow logging of a temporary table.
   tabRequire(!(c_tableDesc.TableTemporaryFlag && c_tableDesc.TableLoggedFlag),
              CreateTableRef::NoLoggingTemporaryTable);
 
   tablePtr.p->noOfAttributes = c_tableDesc.NoOfAttributes;
-  tablePtr.p->m_bits |= 
+  tablePtr.p->m_bits |=
     (c_tableDesc.TableLoggedFlag ? TableRecord::TR_Logged : 0);
-  tablePtr.p->m_bits |= 
+  tablePtr.p->m_bits |=
     (c_tableDesc.RowChecksumFlag ? TableRecord::TR_RowChecksum : 0);
-  tablePtr.p->m_bits |= 
+  tablePtr.p->m_bits |=
     (c_tableDesc.RowGCIFlag ? TableRecord::TR_RowGCI : 0);
 #if DOES_NOT_WORK_CURRENTLY
-  tablePtr.p->m_bits |= 
+  tablePtr.p->m_bits |=
     (c_tableDesc.TableTemporaryFlag ? TableRecord::TR_Temporary : 0);
 #endif
   tablePtr.p->m_bits |=
@@ -4878,13 +4873,13 @@ void Dbdict::handleTabInfoInit(Signal *
   tablePtr.p->tableType = (DictTabInfo::TableType)c_tableDesc.TableType;
   tablePtr.p->kValue = c_tableDesc.TableKValue;
   tablePtr.p->fragmentCount = c_tableDesc.FragmentCount;
-  tablePtr.p->m_tablespace_id = c_tableDesc.TablespaceId; 
-  tablePtr.p->maxRowsLow = c_tableDesc.MaxRowsLow; 
-  tablePtr.p->maxRowsHigh = c_tableDesc.MaxRowsHigh; 
+  tablePtr.p->m_tablespace_id = c_tableDesc.TablespaceId;
+  tablePtr.p->maxRowsLow = c_tableDesc.MaxRowsLow;
+  tablePtr.p->maxRowsHigh = c_tableDesc.MaxRowsHigh;
   tablePtr.p->minRowsLow = c_tableDesc.MinRowsLow;
   tablePtr.p->minRowsHigh = c_tableDesc.MinRowsHigh;
-  tablePtr.p->defaultNoPartFlag = c_tableDesc.DefaultNoPartFlag; 
-  tablePtr.p->linearHashFlag = c_tableDesc.LinearHashFlag; 
+  tablePtr.p->defaultNoPartFlag = c_tableDesc.DefaultNoPartFlag;
+  tablePtr.p->linearHashFlag = c_tableDesc.LinearHashFlag;
   tablePtr.p->singleUserMode = c_tableDesc.SingleUserMode;
   tablePtr.p->hashMapObjectId = c_tableDesc.HashMapObjectId;
   tablePtr.p->hashMapVersion = c_tableDesc.HashMapVersion;
@@ -4913,7 +4908,7 @@ void Dbdict::handleTabInfoInit(Signal *
     if (dictObj && dictObj->m_type == DictTabInfo::HashMap)
     {
       jam();
-      HashMapPtr hm_ptr;
+      HashMapRecordPtr hm_ptr;
       ndbrequire(c_hash_map_hash.find(hm_ptr, dictObj->m_id));
       tablePtr.p->hashMapObjectId = hm_ptr.p->m_object_id;
       tablePtr.p->hashMapVersion = hm_ptr.p->m_object_version;
@@ -4923,7 +4918,7 @@ void Dbdict::handleTabInfoInit(Signal *
   if (tablePtr.p->fragmentType == DictTabInfo::HashMapPartition)
   {
     jam();
-    HashMapPtr hm_ptr;
+    HashMapRecordPtr hm_ptr;
     tabRequire(c_hash_map_hash.find(hm_ptr, tablePtr.p->hashMapObjectId),
                CreateTableRef::InvalidHashMap);
 
@@ -4944,21 +4939,21 @@ void Dbdict::handleTabInfoInit(Signal *
                  CreateTableRef::InvalidHashMap);
     }
   }
-  
+
   {
-    Rope frm(c_rope_pool, tablePtr.p->frmData);
+    LocalRope frm(c_rope_pool, tablePtr.p->frmData);
     tabRequire(frm.assign(c_tableDesc.FrmData, c_tableDesc.FrmLen),
 	       CreateTableRef::OutOfStringBuffer);
-    Rope range(c_rope_pool, tablePtr.p->rangeData);
+    LocalRope range(c_rope_pool, tablePtr.p->rangeData);
     tabRequire(range.assign((const char*)c_tableDesc.RangeListData,
                c_tableDesc.RangeListDataLen),
 	      CreateTableRef::OutOfStringBuffer);
-    Rope fd(c_rope_pool, tablePtr.p->ngData);
+    LocalRope fd(c_rope_pool, tablePtr.p->ngData);
     tabRequire(fd.assign((const char*)c_tableDesc.FragmentData,
                          c_tableDesc.FragmentDataLen),
 	       CreateTableRef::OutOfStringBuffer);
   }
-  
+
   c_fragDataLen = c_tableDesc.FragmentDataLen;
   memcpy(c_fragData, c_tableDesc.FragmentData,
          c_tableDesc.FragmentDataLen);
@@ -5023,9 +5018,9 @@ void Dbdict::handleTabInfoInit(Signal *
     tablePtr.p->triggerId = RNIL;
   }
   tablePtr.p->buildTriggerId = RNIL;
-  
+
   handleTabInfo(it, parseP, c_tableDesc);
-  
+
   if(parseP->errorCode != 0)
   {
     /**
@@ -5048,7 +5043,7 @@ void Dbdict::handleTabInfoInit(Signal *
 }//handleTabInfoInit()
 
 void
-Dbdict::upgrade_seizeTrigger(Ptr<TableRecord> tabPtr,
+Dbdict::upgrade_seizeTrigger(TableRecordPtr tabPtr,
                              Uint32 insertTriggerId,
                              Uint32 updateTriggerId,
                              Uint32 deleteTriggerId)
@@ -5081,11 +5076,11 @@ Dbdict::upgrade_seizeTrigger(Ptr<TableRe
       BaseString::snprintf(buf, sizeof(buf),
                            "UPG_UPD_NDB$INDEX_%u_UI", tabPtr.i);
       {
-        Rope name(c_rope_pool, triggerPtr.p->triggerName);
+        LocalRope name(c_rope_pool, triggerPtr.p->triggerName);
         name.assign(buf);
       }
 
-      Ptr<DictObject> obj_ptr;
+      DictObjectPtr obj_ptr;
       bool ok = c_obj_hash.seize(obj_ptr);
       ndbrequire(ok);
       new (obj_ptr.p) DictObject();
@@ -5119,11 +5114,11 @@ Dbdict::upgrade_seizeTrigger(Ptr<TableRe
                            "UPG_DEL_NDB$INDEX_%u_UI", tabPtr.i);
 
       {
-        Rope name(c_rope_pool, triggerPtr.p->triggerName);
+        LocalRope name(c_rope_pool, triggerPtr.p->triggerName);
         name.assign(buf);
       }
 
-      Ptr<DictObject> obj_ptr;
+      DictObjectPtr obj_ptr;
       bool ok = c_obj_hash.seize(obj_ptr);
       ndbrequire(ok);
       new (obj_ptr.p) DictObject();
@@ -5144,9 +5139,9 @@ void Dbdict::handleTabInfo(SimplePropert
 			   DictTabInfo::Table &tableDesc)
 {
   TableRecordPtr tablePtr = parseP->tablePtr;
-  
+
   SimpleProperties::UnpackStatus status;
-  
+
   Uint32 keyCount = 0;
   Uint32 keyLength = 0;
   Uint32 attrCount = tablePtr.p->noOfAttributes;
@@ -5158,21 +5153,21 @@ void Dbdict::handleTabInfo(SimplePropert
   AttributeRecordPtr attrPtr;
   c_attributeRecordHash.removeAll();
 
-  LocalDLFifoList<AttributeRecord> list(c_attributeRecordPool, 
+  LocalAttributeRecord_list list(c_attributeRecordPool,
 					tablePtr.p->m_attributes);
 
   Uint32 counts[] = {0,0,0,0,0};
-  
+
   for(Uint32 i = 0; i<attrCount; i++){
     /**
      * Attribute Name
      */
     DictTabInfo::Attribute attrDesc; attrDesc.init();
-    status = SimpleProperties::unpack(it, &attrDesc, 
-				      DictTabInfo::AttributeMapping, 
-				      DictTabInfo::AttributeMappingSize, 
+    status = SimpleProperties::unpack(it, &attrDesc,
+				      DictTabInfo::AttributeMapping,
+				      DictTabInfo::AttributeMappingSize,
 				      true, true);
-    
+
     if(status != SimpleProperties::Break){
       parseP->errorCode = CreateTableRef::InvalidFormat;
       parseP->status    = status;
@@ -5185,38 +5180,38 @@ void Dbdict::handleTabInfo(SimplePropert
      * Check that attribute is not defined twice
      */
     const Uint32 len = Uint32(strlen(attrDesc.AttributeName)+1);
-    const Uint32 name_hash = Rope::hash(attrDesc.AttributeName, len);
+    const Uint32 name_hash = LocalRope::hash(attrDesc.AttributeName, len);
     {
       AttributeRecord key;
       key.m_key.m_name_ptr = attrDesc.AttributeName;
       key.m_key.m_name_len = len;
       key.attributeName.m_hash = name_hash;
       key.m_key.m_pool = &c_rope_pool;
-      Ptr<AttributeRecord> old_ptr;
+      AttributeRecordPtr old_ptr;
       c_attributeRecordHash.find(old_ptr, key);
-      
+
       if(old_ptr.i != RNIL){
 	parseP->errorCode = CreateTableRef::AttributeNameTwice;
 	return;
       }
     }
-    
+
     list.seize(attrPtr);
     if(attrPtr.i == RNIL){
       jam();
       parseP->errorCode = CreateTableRef::NoMoreAttributeRecords;
       return;
     }
-    
+
     new (attrPtr.p) AttributeRecord();
     attrPtr.p->attributeDescriptor = 0x00012255; //Default value
     attrPtr.p->tupleKey = 0;
-    
+
     /**
      * TmpAttrib to Attribute mapping
      */
     {
-      Rope name(c_rope_pool, attrPtr.p->attributeName);
+      LocalRope name(c_rope_pool, attrPtr.p->attributeName);
       if (!name.assign(attrDesc.AttributeName, len, name_hash))
       {
 	jam();
@@ -5228,7 +5223,7 @@ void Dbdict::handleTabInfo(SimplePropert
     attrPtr.p->attributeId = i;
     //attrPtr.p->attributeId = attrDesc.AttributeId;
     attrPtr.p->tupleKey = (keyCount + 1) * attrDesc.AttributeKeyFlag;
-    
+
     attrPtr.p->extPrecision = attrDesc.AttributeExtPrecision;
     attrPtr.p->extScale = attrDesc.AttributeExtScale;
     attrPtr.p->extLength = attrDesc.AttributeExtLength;
@@ -5275,14 +5270,6 @@ void Dbdict::handleTabInfo(SimplePropert
       parseP->errorLine = __LINE__;
       return;
     }
-    
-    // XXX old test option, remove
-    if(!attrDesc.AttributeKeyFlag && 
-       tablePtr.i > 1 &&
-       !tablePtr.p->isIndex())
-    {
-      //attrDesc.AttributeStorageType= NDB_STORAGETYPE_DISK;
-    }
 
     Uint32 desc = 0;
     AttributeDescriptor::setType(desc, attrDesc.AttributeExtType);
@@ -5298,14 +5285,14 @@ void Dbdict::handleTabInfo(SimplePropert
     attrPtr.p->autoIncrement = attrDesc.AttributeAutoIncrement;
     {
       char defaultValueBuf [MAX_ATTR_DEFAULT_VALUE_SIZE];
-      
+
       if (attrDesc.AttributeDefaultValueLen)
       {
         ndbrequire(attrDesc.AttributeDefaultValueLen >= sizeof(Uint32));
 
         memcpy(defaultValueBuf, attrDesc.AttributeDefaultValue,
                attrDesc.AttributeDefaultValueLen);
-        
+
         /* Table meta-info is normally stored in network byte order by
          * SimpleProperties.
          * For the default value, we convert as necessary here
@@ -5315,9 +5302,9 @@ void Dbdict::handleTabInfo(SimplePropert
         memcpy(&a, defaultValueBuf, sizeof(Uint32));
         a = ntohl(a);
         memcpy(defaultValueBuf, &a, sizeof(Uint32));
-        
+
         Uint32 remainBytes = attrDesc.AttributeDefaultValueLen - sizeof(Uint32);
-        
+
         if (remainBytes)
         {
           /* Convert attribute */
@@ -5330,26 +5317,26 @@ void Dbdict::handleTabInfo(SimplePropert
         }
       }
 
-      Rope defaultValue(c_rope_pool, attrPtr.p->defaultValue);
+      LocalRope defaultValue(c_rope_pool, attrPtr.p->defaultValue);
       defaultValue.assign(defaultValueBuf,
                           attrDesc.AttributeDefaultValueLen);
     }
-    
+
     keyCount += attrDesc.AttributeKeyFlag;
     nullCount += attrDesc.AttributeNullableFlag;
-    
+
     const Uint32 aSz = (1 << attrDesc.AttributeSize);
     Uint32 sz;
     if(aSz != 1)
     {
       sz = ((aSz * attrDesc.AttributeArraySize) + 31) >> 5;
-    }    
+    }
     else
     {
       sz = 0;
-      nullBits += attrDesc.AttributeArraySize;      
+      nullBits += attrDesc.AttributeArraySize;
     }
-    
+
     if(attrDesc.AttributeArraySize == 0)
     {
       parseP->errorCode = CreateTableRef::InvalidArraySize;
@@ -5358,7 +5345,7 @@ void Dbdict::handleTabInfo(SimplePropert
       parseP->errorLine = __LINE__;
       return;
     }
-    
+
     recordLength += sz;
     if(attrDesc.AttributeKeyFlag){
       keyLength += sz;
@@ -5371,7 +5358,7 @@ void Dbdict::handleTabInfo(SimplePropert
 	return;
       }
     }
-    
+
     c_attributeRecordHash.add(attrPtr);
 
     int a= AttributeDescriptor::getDiskBased(desc);
@@ -5387,25 +5374,25 @@ void Dbdict::handleTabInfo(SimplePropert
       parseP->errorLine = __LINE__;
       return;
     }
-    
+
     if(!it.next())
       break;
-    
+
     if(it.getKey() != DictTabInfo::AttributeName)
       break;
   }//while
-  
+
   tablePtr.p->noOfPrimkey = keyCount;
   tablePtr.p->noOfNullAttr = nullCount;
   tablePtr.p->noOfCharsets = noOfCharsets;
   tablePtr.p->tupKeyLength = keyLength;
   tablePtr.p->noOfNullBits = nullCount + nullBits;
 
-  tabRequire(recordLength<= MAX_TUPLE_SIZE_IN_WORDS, 
+  tabRequire(recordLength<= MAX_TUPLE_SIZE_IN_WORDS,
 	     CreateTableRef::RecordTooBig);
-  tabRequire(keyLength <= MAX_KEY_SIZE_IN_WORDS, 
+  tabRequire(keyLength <= MAX_KEY_SIZE_IN_WORDS,
 	     CreateTableRef::InvalidPrimaryKeySize);
-  tabRequire(keyLength > 0, 
+  tabRequire(keyLength > 0,
 	     CreateTableRef::InvalidPrimaryKeySize);
   tabRequire(CHECK_SUMA_MESSAGE_SIZE(keyCount, keyLength, attrCount, recordLength),
              CreateTableRef::RecordTooBig);
@@ -5438,12 +5425,12 @@ void Dbdict::handleTabInfo(SimplePropert
     {
       tabRequire(false, CreateTableRef::InvalidTablespace);
     }
-    
+
     if(tablespacePtr.p->m_type != DictTabInfo::Tablespace)
     {
       tabRequire(false, CreateTableRef::NotATablespace);
     }
-    
+
     if(tablespacePtr.p->m_version != tableDesc.TablespaceVersion)
     {
       tabRequire(false, CreateTableRef::InvalidTablespaceVersion);
@@ -5504,7 +5491,7 @@ Dbdict::wait_gcp(Signal* signal, SchemaO
              WaitGCPReq::SignalLength, JBB);
 }
 
-void Dbdict::execWAIT_GCP_CONF(Signal* signal) 
+void Dbdict::execWAIT_GCP_CONF(Signal* signal)
 {
   WaitGCPConf* conf = (WaitGCPConf*)signal->getDataPtr();
   handleDictConf(signal, conf);
@@ -5513,7 +5500,7 @@ void Dbdict::execWAIT_GCP_CONF(Signal* s
 /* ---------------------------------------------------------------- */
 // Refused new global checkpoint.
 /* ---------------------------------------------------------------- */
-void Dbdict::execWAIT_GCP_REF(Signal* signal) 
+void Dbdict::execWAIT_GCP_REF(Signal* signal)
 {
   jamEntry();
   WaitGCPRef* ref = (WaitGCPRef*)signal->getDataPtr();
@@ -5669,7 +5656,7 @@ Dbdict::create_fragmentation(Signal* sig
   if (tabPtr.p->hashMapObjectId != RNIL)
   {
     jam();
-    HashMapPtr hm_ptr;
+    HashMapRecordPtr hm_ptr;
     ndbrequire(c_hash_map_hash.find(hm_ptr, tabPtr.p->hashMapObjectId));
     frag_req->map_ptr_i = hm_ptr.p->m_map_ptr_i;
   }
@@ -5766,8 +5753,8 @@ Dbdict::createTable_parse(Signal* signal
 
     if (parseRecord.errorCode == 0)
     {
-      if (ERROR_INSERTED(6200) || 
-          (ERROR_INSERTED(6201) && 
+      if (ERROR_INSERTED(6200) ||
+          (ERROR_INSERTED(6201) &&
            DictTabInfo::isIndex(parseRecord.tablePtr.p->tableType)))
       {
         jam();
@@ -5776,7 +5763,7 @@ Dbdict::createTable_parse(Signal* signal
       }
     }
 
-    if (parseRecord.errorCode != 0) 
+    if (parseRecord.errorCode != 0)
     {
       jam();
       if (!parseRecord.tablePtr.isNull())
@@ -5809,8 +5796,8 @@ Dbdict::createTable_parse(Signal* signal
     impl_req->tableId = tabPtr.i;
     impl_req->tableVersion = tabPtr.p->tableVersion;
 
-    if (ERROR_INSERTED(6202) || 
-        (ERROR_INSERTED(6203) && 
+    if (ERROR_INSERTED(6202) ||
+        (ERROR_INSERTED(6203) &&
          DictTabInfo::isIndex(parseRecord.tablePtr.p->tableType)))
     {
       jam();
@@ -6162,8 +6149,8 @@ Dbdict::createTab_local(Signal* signal,
     }
 
     Uint32 key = 0;
-    Ptr<AttributeRecord> attrPtr;
-    LocalDLFifoList<AttributeRecord> list(c_attributeRecordPool,
+    AttributeRecordPtr attrPtr;
+    LocalAttributeRecord_list list(c_attributeRecordPool,
                                           tabPtr.p->m_attributes);
     for(list.first(attrPtr); !attrPtr.isNull(); list.next(attrPtr))
     {
@@ -6301,7 +6288,7 @@ Dbdict::sendLQHADDATTRREQ(Signal* signal
       {
         jam();
         AttributeDescriptor::clearArrayType(entry.attrDescriptor);
-        AttributeDescriptor::setArrayType(entry.attrDescriptor, 
+        AttributeDescriptor::setArrayType(entry.attrDescriptor,
                                           NDB_ARRAYTYPE_NONE_VAR);
       }
     }
@@ -6400,7 +6387,7 @@ Dbdict::createTab_dih(Signal* signal, Sc
 
   if (tabPtr.p->hashMapObjectId != RNIL)
   {
-    HashMapPtr hm_ptr;
+    HashMapRecordPtr hm_ptr;
     ndbrequire(c_hash_map_hash.find(hm_ptr, tabPtr.p->hashMapObjectId));
     req->hashMapPtrI = hm_ptr.p->m_map_ptr_i;
   }
@@ -6580,7 +6567,7 @@ Dbdict::execLQHFRAGCONF(Signal * signal)
   {
     jam();
     SchemaOpPtr op_ptr;
-    Ptr<TableRecord> tabPtr;
+    TableRecordPtr tabPtr;
     c_tableRecordPool.getPtr(tabPtr, tableId);
     if (DictTabInfo::isTable(tabPtr.p->tableType))
     {
@@ -6633,7 +6620,7 @@ Dbdict::execLQHFRAGREF(Signal * signal)
   {
     jam();
     SchemaOpPtr op_ptr;
-    Ptr<TableRecord> tabPtr;
+    TableRecordPtr tabPtr;
     c_tableRecordPool.getPtr(tabPtr, tableId);
     if (DictTabInfo::isTable(tabPtr.p->tableType))
     {
@@ -6832,10 +6819,10 @@ Dbdict::createTable_commit(Signal* signa
 
   if (DictTabInfo::isIndex(tabPtr.p->tableType))
   {
-    Ptr<TableRecord> basePtr;
+    TableRecordPtr basePtr;
     c_tableRecordPool.getPtr(basePtr, tabPtr.p->primaryTableId);
 
-    LocalDLFifoList<TableRecord> list(c_tableRecordPool, basePtr.p->m_indexes);
+    LocalTableRecord_list list(c_tableRecordPool, basePtr.p->m_indexes);
     list.add(tabPtr);
   }
 }
@@ -7075,7 +7062,7 @@ void Dbdict::execCREATE_TABLE_REF(Signal
   handleDictRef(signal, ref);
 }
 
-void Dbdict::releaseTableObject(Uint32 tableId, bool removeFromHash) 
+void Dbdict::releaseTableObject(Uint32 tableId, bool removeFromHash)
 {
   TableRecordPtr tablePtr;
   c_tableRecordPool.getPtr(tablePtr, tableId);
@@ -7087,31 +7074,31 @@ void Dbdict::releaseTableObject(Uint32 t
   }
   else
   {
-    Rope tmp(c_rope_pool, tablePtr.p->tableName);
+    LocalRope tmp(c_rope_pool, tablePtr.p->tableName);
     tmp.erase();
   }
-  
+
   {
-    Rope tmp(c_rope_pool, tablePtr.p->frmData);
+    LocalRope tmp(c_rope_pool, tablePtr.p->frmData);
     tmp.erase();
   }
 
   {
-    Rope tmp(c_rope_pool, tablePtr.p->ngData);
+    LocalRope tmp(c_rope_pool, tablePtr.p->ngData);
     tmp.erase();
   }
 
   {
-    Rope tmp(c_rope_pool, tablePtr.p->rangeData);
+    LocalRope tmp(c_rope_pool, tablePtr.p->rangeData);
     tmp.erase();
   }
 
-  LocalDLFifoList<AttributeRecord> list(c_attributeRecordPool, 
+  LocalAttributeRecord_list list(c_attributeRecordPool,
 					tablePtr.p->m_attributes);
   AttributeRecordPtr attrPtr;
   for(list.first(attrPtr); !attrPtr.isNull(); list.next(attrPtr)){
-    Rope name(c_rope_pool, attrPtr.p->attributeName);
-    Rope def(c_rope_pool, attrPtr.p->defaultValue);
+    LocalRope name(c_rope_pool, attrPtr.p->attributeName);
+    LocalRope def(c_rope_pool, attrPtr.p->defaultValue);
     name.erase();
     def.erase();
   }
@@ -7443,7 +7430,7 @@ Dbdict::dropTable_commit(Signal* signal,
   // from a newer execDROP_TAB_REQ version
   {
     char buf[1024];
-    Rope name(c_rope_pool, tablePtr.p->tableName);
+    LocalRope name(c_rope_pool, tablePtr.p->tableName);
     name.copy(buf);
     g_eventLogger->info("Dbdict: drop name=%s,id=%u,obj_id=%u", buf, tablePtr.i,
                         tablePtr.p->m_obj_ptr_i);
@@ -7457,10 +7444,10 @@ Dbdict::dropTable_commit(Signal* signal,
 
   if (DictTabInfo::isIndex(tablePtr.p->tableType))
   {
-    Ptr<TableRecord> basePtr;
+    TableRecordPtr basePtr;
     c_tableRecordPool.getPtr(basePtr, tablePtr.p->primaryTableId);
 
-    LocalDLFifoList<TableRecord> list(c_tableRecordPool, basePtr.p->m_indexes);
+    LocalTableRecord_list list(c_tableRecordPool, basePtr.p->m_indexes);
     list.remove(tablePtr);
   }
   dropTabPtr.p->m_block = 0;
@@ -7509,7 +7496,7 @@ Dbdict::dropTable_commit_nextStep(Signal
                PrepDropTabRef::SignalLength, JBB);
     return;
   }
- 
+
 
   PrepDropTabReq* prep = (PrepDropTabReq*)signal->getDataPtrSend();
   prep->senderRef = reference();
@@ -7815,11 +7802,11 @@ Dbdict::alterTable_release(SchemaOpPtr o
   AlterTableRecPtr alterTabPtr;
   getOpRec(op_ptr, alterTabPtr);
   {
-    Rope r(c_rope_pool, alterTabPtr.p->m_oldTableName);
+    LocalRope r(c_rope_pool, alterTabPtr.p->m_oldTableName);
     r.erase();
   }
   {
-    Rope r(c_rope_pool, alterTabPtr.p->m_oldFrmData);
+    LocalRope r(c_rope_pool, alterTabPtr.p->m_oldFrmData);
     r.erase();
   }
   LocalArenaPoolImpl op_sec_pool(op_ptr.p->m_trans_ptr.p->m_arena, c_opSectionBufferPool);
@@ -8132,7 +8119,7 @@ Dbdict::alterTable_parse(Signal* signal,
       return;
     }
 
-    LocalDLFifoList<AttributeRecord>
+    LocalAttributeRecord_list
       list(c_attributeRecordPool, newTablePtr.p->m_attributes);
     AttributeRecordPtr attrPtr;
     list.first(attrPtr);
@@ -8341,10 +8328,10 @@ Dbdict::check_supported_reorg(Uint32 org
     return 0;
   }
 
-  HashMapPtr orgmap_ptr;
+  HashMapRecordPtr orgmap_ptr;
   ndbrequire(c_hash_map_hash.find(orgmap_ptr, org_map_id));
 
-  HashMapPtr newmap_ptr;
+  HashMapRecordPtr newmap_ptr;
   ndbrequire(c_hash_map_hash.find(newmap_ptr, new_map_id));
 
   Ptr<Hash2FragmentMap> orgptr;
@@ -8443,7 +8430,7 @@ Dbdict::alterTable_subOps(Signal* signal
       TableRecordPtr tabPtr;
       TableRecordPtr indexPtr;
       c_tableRecordPool.getPtr(tabPtr, impl_req->tableId);
-      LocalDLFifoList<TableRecord> list(c_tableRecordPool, tabPtr.p->m_indexes);
+      LocalTableRecord_list list(c_tableRecordPool, tabPtr.p->m_indexes);
       Uint32 ptrI = alterTabPtr.p->m_sub_add_frag_index_ptr;
 
       if (ptrI == RNIL)
@@ -8905,7 +8892,7 @@ Dbdict::alterTable_prepare(Signal* signa
      * Get DIH connectPtr for future commit
      */
     {
-      Ptr<SchemaOp> tmp = op_ptr;
+      SchemaOpPtr tmp = op_ptr;
       LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
       for (list.prev(tmp); !tmp.isNull(); list.prev(tmp))
       {
@@ -9084,7 +9071,7 @@ Dbdict::alterTable_toLocal(Signal* signa
     if (AlterTableReq::getReorgFragFlag(req->changeMask))
     {
       jam();
-      HashMapPtr hm_ptr;
+      HashMapRecordPtr hm_ptr;
       ndbrequire(c_hash_map_hash.find(hm_ptr,
                                       alterTabPtr.p->m_newTablePtr.p->hashMapObjectId));
       req->new_map_ptr_i = hm_ptr.p->m_map_ptr_i;
@@ -9186,7 +9173,7 @@ Dbdict::alterTable_commit(Signal* signal
         << " old=" << copyRope<sz>(tablePtr.p->tableName)
         << " new=" << copyRope<sz>(newTablePtr.p->tableName));
 
-      Ptr<DictObject> obj_ptr;
+      DictObjectPtr obj_ptr;
       c_obj_pool.getPtr(obj_ptr, tablePtr.p->m_obj_ptr_i);
 
       // remove old name from hash
@@ -9219,9 +9206,9 @@ Dbdict::alterTable_commit(Signal* signal
       jam();
 
       /* Move the column definitions to the real table definitions. */
-      LocalDLFifoList<AttributeRecord>
+      LocalAttributeRecord_list
         list(c_attributeRecordPool, tablePtr.p->m_attributes);
-      LocalDLFifoList<AttributeRecord>
+      LocalAttributeRecord_list
         newlist(c_attributeRecordPool, newTablePtr.p->m_attributes);
 
       const Uint32 noOfNewAttr = impl_req->noOfNewAttr;
@@ -9438,7 +9425,7 @@ Dbdict::alterTable_fromCommitComplete(Si
     sendSignal(SUMA_REF, GSN_ALTER_TAB_REQ, signal,
                AlterTabReq::SignalLength, JBB, &handle);
   }
-  
+
   // older way to notify  wl3600_todo disable to find SUMA problems
   {
     ApiBroadcastRep* api= (ApiBroadcastRep*)signal->getDataPtrSend();
@@ -9752,10 +9739,10 @@ Dbdict::execALTER_TABLE_REF(Signal* sign
 /* ---------------------------------------------------------------- */
 /* **************************************************************** */
 
-void Dbdict::execGET_TABLEDID_REQ(Signal * signal) 
+void Dbdict::execGET_TABLEDID_REQ(Signal * signal)
 {
   jamEntry();
-  ndbrequire(signal->getNoOfSections() == 1);  
+  ndbrequire(signal->getNoOfSections() == 1);
   GetTableIdReq const * req = (GetTableIdReq *)signal->getDataPtr();
   Uint32 senderData = req->senderData;
   Uint32 senderRef = req->senderRef;
@@ -9764,8 +9751,8 @@ void Dbdict::execGET_TABLEDID_REQ(Signal
   if(len>PATH_MAX)
   {
     jam();
-    sendGET_TABLEID_REF((Signal*)signal, 
-			(GetTableIdReq *)req, 
+    sendGET_TABLEID_REF((Signal*)signal,
+			(GetTableIdReq *)req,
 			GetTableIdRef::TableNameTooLong);
     return;
   }
@@ -9776,31 +9763,31 @@ void Dbdict::execGET_TABLEDID_REQ(Signal
   handle.getSection(ssPtr,GetTableIdReq::TABLE_NAME);
   copy((Uint32*)tableName, ssPtr);
   releaseSections(handle);
-    
+
   DictObject * obj_ptr_p = get_object(tableName, len);
   if(obj_ptr_p == 0 || !DictTabInfo::isTable(obj_ptr_p->m_type)){
     jam();
-    sendGET_TABLEID_REF(signal, 
-			(GetTableIdReq *)req, 
+    sendGET_TABLEID_REF(signal,
+			(GetTableIdReq *)req,
 			GetTableIdRef::TableNotDefined);
     return;
   }
 
   TableRecordPtr tablePtr;
-  c_tableRecordPool.getPtr(tablePtr, obj_ptr_p->m_id); 
-  
+  c_tableRecordPool.getPtr(tablePtr, obj_ptr_p->m_id);
+
   GetTableIdConf * conf = (GetTableIdConf *)req;
   conf->tableId = tablePtr.p->tableId;
   conf->schemaVersion = tablePtr.p->tableVersion;
   conf->senderData = senderData;
   sendSignal(senderRef, GSN_GET_TABLEID_CONF, signal,
-	     GetTableIdConf::SignalLength, JBB);  
+	     GetTableIdConf::SignalLength, JBB);
 }
 
 
-void Dbdict::sendGET_TABLEID_REF(Signal* signal, 
+void Dbdict::sendGET_TABLEID_REF(Signal* signal,
 				 GetTableIdReq * req,
-				 GetTableIdRef::ErrorCode errorCode) 
+				 GetTableIdRef::ErrorCode errorCode)
 {
   GetTableIdRef * const ref = (GetTableIdRef *)req;
   /**
@@ -9808,20 +9795,20 @@ void Dbdict::sendGET_TABLEID_REF(Signal*
    */
   BlockReference retRef = req->senderRef;
   ref->err = errorCode;
-  sendSignal(retRef, GSN_GET_TABLEID_REF, signal, 
+  sendSignal(retRef, GSN_GET_TABLEID_REF, signal,
 	     GetTableIdRef::SignalLength, JBB);
 }
 
 /* ---------------------------------------------------------------- */
 // Get a full table description.
 /* ---------------------------------------------------------------- */
-void Dbdict::execGET_TABINFOREQ(Signal* signal) 
+void Dbdict::execGET_TABINFOREQ(Signal* signal)
 {
   jamEntry();
-  if(!assembleFragments(signal)) 
-  { 
+  if(!assembleFragments(signal))
+  {
     return;
-  }  
+  }
 
   GetTabInfoReq * const req = (GetTabInfoReq *)&signal->theData[0];
   SectionHandle handle(this, signal);
@@ -9831,26 +9818,26 @@ void Dbdict::execGET_TABINFOREQ(Signal*
    * it's is a one from the time queue
    */
   bool fromTimeQueue = (signal->senderBlockRef() == reference());
-  
+
   if (c_retrieveRecord.busyState && fromTimeQueue == true) {
     jam();
-    
-    sendSignalWithDelay(reference(), GSN_GET_TABINFOREQ, signal, 30, 
+
+    sendSignalWithDelay(reference(), GSN_GET_TABINFOREQ, signal, 30,
 			signal->length(),
 			&handle);
     return;
   }//if
 
   const Uint32 MAX_WAITERS = 5;
-  
+
   if(c_retrieveRecord.busyState && fromTimeQueue == false)
   {
     jam();
     if(c_retrieveRecord.noOfWaiters < MAX_WAITERS){
       jam();
       c_retrieveRecord.noOfWaiters++;
-      
-      sendSignalWithDelay(reference(), GSN_GET_TABINFOREQ, signal, 30, 
+
+      sendSignalWithDelay(reference(), GSN_GET_TABINFOREQ, signal, 30,
 			  signal->length(),
 			  &handle);
       return;
@@ -9859,11 +9846,11 @@ void Dbdict::execGET_TABINFOREQ(Signal*
     sendGET_TABINFOREF(signal, req, GetTabInfoRef::Busy, __LINE__);
     return;
   }
-  
+
   if(fromTimeQueue){
     jam();
     c_retrieveRecord.noOfWaiters--;
-  } 
+  }
 
   const bool useLongSig = (req->requestType & GetTabInfoReq::LongSignalConf);
   const bool byName = (req->requestType & GetTabInfoReq::RequestByName);
@@ -9874,7 +9861,7 @@ void Dbdict::execGET_TABINFOREQ(Signal*
     jam();
     ndbrequire(handle.m_cnt == 1);
     const Uint32 len = req->tableNameLen;
-    
+
     if(len > PATH_MAX){
       jam();
       releaseSections(handle);
@@ -9886,7 +9873,7 @@ void Dbdict::execGET_TABINFOREQ(Signal*
     SegmentedSectionPtr ssPtr;
     handle.getSection(ssPtr,GetTabInfoReq::TABLE_NAME);
     copy(tableName, ssPtr);
-    
+
     DictObject * old_ptr_p = get_object((char*)tableName, len);
     if(old_ptr_p)
       obj_id = old_ptr_p->m_id;
@@ -9900,7 +9887,7 @@ void Dbdict::execGET_TABINFOREQ(Signal*
   if(obj_id != RNIL)
   {
     XSchemaFile * xsf = &c_schemaFile[SchemaRecord::NEW_SCHEMA_FILE];
-    objEntry = getTableEntry(xsf, obj_id);      
+    objEntry = getTableEntry(xsf, obj_id);
   }
 
   // The table seached for was not found
@@ -10012,7 +9999,7 @@ void Dbdict::execGET_TABINFOREQ(Signal*
   sendSignal(reference(), GSN_CONTINUEB, signal, len, JBB);
 }//execGET_TABINFOREQ()
 
-void Dbdict::sendGetTabResponse(Signal* signal) 
+void Dbdict::sendGetTabResponse(Signal* signal)
 {
   PageRecordPtr pagePtr;
   DictTabInfo * const conf = (DictTabInfo *)&signal->theData[0];
@@ -10023,7 +10010,7 @@ void Dbdict::sendGetTabResponse(Signal*
 
   c_pageRecordArray.getPtr(pagePtr, c_retrieveRecord.retrievePage);
   Uint32* pagePointer = (Uint32*)&pagePtr.p->word[0] + ZPAGE_HEADER_SIZE;
-  
+
   if(c_retrieveRecord.m_useLongSig){
     jam();
     GetTabInfoConf* conf = (GetTabInfoConf*)signal->getDataPtr();
@@ -10039,7 +10026,7 @@ void Dbdict::sendGetTabResponse(Signal*
     ptr[0].sz = c_retrieveRecord.retrievedNoOfWords;
     sendFragmentedSignal(c_retrieveRecord.blockRef,
 			 GSN_GET_TABINFO_CONF,
-			 signal, 
+			 signal,
 			 GetTabInfoConf::SignalLength,
 			 JBB,
 			 ptr,
@@ -10051,7 +10038,7 @@ void Dbdict::sendGetTabResponse(Signal*
   ndbrequire(false);
 }//sendGetTabResponse()
 
-void Dbdict::sendGET_TABINFOREF(Signal* signal, 
+void Dbdict::sendGET_TABINFOREF(Signal* signal,
 				GetTabInfoReq * req,
 				GetTabInfoRef::ErrorCode errorCode,
                                 Uint32 errorLine)
@@ -10067,7 +10054,7 @@ void Dbdict::sendGET_TABINFOREF(Signal*
   ref->schemaTransId = req_copy.schemaTransId;
   ref->errorCode = (Uint32)errorCode;
   ref->errorLine = errorLine;
-  
+
   BlockReference retRef = req_copy.senderRef;
   sendSignal(retRef, GSN_GET_TABINFOREF, signal,
              GetTabInfoRef::SignalLength, JBB);
@@ -10104,7 +10091,7 @@ void Dbdict::sendOLD_LIST_TABLES_CONF(Si
   conf->counter = 0;
   Uint32 pos = 0;
 
-  DLHashTable<DictObject>::Iterator iter;
+  DictObject_hash::Iterator iter;
   bool ok = c_obj_hash.first(iter);
   for(; ok; ok = c_obj_hash.next(iter)){
     Uint32 type = iter.curr.p->m_type;
@@ -10120,7 +10107,7 @@ void Dbdict::sendOLD_LIST_TABLES_CONF(Si
 
       if(reqListIndexes && (reqTableId != tablePtr.p->primaryTableId))
 	continue;
-      
+
       conf->tableData[pos] = 0;
       conf->setTableId(pos, tablePtr.i); // id
       conf->setTableType(pos, type); // type
@@ -10132,7 +10119,7 @@ void Dbdict::sendOLD_LIST_TABLES_CONF(Si
         switch(te->m_tableState){
         case SchemaFile::SF_CREATE:
           jam();
-          conf->setTableState(pos, DictTabInfo::StateBuilding);          
+          conf->setTableState(pos, DictTabInfo::StateBuilding);
           break;
         case SchemaFile::SF_ALTER:
           jam();
@@ -10234,7 +10221,7 @@ void Dbdict::sendOLD_LIST_TABLES_CONF(Si
       conf->setTableState(pos, DictTabInfo::StateOnline); // XXX todo
       pos++;
     }
-    
+
     if (pos >= OldListTablesConf::DataLength) {
       sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
 		 OldListTablesConf::SignalLength, JBB);
@@ -10244,8 +10231,8 @@ void Dbdict::sendOLD_LIST_TABLES_CONF(Si
 
     if (! reqListNames)
       continue;
-    
-    Rope name(c_rope_pool, iter.curr.p->m_name);
+
+    LocalRope name(c_rope_pool, iter.curr.p->m_name);
     const Uint32 size = name.size();
     conf->tableData[pos] = size;
     pos++;
@@ -10292,7 +10279,7 @@ void Dbdict::sendLIST_TABLES_CONF(Signal
   XSchemaFile * xsf = &c_schemaFile[SchemaRecord::NEW_SCHEMA_FILE];
   NodeReceiverGroup rg(senderRef);
 
-  DLHashTable<DictObject>::Iterator iter;
+  DictObject_hash::Iterator iter;
   bool done = !c_obj_hash.first(iter);
 
   if (done)
@@ -10465,7 +10452,7 @@ void Dbdict::sendLIST_TABLES_CONF(Signal
     if (reqListNames)
     {
       jam();
-      Rope name(c_rope_pool, iter.curr.p->m_name);
+      LocalRope name(c_rope_pool, iter.curr.p->m_name);
       const Uint32 size = name.size(); // String length including \0
       const Uint32 wsize = (size + 3) / 4;
       tableNamesWriter.putWord(size);
@@ -10844,7 +10831,7 @@ Dbdict::createIndex_parse(Signal* signal
 
       // find the attribute
       {
-        LocalDLFifoList<AttributeRecord>
+        LocalAttributeRecord_list
           list(c_attributeRecordPool, tablePtr.p->m_attributes);
         list.first(attrPtr);
         while (!attrPtr.isNull()) {
@@ -11002,7 +10989,7 @@ Dbdict::createIndex_toCreateTable(Signal
   }
   w.add(DictTabInfo::FragmentTypeVal, createIndexPtr.p->m_fragmentType);
   w.add(DictTabInfo::TableTypeVal, createIndexPtr.p->m_request.indexType);
-  { Rope name(c_rope_pool, tablePtr.p->tableName);
+  { LocalRope name(c_rope_pool, tablePtr.p->tableName);
     char tableName[MAX_TAB_NAME_SIZE];
     name.copy(tableName);
     w.add(DictTabInfo::PrimaryTable, tableName);
@@ -11027,7 +11014,7 @@ Dbdict::createIndex_toCreateTable(Signal
     AttributeRecordPtr attrPtr;
     c_attributeRecordPool.getPtr(attrPtr, attrMap[k].attr_ptr_i);
 
-    { Rope attrName(c_rope_pool, attrPtr.p->attributeName);
+    { LocalRope attrName(c_rope_pool, attrPtr.p->attributeName);
       char attributeName[MAX_ATTR_NAME_SIZE];
       attrName.copy(attributeName);
       w.add(DictTabInfo::AttributeName, attributeName);
@@ -11061,7 +11048,7 @@ Dbdict::createIndex_toCreateTable(Signal
     jam();
     Uint32 key_type = NDB_ARRAYTYPE_FIXED;
     AttributeRecordPtr attrPtr;
-    LocalDLFifoList<AttributeRecord> list(c_attributeRecordPool,
+    LocalAttributeRecord_list list(c_attributeRecordPool,
                                           tablePtr.p->m_attributes);
     // XXX move to parse
     for (list.first(attrPtr); !attrPtr.isNull(); list.next(attrPtr))
@@ -11075,7 +11062,7 @@ Dbdict::createIndex_toCreateTable(Signal
         {
           jam();
           /**
-           * We can only set this new array type "globally" if 
+           * We can only set this new array type "globally" if
            *   version >= X, this to allow down-grade(s) within minor versions
            *   if unique index has been added in newer version
            *
@@ -12046,7 +12033,7 @@ Dbdict::alterIndex_parse(Signal* signal,
        *       or if prev op is AlterIndex using baseop.p->m_base_op_ptr_i
        *   (i.e recursivly, assuming that no operation can come inbetween)
        */
-      Ptr<SchemaOp> baseop = op_ptr;
+      SchemaOpPtr baseop = op_ptr;
       LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
       ndbrequire(list.prev(baseop));
       Uint32 sz = sizeof(baseop.p->m_oprec_ptr.p->m_opType);
@@ -12783,9 +12770,9 @@ Dbdict::alterIndex_fromAddPartitions(Sig
 
     const AlterTabConf* conf =
       (const AlterTabConf*)signal->getDataPtr();
-    
+
     alterIndexPtr.p->m_dihAddFragPtr = conf->connectPtr;
-    
+
     sendTransConf(signal, op_ptr);
   } else {
     jam();
@@ -13732,7 +13719,7 @@ Dbdict::buildIndex_complete(Signal* sign
   jam();
   sendTransConf(signal, op_ptr);
 }
-  
+
 // BuildIndex: ABORT
 
 void
@@ -14707,7 +14694,7 @@ Dbdict::copyData_prepare(Signal* signal,
   TableRecordPtr tabPtr;
   c_tableRecordPool.getPtr(tabPtr, impl_req->srcTableId);
   {
-    LocalDLFifoList<AttributeRecord> alist(c_attributeRecordPool,
+    LocalAttributeRecord_list alist(c_attributeRecordPool,
                                            tabPtr.p->m_attributes);
     AttributeRecordPtr attrPtr;
     for (alist.first(attrPtr); !attrPtr.isNull(); alist.next(attrPtr))
@@ -14726,14 +14713,14 @@ Dbdict::copyData_prepare(Signal* signal,
       }
     }
   }
-  
+
   /* Request Tup-ordered copy when we have disk columns for efficiency */
   if (tabHasDiskCols)
   {
     jam();
     req->requestInfo |= CopyDataReq::TupOrder;
   }
-  
+
   LinearSectionPtr ls_ptr[3];
   ls_ptr[0].sz = cnt;
   ls_ptr[0].p = tmp;
@@ -14755,7 +14742,7 @@ Dbdict::copyData_fromLocal(Signal* signa
     CLEAR_ERROR_INSERT_VALUE;
     ret = 1;
   }
-  
+
   if (ret == 0) {
     jam();
     sendTransConf(signal, op_ptr);
@@ -14808,7 +14795,7 @@ Dbdict::copyData_complete(Signal* signal
   TableRecordPtr tabPtr;
   c_tableRecordPool.getPtr(tabPtr, impl_req->srcTableId);
   {
-    LocalDLFifoList<AttributeRecord> alist(c_attributeRecordPool,
+    LocalAttributeRecord_list alist(c_attributeRecordPool,
                                            tabPtr.p->m_attributes);
     AttributeRecordPtr attrPtr;
     for (alist.first(attrPtr); !attrPtr.isNull(); alist.next(attrPtr))
@@ -14903,10 +14890,10 @@ Dbdict::execCOPY_DATA_IMPL_REF(Signal* s
 
 int
 Dbdict::sendSignalUtilReq(Callback *pcallback,
-			  BlockReference ref, 
-			  GlobalSignalNumber gsn, 
-			  Signal* signal, 
-			  Uint32 length, 
+			  BlockReference ref,
+			  GlobalSignalNumber gsn,
+			  Signal* signal,
+			  Uint32 length,
 			  JobBufferLevel jbuf,
 			  LinearSectionPtr ptr[3],
 			  Uint32 noOfSections)
@@ -14970,7 +14957,7 @@ void
 Dbdict::execUTIL_PREPARE_REF(Signal *signal)
 {
   jamEntry();
-  const UtilPrepareRef * ref = CAST_CONSTPTR(UtilPrepareRef, 
+  const UtilPrepareRef * ref = CAST_CONSTPTR(UtilPrepareRef,
                                              signal->getDataPtr());
   Uint32 code = ref->errorCode;
   if (code == UtilPrepareRef::DICT_TAB_INFO_ERROR)
@@ -14999,7 +14986,7 @@ void Dbdict::execUTIL_EXECUTE_REF(Signal
   ndbout_c("errorCode %u",ref->getErrorCode());
   ndbout_c("TCErrorCode %u",ref->getTCErrorCode());
 #endif
-  
+
   ndbrequire(recvSignalUtilReq(signal, 1) == 0);
 }
 void Dbdict::execUTIL_RELEASE_CONF(Signal *signal)
@@ -15021,7 +15008,7 @@ void Dbdict::execUTIL_RELEASE_REF(Signal
  * MODULE: Create event
  *
  * Create event in DICT.
- * 
+ *
  *
  * Request type in CREATE_EVNT signals:
  *
@@ -15079,7 +15066,7 @@ Dbdict::prepareTransactionEventSysTable
   TableRecordPtr tablePtr;
   c_tableRecordPool.getPtr(tablePtr, opj_ptr_p->m_id);
   ndbrequire(tablePtr.i != RNIL); // system table must exist
-  
+
   Uint32 tableId = tablePtr.p->tableId; /* System table */
   Uint32 noAttr = tablePtr.p->noOfAttributes;
   if (noAttr > EVENT_SYSTEM_TABLE_LENGTH)
@@ -15118,9 +15105,9 @@ Dbdict::prepareUtilTransaction(Callback
   jam();
   EVENT_TRACE;
 
-  UtilPrepareReq * utilPrepareReq = 
+  UtilPrepareReq * utilPrepareReq =
     (UtilPrepareReq *)signal->getDataPtrSend();
-  
+
   utilPrepareReq->setSenderRef(reference());
   utilPrepareReq->setSenderData(senderData);
 
@@ -15162,7 +15149,7 @@ Dbdict::prepareUtilTransaction(Callback
   sectionsPtr[UtilPrepareReq::PROPERTIES_SECTION].sz = w.getWordsUsed();
 
   sendSignalUtilReq(pcallback, DBUTIL_REF, GSN_UTIL_PREPARE_REQ, signal,
-		    UtilPrepareReq::SignalLength, JBB, 
+		    UtilPrepareReq::SignalLength, JBB,
 		    sectionsPtr, UtilPrepareReq::NoOfSections);
 }
 
@@ -15278,7 +15265,7 @@ Dbdict::execCREATE_EVNT_REQ(Signal* sign
   {
     jam();
     releaseSections(handle);
-    
+
     CreateEvntRef * ref = (CreateEvntRef *)signal->getDataPtrSend();
     ref->setUserRef(reference());
     ref->setErrorCode(CreateEvntRef::NotMaster);
@@ -15310,7 +15297,7 @@ Dbdict::execCREATE_EVNT_REQ(Signal* sign
 #ifdef EVENT_DEBUG
   ndbout_c("DBDICT::execCREATE_EVNT_REQ from %u evntRecId = (%d)", refToNode(signal->getSendersBlockRef()), evntRecPtr.i);
 #endif
-  
+
   ndbrequire(req->getUserRef() == signal->getSendersBlockRef());
 
   evntRecPtr.p->init(req,this);
@@ -15349,11 +15336,11 @@ Dbdict::execCREATE_EVNT_REQ(Signal* sign
 #endif
   jam();
   releaseSections(handle);
-    
+
   evntRecPtr.p->m_errorCode = 1;
   evntRecPtr.p->m_errorLine = __LINE__;
   evntRecPtr.p->m_errorNode = reference();
-  
+
   createEvent_sendReply(signal, evntRecPtr);
 }
 
@@ -15375,7 +15362,7 @@ Dbdict::createEvent_RT_USER_CREATE(Signa
 #ifdef EVENT_DEBUG
   ndbout << "Dbdict.cpp: Dbdict::execCREATE_EVNT_REQ RT_USER" << endl;
   char buf[128] = {0};
-  AttributeMask mask = evntRecPtr.p->m_request.getAttrListBitmask(); 
+  AttributeMask mask = evntRecPtr.p->m_request.getAttrListBitmask();
   mask.getText(buf);
   ndbout_c("mask = %s", buf);
 #endif
@@ -15421,12 +15408,12 @@ Dbdict::createEvent_RT_USER_CREATE(Signa
       (r0.getValueType() != SimpleProperties::StringValue) ||
       (r0.getValueLen() <= 0)) {
     jam();
-    
+
     evntRecPtr.p->m_errorCode = 1;
 sendref:
     evntRecPtr.p->m_errorLine = __LINE__;
     evntRecPtr.p->m_errorNode = reference();
-    
+
     releaseSections(handle);
 
     createEvent_sendReply(signal, evntRecPtr);
@@ -15473,19 +15460,19 @@ sendref:
   }
 
   releaseSections(handle);
-  
+
   // Send request to SUMA
 
   CreateSubscriptionIdReq * sumaIdReq =
     (CreateSubscriptionIdReq *)signal->getDataPtrSend();
-  
+
   // make sure we save the original sender for later
   sumaIdReq->senderRef  = reference();
   sumaIdReq->senderData = evntRecPtr.i;
 #ifdef EVENT_DEBUG
   ndbout << "sumaIdReq->senderData = " << sumaIdReq->senderData << endl;
 #endif
-  sendSignal(SUMA_REF, GSN_CREATE_SUBID_REQ, signal, 
+  sendSignal(SUMA_REF, GSN_CREATE_SUBID_REQ, signal,
 	     CreateSubscriptionIdReq::SignalLength, JBB);
   // we should now return in either execCREATE_SUBID_CONF
   // or execCREATE_SUBID_REF
@@ -15601,7 +15588,7 @@ void interpretUtilPrepareErrorCode(UtilP
   DBUG_VOID_RETURN;
 }
 
-void 
+void
 Dbdict::createEventUTIL_PREPARE(Signal* signal,
 				Uint32 callbackData,
 				Uint32 returnCode)
@@ -15614,9 +15601,9 @@ Dbdict::createEventUTIL_PREPARE(Signal*
     jam();
     evntRecPtr.i = req->getSenderData();
     const Uint32 prepareId = req->getPrepareId();
-    
+
     ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
-    
+
     Callback c = { safe_cast(&Dbdict::createEventUTIL_EXECUTE), 0 };
 
     switch (evntRecPtr.p->m_requestType) {
@@ -15640,7 +15627,7 @@ Dbdict::createEventUTIL_PREPARE(Signal*
 		    evntRecPtr.p->m_eventRec.TABLE_NAME,
 		    evntRecPtr.p->m_eventRec.TABLEID,
 		    evntRecPtr.p->m_eventRec.TABLEVERSION));
-  
+
       }
       jam();
       executeTransEventSysTable(&c, signal,
@@ -15784,13 +15771,13 @@ void Dbdict::executeTransEventSysTable(C
   default:
     ndbrequire(false);
   }
-    
+
   LinearSectionPtr headerPtr;
   LinearSectionPtr lsdataPtr;
-    
+
   headerPtr.p = attrHdr;
   headerPtr.sz = id;
-    
+
   lsdataPtr.p = attrHdr + noAttr;
   lsdataPtr.sz = total_len/4;
 
@@ -15799,14 +15786,14 @@ void Dbdict::executeTransEventSysTable(C
     for(int i = 0; i < (int)headerPtr.sz; i++)
       printf("H'%.8x ", attrHdr[i]);
     printf("\n");
-    
+
     printf("Data size %u\n", lsdataPtr.sz);
     for(int i = 0; i < (int)lsdataPtr.sz; i++)
       printf("H'%.8x ", dataPage[i]);
     printf("\n");
 #endif
 
-  executeTransaction(pcallback, signal, 
+  executeTransaction(pcallback, signal,
 		     ptrI,
 		     prepareId,
 		     id,
@@ -15815,7 +15802,7 @@ void Dbdict::executeTransEventSysTable(C
 }
 
 void Dbdict::executeTransaction(Callback *pcallback,
-				Signal* signal, 
+				Signal* signal,
 				Uint32 senderData,
 				Uint32 prepareId,
 				Uint32 noAttr,
@@ -15825,7 +15812,7 @@ void Dbdict::executeTransaction(Callback
   jam();
   EVENT_TRACE;
 
-  UtilExecuteReq * utilExecuteReq = 
+  UtilExecuteReq * utilExecuteReq =
     (UtilExecuteReq *)signal->getDataPtrSend();
 
   utilExecuteReq->setSenderRef(reference());
@@ -15838,7 +15825,7 @@ void Dbdict::executeTransaction(Callback
   for(int i = 0; i < (int)headerPtr.sz; i++)
     printf("H'%.8x ", headerBuffer[i]);
   printf("\n");
-  
+
   printf("Data size %u\n", dataPtr.sz);
   for(int i = 0; i < (int)dataPtr.sz; i++)
     printf("H'%.8x ", dataBuffer[i]);
@@ -15864,10 +15851,10 @@ void Dbdict::parseReadEventSys(Signal* s
 
   handle.getSection(headerPtr, UtilExecuteReq::HEADER_SECTION);
   SectionReader headerReader(headerPtr, getSectionSegmentPool());
-      
+
   handle.getSection(dataPtr, UtilExecuteReq::DATA_SECTION);
   SectionReader dataReader(dataPtr, getSectionSegmentPool());
-  
+
   char *base = (char*)&m_eventRec;
 
   DictObject * opj_ptr_p = get_object(EVENT_SYSTEM_TABLE_NAME,
@@ -15916,7 +15903,7 @@ void Dbdict::parseReadEventSys(Signal* s
   }
 }
 
-void Dbdict::createEventUTIL_EXECUTE(Signal *signal, 
+void Dbdict::createEventUTIL_EXECUTE(Signal *signal,
 				     Uint32 callbackData,
 				     Uint32 returnCode)
 {
@@ -15928,10 +15915,10 @@ void Dbdict::createEventUTIL_EXECUTE(Sig
     jam();
     OpCreateEventPtr evntRecPtr;
     evntRecPtr.i = conf->getSenderData();
-    
+
     ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
     OpCreateEvent *evntRec = evntRecPtr.p;
-    
+
     switch (evntRec->m_requestType) {
     case CreateEvntReq::RT_USER_GET: {
       parseReadEventSys(signal, evntRecPtr.p->m_eventRec);
@@ -15951,7 +15938,7 @@ void Dbdict::createEventUTIL_EXECUTE(Sig
 		  evntRecPtr.p->m_eventRec.TABLE_NAME,
 		  evntRecPtr.p->m_eventRec.TABLEID,
 		  evntRecPtr.p->m_eventRec.TABLEVERSION));
-      
+
       // find table id for event table
       DictObject* obj_ptr_p = get_object(evntRecPtr.p->m_eventRec.TABLE_NAME);
       if(!obj_ptr_p){
@@ -15963,12 +15950,12 @@ void Dbdict::createEventUTIL_EXECUTE(Sig
 	createEvent_sendReply(signal, evntRecPtr);
 	return;
       }
-      
+
       TableRecordPtr tablePtr;
       c_tableRecordPool.getPtr(tablePtr, obj_ptr_p->m_id);
       evntRec->m_request.setTableId(tablePtr.p->tableId);
       evntRec->m_request.setTableVersion(tablePtr.p->tableVersion);
-      
+
       createEventComplete_RT_USER_GET(signal, evntRecPtr);
       return;
     }
@@ -16015,7 +16002,7 @@ void Dbdict::createEventUTIL_EXECUTE(Sig
       evntRecPtr.p->m_errorCode = ref->getErrorCode();
       break;
     }
-    
+
     createEvent_sendReply(signal, evntRecPtr);
   }
 }
@@ -16061,14 +16048,14 @@ Dbdict::createEvent_RT_USER_GET(Signal*
   r0.getString(evntRecPtr.p->m_eventRec.NAME);
   int len = (int)strlen(evntRecPtr.p->m_eventRec.NAME);
   memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len);
-  
+
   releaseSections(handle);
-  
+
   Callback c = { safe_cast(&Dbdict::createEventUTIL_PREPARE), 0 };
-  
+
   prepareTransactionEventSysTable(&c, signal, evntRecPtr.i,
 				  UtilPrepareReq::Read);
-  /* 
+  /*
    * Will read systable and fill an OpCreateEventPtr
    * and return below
    */
@@ -16081,20 +16068,20 @@ Dbdict::createEventComplete_RT_USER_GET(
 
   // Send to oneself and the other DICT's
   CreateEvntReq * req = (CreateEvntReq *)signal->getDataPtrSend();
-      
+
   *req = evntRecPtr.p->m_request;
   req->senderRef = reference();
   req->senderData = evntRecPtr.i;
-      
+
   req->addRequestFlag(CreateEvntReq::RT_DICT_AFTER_GET);
-      
+
 #ifdef EVENT_PH2_DEBUG
   ndbout_c("DBDICT(Coordinator) sending GSN_CREATE_EVNT_REQ::RT_DICT_AFTER_GET to DBDICT participants evntRecPtr.i = (%d)", evntRecPtr.i);
 #endif
 
   NodeReceiverGroup rg(DBDICT, c_aliveNodes);
   RequestTracker & p = evntRecPtr.p->m_reqTracker;
-  if (!p.init<CreateEvntRef>(c_counterMgr, rg, GSN_CREATE_EVNT_REF, 
+  if (!p.init<CreateEvntRef>(c_counterMgr, rg, GSN_CREATE_EVNT_REF,
 			     evntRecPtr.i))
   {
     jam();
@@ -16115,9 +16102,9 @@ Dbdict::createEvent_nodeFailCallback(Sig
   createEvent_sendReply(signal, evntRecPtr);
 }
 
-void Dbdict::execCREATE_EVNT_REF(Signal* signal) 
+void Dbdict::execCREATE_EVNT_REF(Signal* signal)
 {
-  jamEntry();      
+  jamEntry();
   EVENT_TRACE;
   CreateEvntRef * const ref = (CreateEvntRef *)signal->getDataPtr();
   OpCreateEventPtr evntRecPtr;
@@ -16136,7 +16123,7 @@ void Dbdict::execCREATE_EVNT_REF(Signal*
   if (ref->errorCode == CreateEvntRef::NF_FakeErrorREF)
   {
     jam();
-    evntRecPtr.p->m_reqTracker.ignoreRef(c_counterMgr, 
+    evntRecPtr.p->m_reqTracker.ignoreRef(c_counterMgr,
                                          refToNode(ref->senderRef));
 
     /**
@@ -16160,11 +16147,11 @@ void Dbdict::execCREATE_EVNT_REF(Signal*
       noLSP = 2;
     }
   }
-  else 
+  else
   {
     jam();
     evntRecPtr.p->m_errorCode = ref->errorCode;
-    evntRecPtr.p->m_reqTracker.reportRef(c_counterMgr, 
+    evntRecPtr.p->m_reqTracker.reportRef(c_counterMgr,
                                          refToNode(ref->senderRef));
   }
 
@@ -16215,7 +16202,7 @@ Dbdict::createEvent_RT_DICT_AFTER_GET(Si
   DBUG_ENTER("Dbdict::createEvent_RT_DICT_AFTER_GET");
   jam();
   evntRecPtr.p->m_request.setUserRef(signal->senderBlockRef());
-  
+
 #ifdef EVENT_PH2_DEBUG
   ndbout_c("DBDICT(Participant) got CREATE_EVNT_REQ::RT_DICT_AFTER_GET evntRecPtr.i = (%d)", evntRecPtr.i);
 #endif
@@ -16229,7 +16216,7 @@ Dbdict::createEvent_RT_DICT_AFTER_GET(Si
   CRASH_INSERTION2(6009, getOwnNodeId() != c_masterNodeId);
 
   SubCreateReq * sumaReq = (SubCreateReq *)signal->getDataPtrSend();
-  
+
   sumaReq->senderRef        = reference(); // reference to DICT
   sumaReq->senderData       = evntRecPtr.i;
   sumaReq->subscriptionId   = evntRecPtr.p->m_request.getEventId();
@@ -16245,7 +16232,7 @@ Dbdict::createEvent_RT_DICT_AFTER_GET(Si
   }
   sumaReq->tableId          = evntRecPtr.p->m_request.getTableId();
   sumaReq->schemaTransId    = 0;
-    
+
 #ifdef EVENT_PH2_DEBUG
   ndbout_c("sending GSN_SUB_CREATE_REQ");
 #endif
@@ -16336,7 +16323,7 @@ void Dbdict::createEvent_sendReply(Signa
       evntRecPtr.p->m_errorLine = __LINE__;
       evntRecPtr.p->m_errorNode = reference();
       jam();
-    } else 
+    } else
       jam();
   }
 
@@ -16350,7 +16337,7 @@ void Dbdict::createEvent_sendReply(Signa
     jam();
     EVENT_TRACE;
     CreateEvntRef * ret = (CreateEvntRef *)signal->getDataPtrSend();
-    
+
     ret->setEventId(evntRecPtr.p->m_request.getEventId());
     ret->setEventKey(evntRecPtr.p->m_request.getEventKey());
     ret->setUserData(evntRecPtr.p->m_request.getUserData());
@@ -16376,7 +16363,7 @@ void Dbdict::createEvent_sendReply(Signa
     jam();
     EVENT_TRACE;
     CreateEvntConf * evntConf = (CreateEvntConf *)signal->getDataPtrSend();
-    
+
     evntConf->setEventId(evntRecPtr.p->m_request.getEventId());
     evntConf->setEventKey(evntRecPtr.p->m_request.getEventKey());
     evntConf->setUserData(evntRecPtr.p->m_request.getUserData());
@@ -16471,7 +16458,7 @@ busy:
     subbPtr.p->m_subscriberData = req->subscriberData;
     bzero(subbPtr.p->m_buckets_per_ng, sizeof(subbPtr.p->m_buckets_per_ng));
   }
-  
+
   if (refToBlock(origSenderRef) != DBDICT) {
     /*
      * Coordinator
@@ -16493,7 +16480,7 @@ busy:
       errCode = SubStartRef::BusyWithNR;
       goto busy;
     }
-    
+
     subbPtr.p->m_senderRef = origSenderRef; // not sure if API sets correctly
     NodeReceiverGroup rg(DBDICT, c_aliveNodes);
 
@@ -16504,12 +16491,12 @@ busy:
       errCode = SubStartRef::Busy;
       goto busy;
     }
-    
+
     SubStartReq* req = (SubStartReq*) signal->getDataPtrSend();
-    
+
     req->senderRef  = reference();
     req->senderData = subbPtr.i;
-    
+
 #ifdef EVENT_PH3_DEBUG
     ndbout_c("DBDICT(Coordinator) sending GSN_SUB_START_REQ to DBDICT participants subbPtr.i = (%d)", subbPtr.i);
 #endif
@@ -16541,13 +16528,13 @@ busy:
   ndbrequire(refToBlock(origSenderRef) == DBDICT);
 
   CRASH_INSERTION(6007);
-  
+
   {
     SubStartReq* req = (SubStartReq*) signal->getDataPtrSend();
-    
+
     req->senderRef = reference();
     req->senderData = subbPtr.i;
-    
+
 #ifdef EVENT_PH3_DEBUG
     ndbout_c("DBDICT(Participant) sending GSN_SUB_START_REQ to SUMA subbPtr.i = (%d)", subbPtr.i);
 #endif
@@ -16745,7 +16732,7 @@ void Dbdict::completeSubStartReq(Signal*
 #ifdef EVENT_DEBUG
   ndbout_c("SUB_START_CONF");
 #endif
-  
+
   ndbrequire(c_outstanding_sub_startstop);
   c_outstanding_sub_startstop--;
   SubStartConf* conf = (SubStartConf*)signal->getDataPtrSend();
@@ -16810,7 +16797,7 @@ busy:
       req->requestInfo = 0;
     }
   }
-  
+
   if (refToBlock(origSenderRef) != DBDICT) {
     /*
      * Coordinator
@@ -16846,9 +16833,9 @@ busy:
       errCode = SubStopRef::Busy;
       goto busy;
     }
-    
+
     SubStopReq* req = (SubStopReq*) signal->getDataPtrSend();
-    
+
     req->senderRef  = reference();
     req->senderData = subbPtr.i;
 
@@ -16868,10 +16855,10 @@ busy:
 
   {
     SubStopReq* req = (SubStopReq*) signal->getDataPtrSend();
-    
+
     req->senderRef = reference();
     req->senderData = subbPtr.i;
-    
+
     sendSignal(SUMA_REF, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB);
   }
 }
@@ -17044,7 +17031,7 @@ void Dbdict::completeSubStopReq(Signal*
  * MODULE: Drop event.
  *
  * Drop event.
- * 
+ *
  * TODO
  */
 
@@ -17081,7 +17068,7 @@ Dbdict::execDROP_EVNT_REQ(Signal* signal
     // Failed to allocate event record
     jam();
     releaseSections(handle);
- 
+
     DropEvntRef * ret = (DropEvntRef *)signal->getDataPtrSend();
     ret->setErrorCode(747);
     ret->setErrorLine(__LINE__);
@@ -17132,7 +17119,7 @@ Dbdict::execDROP_EVNT_REQ(Signal* signal
     printf("\n");
 #endif
   }
-  
+
   releaseSections(handle);
 
   Callback c = { safe_cast(&Dbdict::dropEventUTIL_PREPARE_READ), 0 };
@@ -17142,7 +17129,7 @@ Dbdict::execDROP_EVNT_REQ(Signal* signal
   DBUG_VOID_RETURN;
 }
 
-void 
+void
 Dbdict::dropEventUTIL_PREPARE_READ(Signal* signal,
 				   Uint32 callbackData,
 				   Uint32 returnCode)
@@ -17169,7 +17156,7 @@ Dbdict::dropEventUTIL_PREPARE_READ(Signa
 			    prepareId, UtilPrepareReq::Read);
 }
 
-void 
+void
 Dbdict::dropEventUTIL_EXECUTE_READ(Signal* signal,
 				   Uint32 callbackData,
 				   Uint32 returnCode)
@@ -17199,7 +17186,7 @@ Dbdict::dropEventUTIL_EXECUTE_READ(Signa
     dropEvent_sendReply(signal, evntRecPtr);
     return;
   }
-  
+
   SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtrSend();
 
   req->senderRef       = reference();
@@ -17247,7 +17234,7 @@ Dbdict::execSUB_REMOVE_REQ(Signal* signa
   }
 
   CRASH_INSERTION2(6010, getOwnNodeId() != c_masterNodeId);
-  
+
   SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtrSend();
   req->senderRef = reference();
   req->senderData = subbPtr.i;
@@ -17396,7 +17383,7 @@ Dbdict::completeSubRemoveReq(Signal* sig
 				  UtilPrepareReq::Delete);
 }
 
-void 
+void
 Dbdict::dropEventUTIL_PREPARE_DELETE(Signal* signal,
 				     Uint32 callbackData,
 				     Uint32 returnCode)
@@ -17414,11 +17401,11 @@ Dbdict::dropEventUTIL_PREPARE_DELETE(Sig
   jam();
   evntRecPtr.i = req->getSenderData();
   const Uint32 prepareId = req->getPrepareId();
-  
+
   ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
 #ifdef EVENT_DEBUG
   printf("DropEvntUTIL_PREPARE; evntRecPtr.i len %u\n",evntRecPtr.i);
-#endif    
+#endif
 
   Callback c = { safe_cast(&Dbdict::dropEventUTIL_EXECUTE_DELETE), 0 };
 
@@ -17427,7 +17414,7 @@ Dbdict::dropEventUTIL_PREPARE_DELETE(Sig
 			    prepareId, UtilPrepareReq::Delete);
 }
 
-void 
+void
 Dbdict::dropEventUTIL_EXECUTE_DELETE(Signal* signal,
 				     Uint32 callbackData,
 				     Uint32 returnCode)
@@ -17481,7 +17468,7 @@ Dbdict::dropEventUtilExecuteRef(Signal*
   jam();
   evntRecPtr.i = ref->getSenderData();
   ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
-    
+
   evntRecPtr.p->m_errorNode = reference();
   evntRecPtr.p->m_errorLine = __LINE__;
 
@@ -17516,7 +17503,7 @@ void Dbdict::dropEvent_sendReply(Signal*
   if (evntRecPtr.p->hasError()) {
     jam();
     DropEvntRef * ret = (DropEvntRef *)signal->getDataPtrSend();
-    
+
     ret->setUserData(evntRecPtr.p->m_request.getUserData());
     ret->setUserRef(evntRecPtr.p->m_request.getUserRef());
 
@@ -17529,7 +17516,7 @@ void Dbdict::dropEvent_sendReply(Signal*
   } else {
     jam();
     DropEvntConf * evntConf = (DropEvntConf *)signal->getDataPtrSend();
-    
+
     evntConf->setUserData(evntRecPtr.p->m_request.getUserData());
     evntConf->setUserRef(evntRecPtr.p->m_request.getUserRef());
 
@@ -17822,7 +17809,7 @@ Dbdict::createTrigger_parse(Signal* sign
   }
 
   {
-    Rope name(c_rope_pool, triggerPtr.p->triggerName);
+    LocalRope name(c_rope_pool, triggerPtr.p->triggerName);
     if (!name.assign(createTriggerPtr.p->m_triggerName)) {
       jam();
       setError(error, CreateTrigRef::OutOfStringBuffer, __LINE__);
@@ -17832,7 +17819,7 @@ Dbdict::createTrigger_parse(Signal* sign
 
   // connect to new DictObject
   {
-    Ptr<DictObject> obj_ptr;
+    DictObjectPtr obj_ptr;
     seizeDictObject(op_ptr, obj_ptr, triggerPtr.p->triggerName);
 
     obj_ptr.p->m_id = impl_req->triggerId; // wl3600_todo id
@@ -17914,7 +17901,7 @@ Dbdict::createTrigger_parse_endpoint(Sig
     return;
   }
 
-  Ptr<TriggerRecord> triggerPtr;
+  TriggerRecordPtr triggerPtr;
   c_triggerRecordPool.getPtr(triggerPtr, impl_req->triggerId);
   switch(TriggerInfo::getTriggerType(triggerPtr.p->triggerInfo)){
   case TriggerType::REORG_TRIGGER:
@@ -18648,7 +18635,7 @@ Dbdict::dropTrigger_parse(Signal* signal
     c_triggerRecordPool.getPtr(triggerPtr, impl_req->triggerId);
     // wl3600_todo state check
   }
-  
+
   D("trigger " << copyRope<MAX_TAB_NAME_SIZE>(triggerPtr.p->triggerName));
   impl_req->triggerInfo = triggerPtr.p->triggerInfo;
   Uint32 requestType = impl_req->requestType;
@@ -19114,13 +19101,13 @@ Dbdict::execDROP_TRIG_IMPL_REF(Signal* s
 */
 
 void
-Dbdict::getTableKeyList(TableRecordPtr tablePtr, 
+Dbdict::getTableKeyList(TableRecordPtr tablePtr,
 			Id_array<MAX_ATTRIBUTES_IN_INDEX+1>& list)
 {
   jam();
   list.sz = 0;
   list.id[list.sz++] = AttributeHeader::FRAGMENT;
-  LocalDLFifoList<AttributeRecord> alist(c_attributeRecordPool,
+  LocalAttributeRecord_list alist(c_attributeRecordPool,
                                          tablePtr.p->m_attributes);
   AttributeRecordPtr attrPtr;
   for (alist.first(attrPtr); !attrPtr.isNull(); alist.next(attrPtr)) {
@@ -19150,7 +19137,7 @@ Dbdict::getIndexAttr(TableRecordPtr inde
     tmp.copy(name);
     len = tmp.size();
   }
-  LocalDLFifoList<AttributeRecord> alist(c_attributeRecordPool, 
+  LocalAttributeRecord_list alist(c_attributeRecordPool,
 					 tablePtr.p->m_attributes);
   for (alist.first(attrPtr); !attrPtr.isNull(); alist.next(attrPtr)){
     ConstRope tmp(c_rope_pool, attrPtr.p->attributeName);
@@ -19170,7 +19157,7 @@ Dbdict::getIndexAttrList(TableRecordPtr
   memset(list.id, 0, sizeof(list.id));
   ndbrequire(indexPtr.p->noOfAttributes >= 2);
 
-  LocalDLFifoList<AttributeRecord> alist(c_attributeRecordPool,
+  LocalAttributeRecord_list alist(c_attributeRecordPool,
                                          indexPtr.p->m_attributes);
   AttributeRecordPtr attrPtr;
   for (alist.first(attrPtr); !attrPtr.isNull(); alist.next(attrPtr)) {
@@ -19194,12 +19181,12 @@ Dbdict::getIndexAttrMask(TableRecordPtr
   jam();
   mask.clear();
   ndbrequire(indexPtr.p->noOfAttributes >= 2);
-  
+
   AttributeRecordPtr attrPtr, currPtr;
-  LocalDLFifoList<AttributeRecord> alist(c_attributeRecordPool, 
+  LocalAttributeRecord_list alist(c_attributeRecordPool,
 					 indexPtr.p->m_attributes);
-  
-  
+
+
   for (alist.first(attrPtr); currPtr = attrPtr, alist.next(attrPtr); ){
     Uint32 id;
     getIndexAttr(indexPtr, currPtr.i, &id);
@@ -19231,7 +19218,7 @@ void
 Dbdict::sendDictLockInfoEvent(Signal*, const UtilLockReq* req, const char* text)
 {
   const Dbdict::DictLockType* lt = getDictLockType(req->extra);
-  
+
   infoEvent("DICT: %s %u for %s",
             text,
             (unsigned)refToNode(req->senderRef), lt->text);
@@ -19257,7 +19244,7 @@ Dbdict::execDICT_LOCK_REQ(Signal* signal
       req.lockType == DictLockReq::SumaHandOver)
   {
     jam();
-    
+
     if (c_outstanding_sub_startstop)
     {
       jam();
@@ -19265,7 +19252,7 @@ Dbdict::execDICT_LOCK_REQ(Signal* signal
       err = DictLockRef::TooManyRequests;
       goto ref;
     }
-    
+
     if (req.lockType == DictLockReq::SumaHandOver &&
         !c_sub_startstop_lock.isclear())
     {
@@ -19275,7 +19262,7 @@ Dbdict::execDICT_LOCK_REQ(Signal* signal
     }
 
     c_sub_startstop_lock.set(refToNode(req.userRef));
-    
+
     g_eventLogger->info("granting dict lock to %u", refToNode(req.userRef));
     DictLockConf* conf = (DictLockConf*)signal->getDataPtrSend();
     conf->userPtr = req.userPtr;
@@ -19294,14 +19281,14 @@ Dbdict::execDICT_LOCK_REQ(Signal* signal
 
   // make sure bad request crashes slave, not master (us)
   Uint32 res;
-  if (getOwnNodeId() != c_masterNodeId) 
+  if (getOwnNodeId() != c_masterNodeId)
   {
     jam();
     err = DictLockRef::NotMaster;
     goto ref;
   }
-  
-  if (lt == NULL) 
+
+  if (lt == NULL)
   {
     jam();
     err = DictLockRef::InvalidLockType;
@@ -19309,20 +19296,20 @@ Dbdict::execDICT_LOCK_REQ(Signal* signal
   }
 
   if (req.userRef != signal->getSendersBlockRef() ||
-      getNodeInfo(refToNode(req.userRef)).m_type != NodeInfo::DB) 
+      getNodeInfo(refToNode(req.userRef)).m_type != NodeInfo::DB)
   {
     jam();
     err = DictLockRef::BadUserRef;
     goto ref;
   }
 
-  if (c_aliveNodes.get(refToNode(req.userRef))) 
+  if (c_aliveNodes.get(refToNode(req.userRef)))
   {
     jam();
     err = DictLockRef::TooLate;
     goto ref;
   }
-  
+
   res = m_dict_lock.lock(this, m_dict_lock_pool, &lockReq, 0);
   switch(res){
   case 0:
@@ -19337,31 +19324,31 @@ Dbdict::execDICT_LOCK_REQ(Signal* signal
     break;
   default:
     jam();
-    sendDictLockInfoEvent(signal, &lockReq, "lock request by node");    
+    sendDictLockInfoEvent(signal, &lockReq, "lock request by node");
     break;
   }
   return;
-  
+
 ref:
   {
     DictLockRef* ref = (DictLockRef*)signal->getDataPtrSend();
     ref->userPtr = lockReq.senderData;
     ref->lockType = lockReq.extra;
     ref->errorCode = err;
-    
+
     sendSignal(lockReq.senderRef, GSN_DICT_LOCK_REF, signal,
                DictLockRef::SignalLength, JBB);
   }
   return;
-  
+
 conf:
   {
     DictLockConf* conf = (DictLockConf*)signal->getDataPtrSend();
-    
+
     conf->userPtr = lockReq.senderData;
     conf->lockType = lockReq.extra;
     conf->lockPtr = lockReq.senderData;
-    
+
     sendSignal(lockReq.senderRef, GSN_DICT_LOCK_CONF, signal,
                DictLockConf::SignalLength, JBB);
   }
@@ -19373,7 +19360,7 @@ Dbdict::execDICT_UNLOCK_ORD(Signal* sign
 {
   jamEntry();
   const DictUnlockOrd* ord = (const DictUnlockOrd*)&signal->theData[0];
-  
+
   DictLockReq req;
   req.userPtr = ord->senderData;
   req.userRef = ord->senderRef;
@@ -19393,7 +19380,7 @@ Dbdict::execDICT_UNLOCK_ORD(Signal* sign
     c_sub_startstop_lock.clear(refToNode(ord->senderRef));
     return;
   }
-  
+
   UtilLockReq lockReq;
   lockReq.senderData = req.userPtr;
   lockReq.senderRef = req.userRef;
@@ -19464,7 +19451,7 @@ Dbdict::execDICT_TAKEOVER_REQ(Signal* si
 #ifdef VM_TRACE
       ndbout_c("Dbdict::execDICT_TAKEOVER_REQ: trans %u(0x%8x), state %u, op_list %s", trans_ptr.i, (uint)trans_ptr.p->trans_key, trans_ptr.p->m_state, (trans_ptr.p->m_op_list.in_use)?"yes":"no");
 #endif
-     
+
      SchemaOpPtr op_ptr;
      LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
      bool pending_op = list.first(op_ptr);
@@ -19496,7 +19483,7 @@ Dbdict::execDICT_TAKEOVER_REQ(Signal* si
 #ifdef VM_TRACE
        ndbout_c("Dbdict::execDICT_TAKEOVER_REQ: op %u state %u", op_ptr.p->op_key, op_ptr.p->m_state);
 #endif
-       
+
        /*
          Check if operation is busy
        */
@@ -19606,7 +19593,7 @@ Dbdict::execDICT_TAKEOVER_REQ(Signal* si
      ndbrequire(!(pending_trans = c_schemaTransList.next(trans_ptr)));
    }
  }
- 
+
 void
 Dbdict::execDICT_TAKEOVER_REF(Signal* signal)
 {
@@ -19655,7 +19642,7 @@ Dbdict::execDICT_TAKEOVER_CONF(Signal* s
   //Uint32 rollback_op = conf->rollback_op;
   //Uint32 rollback_op_state = conf->rollback_op_state;
   NodeRecordPtr masterNodePtr;
-  
+
   /*
     Accumulate all responses
   */
@@ -19806,7 +19793,7 @@ void Dbdict::check_takeover_replies(Sign
           trans_ptr.p->m_lowest_trans_state = SchemaTrans::TS_ENDING;
           trans_ptr.p->m_highest_trans_state = SchemaTrans::TS_INITIAL;
         }
-  
+
         trans_ptr.p->m_isMaster = true;
         trans_ptr.p->m_masterRef = reference();
         trans_ptr.p->m_clientRef = clientRef;
@@ -19843,7 +19830,7 @@ void Dbdict::check_takeover_replies(Sign
 	  trans_ptr.p->m_rollback_op = rollback_op;
 	  trans_ptr.p->m_rollback_op_state = rollback_op_state;
 	}
-        
+
         if (SchemaTrans::weight(trans_state) <
             SchemaTrans::weight(trans_ptr.p->m_lowest_trans_state))
         {
@@ -19893,7 +19880,7 @@ void Dbdict::check_takeover_replies(Sign
       trans_ptr.p->m_master_recovery_state = SchemaTrans::TRS_ROLLFORWARD;
       break;
     }
-    
+
     if (trans_ptr.p->m_master_recovery_state == SchemaTrans::TRS_ROLLFORWARD)
     {
       /*
@@ -19905,8 +19892,8 @@ void Dbdict::check_takeover_replies(Sign
       trans_ptr.p->check_partial_rollforward = true;
       trans_ptr.p->m_state = trans_ptr.p->m_lowest_trans_state;
 #ifdef VM_TRACE
-      ndbout_c("Setting transaction state to %u for rollforward", trans_ptr.p->m_state); 
-#endif     
+      ndbout_c("Setting transaction state to %u for rollforward", trans_ptr.p->m_state);
+#endif
     }
     else
     {
@@ -19918,8 +19905,8 @@ void Dbdict::check_takeover_replies(Sign
       infoEvent("Pending schema transaction %u will be rolled back", trans_ptr.p->trans_key);
       trans_ptr.p->m_state = trans_ptr.p->m_highest_trans_state;
 #ifdef VM_TRACE
-      ndbout_c("Setting transaction state to %u for rollback", trans_ptr.p->m_state); 
-#endif     
+      ndbout_c("Setting transaction state to %u for rollback", trans_ptr.p->m_state);
+#endif
     }
 #ifdef VM_TRACE
     ndbout_c("Setting start state for transaction %u to %u", trans_ptr.p->trans_key, trans_ptr.p->m_state);
@@ -19927,8 +19914,8 @@ void Dbdict::check_takeover_replies(Sign
     pending_trans = c_schemaTransList.next(trans_ptr);
   }
 
-  /* 
-     Initialize all node recovery states 
+  /*
+     Initialize all node recovery states
   */
   for (unsigned i = 1; i < MAX_NDB_NODES; i++) {
     jam();
@@ -19954,7 +19941,7 @@ void Dbdict::check_takeover_replies(Sign
         c_nodes.getPtr(nodePtr, i);
 #ifdef VM_TRACE
         ndbout_c("Node %u had %u operations, master has %u",i , nodePtr.p->takeOverConf.op_count, masterNodePtr.p->takeOverConf.op_count);
-#endif        
+#endif
         if (nodePtr.p->takeOverConf.op_count == 0)
         {
           if (SchemaTrans::weight(trans_ptr.p->m_state)
@@ -19987,7 +19974,7 @@ void Dbdict::check_takeover_replies(Sign
           /*
               Operation is missing on slave
           */
-          if (SchemaTrans::weight(trans_ptr.p->m_state) < 
+          if (SchemaTrans::weight(trans_ptr.p->m_state) <
               SchemaTrans::weight(SchemaTrans::TS_PREPARING))
           {
             /*
@@ -20064,7 +20051,7 @@ void Dbdict::check_takeover_replies(Sign
               }
               trans_ptr.p->m_nodes.set(c_masterNodeId);
 #ifdef VM_TRACE
-              ndbout_c("Adding master node %u to transaction %u", c_masterNodeId, trans_ptr.p->trans_key);          
+              ndbout_c("Adding master node %u to transaction %u", c_masterNodeId, trans_ptr.p->trans_key);
 #endif
             }
           }
@@ -20100,7 +20087,7 @@ void Dbdict::check_takeover_replies(Sign
               nodePtr.p->recoveryState = NodeRecord::RS_PARTIAL_ROLLFORWARD;
               nodePtr.p->start_op = op_key;
               nodePtr.p->start_op_state = op_state;
-              
+
             }
             else
             {
@@ -20145,11 +20132,11 @@ void Dbdict::check_takeover_replies(Sign
           else if (SchemaOp::weight(nodePtr.p->takeOverConf.rollforward_op_state) >
                    SchemaOp::weight(trans_ptr.p->m_rollforward_op_state) ||
                    nodePtr.p->takeOverConf.rollforward_op >
-                   trans_ptr.p->m_rollforward_op)              
+                   trans_ptr.p->m_rollforward_op)
           {
             /*
               Slave has started committing, but other slaves have non-committed
-              operations. Node needs to be partially rollforward. 
+              operations. Node needs to be partially rollforward.
             */
             jam();
             nodePtr.p->recoveryState = NodeRecord::RS_PARTIAL_ROLLFORWARD;
@@ -20209,7 +20196,7 @@ void Dbdict::check_takeover_replies(Sign
 #ifdef VM_TRACE
             ndbout_c("Node %u will be partially rolled back from operation %u, state %u", nodePtr.i, nodePtr.p->start_op, nodePtr.p->start_op_state);
 #endif
-            if (i == c_masterNodeId && 
+            if (i == c_masterNodeId &&
                 (SchemaTrans::weight(trans_ptr.p->m_state) <=
                  SchemaTrans::weight(SchemaTrans::TS_PREPARING)))
             {
@@ -20240,7 +20227,7 @@ void Dbdict::check_takeover_replies(Sign
       jam();
       SchemaOpPtr rollforward_op_ptr;
       ndbrequire(findSchemaOp(rollforward_op_ptr, trans_ptr.p->m_rollforward_op));
-      trans_ptr.p->m_curr_op_ptr_i = rollforward_op_ptr.i;    
+      trans_ptr.p->m_curr_op_ptr_i = rollforward_op_ptr.i;
 #ifdef VM_TRACE
       ndbout_c("execDICT_TAKEOVER_CONF: Transaction %u rolled forward starting at %u(%u)", trans_ptr.p->trans_key,  trans_ptr.p->m_rollforward_op, trans_ptr.p->m_curr_op_ptr_i);
 #endif
@@ -20256,13 +20243,13 @@ void Dbdict::check_takeover_replies(Sign
         jam();
         SchemaOpPtr rollback_op_ptr;
         ndbrequire(findSchemaOp(rollback_op_ptr, trans_ptr.p->m_rollback_op));
-        trans_ptr.p->m_curr_op_ptr_i = rollback_op_ptr.i;    
+        trans_ptr.p->m_curr_op_ptr_i = rollback_op_ptr.i;
 #ifdef VM_TRACE
         ndbout_c("execDICT_TAKEOVER_CONF: Transaction %u rolled back starting at %u(%u)", trans_ptr.p->trans_key,  trans_ptr.p->m_rollback_op, trans_ptr.p->m_curr_op_ptr_i);
 #endif
       }
     }
-    
+
     trans_recover(signal, trans_ptr);
     pending_trans = c_schemaTransList.next(trans_ptr);
   }
@@ -20281,21 +20268,21 @@ Dbdict::removeStaleDictLocks(Signal* sig
     infoEvent("Iterating lock queue");
 #endif
     do {
-      if (NodeBitmask::get(theFailedNodes, 
+      if (NodeBitmask::get(theFailedNodes,
                            refToNode(iter.m_curr.p->m_req.senderRef)))
       {
         if (iter.m_curr.p->m_req.requestInfo & UtilLockReq::Granted)
         {
           jam();
           infoEvent("Removed lock for node %u", refToNode(iter.m_curr.p->m_req.senderRef));
-          sendDictLockInfoEvent(signal, &iter.m_curr.p->m_req, 
+          sendDictLockInfoEvent(signal, &iter.m_curr.p->m_req,
                                 "remove lock by failed node");
-        } 
-        else 
+        }
+        else
         {
           jam();
           infoEvent("Removed lock request for node %u", refToNode(iter.m_curr.p->m_req.senderRef));
-          sendDictLockInfoEvent(signal, &iter.m_curr.p->m_req, 
+          sendDictLockInfoEvent(signal, &iter.m_curr.p->m_req,
                                 "remove lock request by failed node");
         }
         DictUnlockOrd* ord = (DictUnlockOrd*)signal->getDataPtrSend();
@@ -20356,7 +20343,7 @@ Dbdict::dict_lock_unlock(Signal* signal,
   UtilUnlockReq req;
   req.senderData = _req->userPtr;
   req.senderRef = _req->userRef;
-  
+
   Uint32 res = m_dict_lock.unlock(this, m_dict_lock_pool, &req);
   switch(res){
   case UtilUnlockRef::OK:
@@ -20387,8 +20374,8 @@ Dbdict::dict_lock_unlock(Signal* signal,
         conf->lockType = lockReq.extra;
         sendSignal(lockReq.senderRef, GSN_DICT_LOCK_CONF, signal,
                    DictLockConf::SignalLength, JBB);
-      }        
-      
+      }
+
       if (!m_dict_lock.next(iter))
         break;
     }
@@ -20494,15 +20481,15 @@ Dbdict::resizeSchemaFile(XSchemaFile * x
 }
 
 void
-Dbdict::computeChecksum(XSchemaFile * xsf, Uint32 pageNo){ 
+Dbdict::computeChecksum(XSchemaFile * xsf, Uint32 pageNo){
   SchemaFile * sf = &xsf->schemaPage[pageNo];
   sf->CheckSum = 0;
   sf->CheckSum = computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS);
 }
 
-bool 
+bool
 Dbdict::validateChecksum(const XSchemaFile * xsf){
-  
+
   for (Uint32 n = 0; n < xsf->noOfPages; n++) {
     SchemaFile * sf = &xsf->schemaPage[n];
     Uint32 c = computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS);
@@ -20520,7 +20507,7 @@ Dbdict::computeChecksum(const Uint32 * s
   return ret;
 }
 
-SchemaFile::TableEntry * 
+SchemaFile::TableEntry *
 Dbdict::getTableEntry(Uint32 tableId)
 {
   return getTableEntry(&c_schemaFile[SchemaRecord::NEW_SCHEMA_FILE], tableId);
@@ -20608,7 +20595,7 @@ Dbdict::execCREATE_FILE_REQ(Signal* sign
       jam();
       impl_req->requestInfo = CreateFileImplReq::CreateForce;
     }
-    
+
     handleClientReq(signal, op_ptr, handle);
     return;
   } while (0);
@@ -20663,7 +20650,7 @@ Dbdict::createFile_parse(Signal* signal,
   }
   SimplePropertiesSectionReader it(objInfoPtr, getSectionSegmentPool());
 
-  Ptr<DictObject> obj_ptr; obj_ptr.setNull();
+  DictObjectPtr obj_ptr; obj_ptr.setNull();
   FilePtr filePtr; filePtr.setNull();
 
   DictFilegroupInfo::File f; f.init();
@@ -20692,7 +20679,7 @@ Dbdict::createFile_parse(Signal* signal,
   if(fg_ptr.p->m_version != f.FilegroupVersion)
   {
     jam();
-    setError(error, CreateFileRef::InvalidFilegroupVersion, __LINE__, 
+    setError(error, CreateFileRef::InvalidFilegroupVersion, __LINE__,
              f.FileName);
     return;
   }
@@ -20725,7 +20712,7 @@ Dbdict::createFile_parse(Signal* signal,
   }
 
   Uint32 len = Uint32(strlen(f.FileName) + 1);
-  Uint32 hash = Rope::hash(f.FileName, len);
+  Uint32 hash = LocalRope::hash(f.FileName, len);
   if(get_object(f.FileName, len, hash) != 0)
   {
     jam();
@@ -20740,7 +20727,7 @@ Dbdict::createFile_parse(Signal* signal,
     if(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &dl) && dl)
     {
       jam();
-      setError(error, CreateFileRef::NotSupportedWhenDiskless, __LINE__, 
+      setError(error, CreateFileRef::NotSupportedWhenDiskless, __LINE__,
                f.FileName);
       return;
     }
@@ -20773,7 +20760,7 @@ Dbdict::createFile_parse(Signal* signal,
   new (filePtr.p) File();
 
   {
-    Rope name(c_rope_pool, obj_ptr.p->m_name);
+    LocalRope name(c_rope_pool, obj_ptr.p->m_name);
     if(!name.assign(f.FileName, len, hash))
     {
       jam();
@@ -20790,7 +20777,7 @@ Dbdict::createFile_parse(Signal* signal,
     if (objId == RNIL)
     {
       jam();
-      setError(error, CreateFilegroupRef::NoMoreObjectRecords, __LINE__, 
+      setError(error, CreateFilegroupRef::NoMoreObjectRecords, __LINE__,
                f.FileName);
       goto error;
     }
@@ -20924,7 +20911,7 @@ Dbdict::createFile_parse(Signal* signal,
                         filePtr.p->m_file_size,
                         createFilePtr.p->m_warningFlags);
   }
-  
+
   send_event(signal, trans_ptr,
              NDB_LE_CreateSchemaObject,
              impl_req->file_id,
@@ -21014,7 +21001,7 @@ Dbdict::createFile_reply(Signal* signal,
     ref->senderData = op_ptr.p->m_clientData;
     ref->transId = trans_ptr.p->m_transId;
     getError(error, ref);
-    
+
     Uint32 clientRef = op_ptr.p->m_clientRef;
     sendSignal(clientRef, GSN_CREATE_FILE_REF, signal,
                CreateFileRef::SignalLength, JBB);
@@ -21162,7 +21149,7 @@ Dbdict::createFile_abortPrepare(Signal*
   default:
     ndbrequire(false);
   }
-  
+
   sendSignal(ref, GSN_CREATE_FILE_IMPL_REQ, signal,
              CreateFileImplReq::AbortLength, JBB);
 
@@ -21383,7 +21370,7 @@ Dbdict::createFilegroup_parse(Signal* si
   }
   SimplePropertiesSectionReader it(objInfoPtr, getSectionSegmentPool());
 
-  Ptr<DictObject> obj_ptr; obj_ptr.setNull();
+  DictObjectPtr obj_ptr; obj_ptr.setNull();
   FilegroupPtr fg_ptr; fg_ptr.setNull();
 
   DictFilegroupInfo::Filegroup fg; fg.init();
@@ -21423,7 +21410,7 @@ Dbdict::createFilegroup_parse(Signal* si
   }
 
   Uint32 len = Uint32(strlen(fg.FilegroupName) + 1);
-  Uint32 hash = Rope::hash(fg.FilegroupName, len);
+  Uint32 hash = LocalRope::hash(fg.FilegroupName, len);
   if(get_object(fg.FilegroupName, len, hash) != 0)
   {
     jam();
@@ -21450,7 +21437,7 @@ Dbdict::createFilegroup_parse(Signal* si
   new (fg_ptr.p) Filegroup();
 
   {
-    Rope name(c_rope_pool, obj_ptr.p->m_name);
+    LocalRope name(c_rope_pool, obj_ptr.p->m_name);
     if(!name.assign(fg.FilegroupName, len, hash))
     {
       jam();
@@ -21479,7 +21466,7 @@ Dbdict::createFilegroup_parse(Signal* si
 #endif
     fg_ptr.p->m_tablespace.m_default_logfile_group_id = fg.TS_LogfileGroupId;
 
-    Ptr<Filegroup> lg_ptr;
+    FilegroupPtr lg_ptr;
     if (!c_filegroup_hash.find(lg_ptr, fg.TS_LogfileGroupId))
     {
       jam();
@@ -22012,9 +21999,9 @@ Dbdict::dropFile_parse(Signal* signal, b
 #if defined VM_TRACE || defined ERROR_INSERT
   {
     char buf[1024];
-    Rope name(c_rope_pool, f_ptr.p->m_path);
+    LocalRope name(c_rope_pool, f_ptr.p->m_path);
     name.copy(buf);
-    ndbout_c("Dbdict: drop name=%s,id=%u,obj_id=%u", buf, 
+    ndbout_c("Dbdict: drop name=%s,id=%u,obj_id=%u", buf,
              impl_req->file_id,
              f_ptr.p->m_obj_ptr_i);
   }
@@ -22201,15 +22188,15 @@ Dbdict::send_drop_file(Signal* signal, U
   jam();
   ndbrequire(c_file_hash.find(f_ptr, fileId));
   ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id));
-  
+
   req->senderData = op_key;
   req->senderRef = reference();
   req->requestInfo = type;
-  
+
   req->file_id = f_ptr.p->key;
   req->filegroup_id = f_ptr.p->m_filegroup_id;
   req->filegroup_version = fg_ptr.p->m_version;
-  
+
   Uint32 ref= 0;
   switch(f_ptr.p->m_type){
   case DictTabInfo::Datafile:
@@ -22371,9 +22358,9 @@ Dbdict::dropFilegroup_parse(Signal* sign
 #if defined VM_TRACE || defined ERROR_INSERT
   {
     char buf[1024];
-    Rope name(c_rope_pool, fg_ptr.p->m_name);
+    LocalRope name(c_rope_pool, fg_ptr.p->m_name);
     name.copy(buf);
-    ndbout_c("Dbdict: drop name=%s,id=%u,obj_id=%u", buf, 
+    ndbout_c("Dbdict: drop name=%s,id=%u,obj_id=%u", buf,
              impl_req->filegroup_id,
              fg_ptr.p->m_obj_ptr_i);
   }
@@ -22455,7 +22442,7 @@ Dbdict::dropFilegroup_prepare(Signal* si
   if (fg_ptr.p->m_type == DictTabInfo::LogfileGroup)
   {
     XSchemaFile * xsf = &c_schemaFile[SchemaRecord::NEW_SCHEMA_FILE];
-    Ptr<File> filePtr;
+    FilePtr filePtr;
     Local_file_list list(c_file_pool, fg_ptr.p->m_logfilegroup.m_files);
     for(list.first(filePtr); !filePtr.isNull(); list.next(filePtr))
     {
@@ -22494,7 +22481,7 @@ Dbdict::dropFilegroup_abortPrepare(Signa
   {
     jam();
     XSchemaFile * xsf = &c_schemaFile[SchemaRecord::NEW_SCHEMA_FILE];
-    Ptr<File> filePtr;
+    FilePtr filePtr;
     Local_file_list list(c_file_pool, fg_ptr.p->m_logfilegroup.m_files);
     for(list.first(filePtr); !filePtr.isNull(); list.next(filePtr))
     {
@@ -22539,7 +22526,7 @@ Dbdict::dropFilegroup_commit(Signal* sig
      */
     XSchemaFile * xsf = &c_schemaFile[SchemaRecord::NEW_SCHEMA_FILE];
 
-    Ptr<File> filePtr;
+    FilePtr filePtr;
     Local_file_list list(c_file_pool, fg_ptr.p->m_logfilegroup.m_files);
     for(list.first(filePtr); !filePtr.isNull(); list.next(filePtr))
     {
@@ -22563,7 +22550,7 @@ Dbdict::dropFilegroup_commit(Signal* sig
     ndbrequire(c_filegroup_hash.
 	       find(lg_ptr,
 		    fg_ptr.p->m_tablespace.m_default_logfile_group_id));
-    
+
     decrease_ref_count(lg_ptr.p->m_obj_ptr_i);
   }
 }
@@ -22632,17 +22619,17 @@ Dbdict::send_drop_fg(Signal* signal, Uin
 		     DropFilegroupImplReq::RequestInfo type)
 {
   DropFilegroupImplReq* req = (DropFilegroupImplReq*)signal->getDataPtrSend();
-  
+
   FilegroupPtr fg_ptr;
   ndbrequire(c_filegroup_hash.find(fg_ptr, filegroupId));
-  
+
   req->senderData = op_key;
   req->senderRef = reference();
   req->requestInfo = type;
-  
+
   req->filegroup_id = fg_ptr.p->key;
   req->filegroup_version = fg_ptr.p->m_version;
-  
+
   Uint32 ref= 0;
   switch(fg_ptr.p->m_type){
   case DictTabInfo::Tablespace:
@@ -22654,7 +22641,7 @@ Dbdict::send_drop_fg(Signal* signal, Uin
   default:
     ndbrequire(false);
   }
-  
+
   sendSignal(ref, GSN_DROP_FILEGROUP_IMPL_REQ, signal,
 	     DropFilegroupImplReq::SignalLength, JBB);
 }
@@ -22712,7 +22699,7 @@ Dbdict::execCREATE_NODEGROUP_REQ(Signal*
     }
 
     impl_req->nodegroupId = req->nodegroupId;
-    for (Uint32 i = 0; i<NDB_ARRAY_SIZE(req->nodes) && 
+    for (Uint32 i = 0; i<NDB_ARRAY_SIZE(req->nodes) &&
            i<NDB_ARRAY_SIZE(impl_req->nodes); i++)
     {
       impl_req->nodes[i] = req->nodes[i];
@@ -22785,7 +22772,7 @@ Dbdict::createNodegroup_parse(Signal* si
    * createNodegroup blocks gcp
    *   so trans_ptr can *not* do this (endless loop)
    */
-  trans_ptr.p->m_wait_gcp_on_commit = false; 
+  trans_ptr.p->m_wait_gcp_on_commit = false;
 }
 
 void
@@ -23360,7 +23347,7 @@ Dbdict::dropNodegroup_parse(Signal* sign
    * dropNodegroup blocks gcp
    *   so trans_ptr can *not* do this (endless loop)
    */
-  trans_ptr.p->m_wait_gcp_on_commit = false; 
+  trans_ptr.p->m_wait_gcp_on_commit = false;
 }
 
 void
@@ -23787,14 +23774,14 @@ Dbdict::setError(ErrorInfo& e,
     e.errorNodeId = nodeId ? nodeId : getOwnNodeId();
     e.errorStatus = status;
     e.errorKey = key;
-    BaseString::snprintf(e.errorObjectName, sizeof(e.errorObjectName), "%s", 
+    BaseString::snprintf(e.errorObjectName, sizeof(e.errorObjectName), "%s",
                          name ? name : "");
   }
   e.errorCount++;
 }
 
 void
-Dbdict::setError(ErrorInfo& e, 
+Dbdict::setError(ErrorInfo& e,
                  Uint32 code,
                  Uint32 line,
                  const char * name)
@@ -24097,20 +24084,20 @@ Dbdict::getOpInfo(SchemaOpPtr op_ptr)
 bool
 Dbdict::seizeSchemaOp(SchemaTransPtr trans_ptr, SchemaOpPtr& op_ptr, Uint32 op_key, const OpInfo& info, bool linked)
 {
-  if ((ERROR_INSERTED(6111) && 
+  if ((ERROR_INSERTED(6111) &&
        (info.m_impl_req_gsn == GSN_CREATE_TAB_REQ ||
         info.m_impl_req_gsn == GSN_DROP_TAB_REQ ||
         info.m_impl_req_gsn == GSN_ALTER_TAB_REQ)) ||
-      (ERROR_INSERTED(6112) && 
+      (ERROR_INSERTED(6112) &&
        (info.m_impl_req_gsn == GSN_CREATE_INDX_IMPL_REQ ||
         info.m_impl_req_gsn == GSN_DROP_INDX_IMPL_REQ)) ||
-      (ERROR_INSERTED(6113) && 
+      (ERROR_INSERTED(6113) &&
        (info.m_impl_req_gsn == GSN_ALTER_INDX_IMPL_REQ)) ||
-      (ERROR_INSERTED(6114) && 
+      (ERROR_INSERTED(6114) &&
        (info.m_impl_req_gsn == GSN_CREATE_TRIG_IMPL_REQ ||
         info.m_impl_req_gsn == GSN_DROP_TRIG_IMPL_REQ)) ||
-      (ERROR_INSERTED(6116) && 
-       (info.m_impl_req_gsn == GSN_BUILD_INDX_IMPL_REQ))) 
+      (ERROR_INSERTED(6116) &&
+       (info.m_impl_req_gsn == GSN_BUILD_INDX_IMPL_REQ)))
   {
     jam();
     CLEAR_ERROR_INSERT_VALUE;
@@ -24918,7 +24905,7 @@ Dbdict::handleClientReq(Signal* signal,
     Uint32 nodeId = rand() % MAX_NDB_NODES;
     while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
       nodeId = rand() % MAX_NDB_NODES;
-    
+
     infoEvent("Simulating node %u missing RT_PARSE", nodeId);
     rg.m_nodes.clear(nodeId);
     signal->theData[0] = 9999;
@@ -25316,7 +25303,7 @@ Dbdict::trans_prepare_start(Signal* sign
     Uint32 nodeId = rand() % MAX_NDB_NODES;
     while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
       nodeId = rand() % MAX_NDB_NODES;
-    
+
     infoEvent("Simulating node %u missing RT_FLUSH_PREPARE", nodeId);
     rg.m_nodes.clear(nodeId);
     signal->theData[0] = 9999;
@@ -25400,7 +25387,7 @@ Dbdict::trans_prepare_next(Signal* signa
       Uint32 nodeId = rand() % MAX_NDB_NODES;
       while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
         nodeId = rand() % MAX_NDB_NODES;
-      
+
       infoEvent("Simulating node %u missing RT_PREPARE", nodeId);
       rg.m_nodes.clear(nodeId);
       signal->theData[0] = 9999;
@@ -25527,7 +25514,7 @@ Dbdict::trans_abort_parse_recv_reply(Sig
   trans_abort_parse_done(signal, trans_ptr);
 }
 
-void 
+void
 Dbdict::check_partial_trans_abort_parse_next(SchemaTransPtr trans_ptr,
                                              NdbNodeBitmask &nodes,
                                              SchemaOpPtr op_ptr)
@@ -25577,7 +25564,7 @@ Dbdict::trans_abort_parse_next(Signal* s
   jam();
   ndbrequire(trans_ptr.p->m_state == SchemaTrans::TS_ABORTING_PARSE);
 #ifdef MARTIN
-  ndbout_c("Dbdict::trans_abort_parse_next: op %u state %u", op_ptr.i,op_ptr.p->m_state); 
+  ndbout_c("Dbdict::trans_abort_parse_next: op %u state %u", op_ptr.i,op_ptr.p->m_state);
 #endif
   trans_ptr.p->m_curr_op_ptr_i = op_ptr.i;
   op_ptr.p->m_state = SchemaOp::OS_ABORTING_PARSE;
@@ -25605,7 +25592,7 @@ Dbdict::trans_abort_parse_next(Signal* s
       Uint32 nodeId = rand() % MAX_NDB_NODES;
       while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
         nodeId = rand() % MAX_NDB_NODES;
-      
+
       infoEvent("Simulating node %u missing RT_ABORT_PARSE", nodeId);
       rg.m_nodes.clear(nodeId);
       signal->theData[0] = 9999;
@@ -25737,8 +25724,8 @@ Dbdict::trans_abort_prepare_next(Signal*
   jam();
   ndbrequire(trans_ptr.p->m_state == SchemaTrans::TS_ABORTING_PREPARE);
 #ifdef MARTIN
-  ndbout_c("Dbdict::trans_abort_prepare_next: op %u state %u", op_ptr.p->op_key, op_ptr.p->m_state); 
-#endif 
+  ndbout_c("Dbdict::trans_abort_prepare_next: op %u state %u", op_ptr.p->op_key, op_ptr.p->m_state);
+#endif
   trans_ptr.p->m_curr_op_ptr_i = op_ptr.i;
 
   switch(op_ptr.p->m_state){
@@ -25794,7 +25781,7 @@ Dbdict::trans_abort_prepare_next(Signal*
       Uint32 nodeId = rand() % MAX_NDB_NODES;
       while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
         nodeId = rand() % MAX_NDB_NODES;
-      
+
       infoEvent("Simulating node %u missing RT_ABORT_PREPARE", nodeId);
       rg.m_nodes.clear(nodeId);
       signal->theData[0] = 9999;
@@ -25928,7 +25915,7 @@ Dbdict::trans_rollback_sp_next(Signal* s
       Uint32 nodeId = rand() % MAX_NDB_NODES;
       while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
         nodeId = rand() % MAX_NDB_NODES;
-      
+
       infoEvent("Simulating node %u missing RT_ABORT_PARSE", nodeId);
       rg.m_nodes.clear(nodeId);
       signal->theData[0] = 9999;
@@ -26048,7 +26035,7 @@ Dbdict::trans_commit_start(Signal* signa
     Uint32 nodeId = rand() % MAX_NDB_NODES;
     while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
       nodeId = rand() % MAX_NDB_NODES;
-    
+
     infoEvent("Simulating node %u missing RT_FLUSH_COMMIT", nodeId);
     rg.m_nodes.clear(nodeId);
     signal->theData[0] = 9999;
@@ -26107,11 +26094,11 @@ Dbdict::trans_commit_first(Signal* signa
     signal->theData[1] = 0; // Execute direct
     signal->theData[2] = 1; // Current
     EXECUTE_DIRECT(DBDIH, GSN_GETGCIREQ, signal, 3);
-    
+
     jamEntry();
     Uint32 gci_hi = signal->theData[1];
     Uint32 gci_lo = signal->theData[2];
-    
+
     signal->theData[0] = ZCOMMIT_WAIT_GCI;
     signal->theData[1] = trans_ptr.i;
     signal->theData[2] = gci_hi;
@@ -26126,7 +26113,7 @@ Dbdict::trans_commit_first(Signal* signa
     jam();
     Mutex mutex(signal, c_mutexMgr, trans_ptr.p->m_commit_mutex);
     Callback c = { safe_cast(&Dbdict::trans_commit_mutex_locked), trans_ptr.i };
-    
+
     // Todo should alloc mutex on SCHEMA_BEGIN
     bool ok = mutex.lock(c);
     ndbrequire(ok);
@@ -26141,7 +26128,7 @@ Dbdict::trans_commit_wait_gci(Signal* si
   c_schemaTransPool.getPtr(trans_ptr, signal->theData[1]);
 
   ndbrequire(trans_ptr.p->m_state == SchemaTrans::TS_COMMITTING);
-  
+
   Uint32 gci_hi = signal->theData[2];
   Uint32 gci_lo = signal->theData[3];
 
@@ -26174,7 +26161,7 @@ Dbdict::trans_commit_wait_gci(Signal* si
 
   Mutex mutex(signal, c_mutexMgr, trans_ptr.p->m_commit_mutex);
   Callback c = { safe_cast(&Dbdict::trans_commit_mutex_locked), trans_ptr.i };
-  
+
   // Todo should alloc mutex on SCHEMA_BEGIN
   bool ok = mutex.lock(c);
   ndbrequire(ok);
@@ -26262,7 +26249,7 @@ void Dbdict::check_partial_trans_commit_
     }
     trans_ptr.p->check_partial_rollforward = false;
   }
-  
+
 }
 void
 Dbdict::trans_commit_next(Signal* signal,
@@ -26271,7 +26258,7 @@ Dbdict::trans_commit_next(Signal* signal
 {
   jam();
 #ifdef MARTIN
-  ndbout_c("Dbdict::trans_commit_next: op %u state %u", op_ptr.i,op_ptr.p->m_state); 
+  ndbout_c("Dbdict::trans_commit_next: op %u state %u", op_ptr.i,op_ptr.p->m_state);
 #endif
   op_ptr.p->m_state = SchemaOp::OS_COMMITTING;
   trans_ptr.p->m_curr_op_ptr_i = op_ptr.i;
@@ -26299,7 +26286,7 @@ Dbdict::trans_commit_next(Signal* signal
       Uint32 nodeId = rand() % MAX_NDB_NODES;
       while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
         nodeId = rand() % MAX_NDB_NODES;
-      
+
       infoEvent("Simulating node %u missing RT_COMMIT", nodeId);
       rg.m_nodes.clear(nodeId);
       signal->theData[0] = 9999;
@@ -26436,18 +26423,18 @@ Dbdict::check_partial_trans_complete_sta
     */
     for (unsigned i = 1; i < MAX_NDB_NODES; i++) {
       NodeRecordPtr nodePtr;
-#ifdef VM_TRACE      
+#ifdef VM_TRACE
       ndbout_c("Node %u", i);
 #endif
       if (trans_ptr.p->m_nodes.get(i))
       {
         c_nodes.getPtr(nodePtr, i);
-#ifdef VM_TRACE      
+#ifdef VM_TRACE
         ndbout_c("Checking node %u(%u,%u)", nodePtr.i, nodePtr.p->recoveryState, nodePtr.p->takeOverConf.trans_state);
 #endif
         if (nodePtr.p->takeOverConf.trans_state >= SchemaTrans::TS_FLUSH_COMPLETE)
         {
-#ifdef VM_TRACE      
+#ifdef VM_TRACE
           ndbout_c("Skipping TS_FLUSH_COMPLETE of node %u", i);
 #endif
           nodes.clear(i);
@@ -26496,7 +26483,7 @@ Dbdict::trans_complete_start(Signal* sig
     Uint32 nodeId = rand() % MAX_NDB_NODES;
     while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
       nodeId = rand() % MAX_NDB_NODES;
-    
+
     infoEvent("Simulating node %u missing RT_FLUSH_COMPLETE", nodeId);
     rg.m_nodes.clear(nodeId);
     signal->theData[0] = 9999;
@@ -26552,12 +26539,12 @@ Dbdict::trans_complete_first(Signal * si
 }
 
 void
-Dbdict::trans_complete_next(Signal* signal, 
+Dbdict::trans_complete_next(Signal* signal,
                             SchemaTransPtr trans_ptr, SchemaOpPtr op_ptr)
 {
   op_ptr.p->m_state = SchemaOp::OS_COMPLETING;
   trans_ptr.p->m_curr_op_ptr_i = op_ptr.i;
-  
+
   trans_ptr.p->m_nodes.bitAND(c_aliveNodes);
   NdbNodeBitmask nodes = trans_ptr.p->m_nodes;
   NodeReceiverGroup rg(DBDICT, nodes);
@@ -26577,7 +26564,7 @@ Dbdict::trans_complete_next(Signal* sign
     Uint32 nodeId = rand() % MAX_NDB_NODES;
     while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
       nodeId = rand() % MAX_NDB_NODES;
-    
+
     infoEvent("Simulating node %u missing RT_COMPLETE", nodeId);
     rg.m_nodes.clear(nodeId);
     signal->theData[0] = 9999;
@@ -26594,7 +26581,7 @@ Dbdict::trans_complete_next(Signal* sign
   req->requestInfo = SchemaTransImplReq::RT_COMPLETE;
   req->transId = trans_ptr.p->m_transId;
   sendSignal(rg, GSN_SCHEMA_TRANS_IMPL_REQ, signal,
-             SchemaTransImplReq::SignalLength, JBB);  
+             SchemaTransImplReq::SignalLength, JBB);
 }
 
 void
@@ -26616,7 +26603,7 @@ Dbdict::trans_complete_recv_reply(Signal
     LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
     next = list.next(op_ptr);
   }
-  
+
   if (next)
   {
     jam();
@@ -26652,7 +26639,7 @@ Dbdict::trans_end_start(Signal* signal,
     bool ok = sc.init<SchemaTransImplRef>(rg, trans_ptr.p->trans_key);
     ndbrequire(ok);
   }
-  
+
   if (ERROR_INSERTED(6150))
   {
     jam();
@@ -26663,7 +26650,7 @@ Dbdict::trans_end_start(Signal* signal,
     Uint32 nodeId = rand() % MAX_NDB_NODES;
     while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
       nodeId = rand() % MAX_NDB_NODES;
-    
+
     infoEvent("Simulating node %u missing RT_END", nodeId);
     rg.m_nodes.clear(nodeId);
     signal->theData[0] = 9999;
@@ -26680,7 +26667,7 @@ Dbdict::trans_end_start(Signal* signal,
   req->requestInfo = SchemaTransImplReq::RT_END;
   req->transId = trans_ptr.p->m_transId;
   sendSignal(rg, GSN_SCHEMA_TRANS_IMPL_REQ, signal,
-             SchemaTransImplReq::SignalLength, JBB);  
+             SchemaTransImplReq::SignalLength, JBB);
 }
 
 void
@@ -26728,7 +26715,7 @@ void Dbdict::trans_recover(Signal* signa
 
   jam();
 #ifdef VM_TRACE
-  ndbout_c("Dbdict::trans_recover trans %u, state %u", trans_ptr.p->trans_key, trans_ptr.p->m_state); 
+  ndbout_c("Dbdict::trans_recover trans %u, state %u", trans_ptr.p->trans_key, trans_ptr.p->m_state);
 #endif
 
   switch(trans_ptr.p->m_state) {
@@ -26793,7 +26780,7 @@ void Dbdict::trans_recover(Signal* signa
   }
   case SchemaTrans::TS_FLUSH_COMMIT:
     flush_commit:
-    /* 
+    /*
        Flush commit any unflushed slaves
     */
     jam();
@@ -27004,7 +26991,7 @@ Dbdict::execSCHEMA_TRANS_IMPL_REQ(Signal
       ndbrequire(false); // handled above
     case SchemaTransImplReq::RT_PREPARE:
       jam();
-      op_ptr.p->m_state = SchemaOp::OS_PREPARING;      
+      op_ptr.p->m_state = SchemaOp::OS_PREPARING;
       (this->*(info.m_prepare))(signal, op_ptr);
       return;
     case SchemaTransImplReq::RT_ABORT_PARSE:
@@ -27342,7 +27329,7 @@ Dbdict::slave_commit_mutex_locked(Signal
   c_schemaTransPool.getPtr(trans_ptr, transPtrI);
 
   ndbrequire(trans_ptr.p->m_state == SchemaTrans::TS_COMMITTING);
-  sendTransConfRelease(signal, trans_ptr);  
+  sendTransConfRelease(signal, trans_ptr);
 }
 
 void
@@ -27360,7 +27347,7 @@ Dbdict::slave_commit_mutex_unlocked(Sign
   trans_ptr.p->m_commit_mutex.release(c_mutexMgr);
 
   ndbrequire(trans_ptr.p->m_state == SchemaTrans::TS_COMPLETING);
-  sendTransConfRelease(signal, trans_ptr);  
+  sendTransConfRelease(signal, trans_ptr);
 }
 
 void Dbdict::sendTransConfRelease(Signal*signal, SchemaTransPtr trans_ptr)
@@ -27409,7 +27396,7 @@ Dbdict::update_op_state(SchemaOpPtr op_p
     op_ptr.p->m_state = SchemaOp::OS_COMPLETED;
     break;
   case SchemaOp::OS_COMPLETED:
-    ndbrequire(false);    
+    ndbrequire(false);
   }
 }
 
@@ -27861,7 +27848,7 @@ Dbdict::endSchemaTrans(Signal* signal, T
 void
 Dbdict::execSCHEMA_TRANS_BEGIN_CONF(Signal* signal)
 {
-  jamEntry(); 
+  jamEntry();
   const SchemaTransBeginConf* conf =
     (const SchemaTransBeginConf*)signal->getDataPtr();
 
@@ -28259,10 +28246,10 @@ Dbdict::execCREATE_HASH_MAP_REQ(Signal*
   ErrorInfo error;
   do {
     SchemaOpPtr op_ptr;
-    CreateHashMapRecPtr createHashMapPtr;
+    CreateHashMapRecPtr createHashMapRecordPtr;
     CreateHashMapImplReq* impl_req;
 
-    startClientReq(op_ptr, createHashMapPtr, req, impl_req, error);
+    startClientReq(op_ptr, createHashMapRecordPtr, req, impl_req, error);
     if (hasError(error)) {
       jam();
       break;
@@ -28313,9 +28300,9 @@ Dbdict::createHashMap_parse(Signal* sign
 {
 
   SchemaTransPtr trans_ptr = op_ptr.p->m_trans_ptr;
-  CreateHashMapRecPtr createHashMapPtr;
-  getOpRec(op_ptr, createHashMapPtr);
-  CreateHashMapImplReq* impl_req = &createHashMapPtr.p->m_request;
+  CreateHashMapRecPtr createHashMapRecordPtr;
+  getOpRec(op_ptr, createHashMapRecordPtr);
+  CreateHashMapImplReq* impl_req = &createHashMapRecordPtr.p->m_request;
 
   jam();
 
@@ -28413,7 +28400,7 @@ Dbdict::createHashMap_parse(Signal* sign
   }
 
   Uint32 len = Uint32(strlen(hm.HashMapName) + 1);
-  Uint32 hash = Rope::hash(hm.HashMapName, len);
+  Uint32 hash = LocalRope::hash(hm.HashMapName, len);
 
   if (ERROR_INSERTED(6205))
   {
@@ -28454,7 +28441,7 @@ Dbdict::createHashMap_parse(Signal* sign
       return;
     }
 
-    HashMapPtr hm_ptr;
+    HashMapRecordPtr hm_ptr;
     ndbrequire(c_hash_map_hash.find(hm_ptr, objptr->m_id));
 
     impl_req->objectId = objptr->m_id;
@@ -28480,7 +28467,7 @@ Dbdict::createHashMap_parse(Signal* sign
 
   RopeHandle name;
   {
-    Rope tmp(c_rope_pool, name);
+    LocalRope tmp(c_rope_pool, name);
     if(!tmp.assign(hm.HashMapName, len, hash))
     {
       jam();
@@ -28494,7 +28481,7 @@ Dbdict::createHashMap_parse(Signal* sign
   Uint32 errCode = 0;
   Uint32 errLine = 0;
   DictObjectPtr obj_ptr; obj_ptr.setNull();
-  HashMapPtr hm_ptr; hm_ptr.setNull();
+  HashMapRecordPtr hm_ptr; hm_ptr.setNull();
   Ptr<Hash2FragmentMap> map_ptr; map_ptr.setNull();
 
   if (master)
@@ -28669,7 +28656,7 @@ error:
   else
   {
     jam();
-    Rope tmp(c_rope_pool, name);
+    LocalRope tmp(c_rope_pool, name);
     tmp.erase();
   }
 }
@@ -28679,9 +28666,9 @@ Dbdict::createHashMap_abortParse(Signal*
 {
   D("createHashMap_abortParse" << *op_ptr.p);
 
-  CreateHashMapRecPtr createHashMapPtr;
-  getOpRec(op_ptr, createHashMapPtr);
-  CreateHashMapImplReq* impl_req = &createHashMapPtr.p->m_request;
+  CreateHashMapRecPtr createHashMapRecordPtr;
+  getOpRec(op_ptr, createHashMapRecordPtr);
+  CreateHashMapImplReq* impl_req = &createHashMapRecordPtr.p->m_request;
 
   if (impl_req->requestType & CreateHashMapReq::CreateIfNotExists)
   {
@@ -28693,7 +28680,7 @@ Dbdict::createHashMap_abortParse(Signal*
   {
     jam();
 
-    Ptr<HashMapRecord> hm_ptr;
+    HashMapRecordPtr hm_ptr;
     ndbrequire(c_hash_map_hash.find(hm_ptr, impl_req->objectId));
 
     release_object(hm_ptr.p->m_obj_ptr_i);
@@ -28719,9 +28706,9 @@ Dbdict::createHashMap_reply(Signal* sign
   D("createHashMap_reply");
 
   SchemaTransPtr& trans_ptr = op_ptr.p->m_trans_ptr;
-  CreateHashMapRecPtr createHashMapPtr;
-  getOpRec(op_ptr, createHashMapPtr);
-  const CreateHashMapImplReq* impl_req = &createHashMapPtr.p->m_request;
+  CreateHashMapRecPtr createHashMapRecordPtr;
+  getOpRec(op_ptr, createHashMapRecordPtr);
+  const CreateHashMapImplReq* impl_req = &createHashMapRecordPtr.p->m_request;
 
   if (!hasError(error)) {
     CreateHashMapConf* conf = (CreateHashMapConf*)signal->getDataPtrSend();
@@ -28758,9 +28745,9 @@ Dbdict::createHashMap_prepare(Signal* si
   jam();
   D("createHashMap_prepare");
 
-  CreateHashMapRecPtr createHashMapPtr;
-  getOpRec(op_ptr, createHashMapPtr);
-  CreateHashMapImplReq* impl_req = &createHashMapPtr.p->m_request;
+  CreateHashMapRecPtr createHashMapRecordPtr;
+  getOpRec(op_ptr, createHashMapRecordPtr);
+  CreateHashMapImplReq* impl_req = &createHashMapRecordPtr.p->m_request;
 
   if (impl_req->requestType & CreateHashMapReq::CreateIfNotExists)
   {
@@ -28781,8 +28768,8 @@ void
 Dbdict::createHashMap_writeObjConf(Signal* signal, Uint32 op_key, Uint32 ret)
 {
   SchemaOpPtr op_ptr;
-  CreateHashMapRecPtr createHashMapPtr;
-  findSchemaOp(op_ptr, createHashMapPtr, op_key);
+  CreateHashMapRecPtr createHashMapRecordPtr;
+  findSchemaOp(op_ptr, createHashMapRecordPtr, op_key);
 
   ndbrequire(!op_ptr.isNull());
 
@@ -28797,8 +28784,8 @@ Dbdict::createHashMap_commit(Signal* sig
   jam();
   D("createHashMap_commit");
 
-  CreateHashMapRecPtr createHashMapPtr;
-  getOpRec(op_ptr, createHashMapPtr);
+  CreateHashMapRecPtr createHashMapRecordPtr;
+  getOpRec(op_ptr, createHashMapRecordPtr);
 
   sendTransConf(signal, op_ptr);
 }
@@ -28824,7 +28811,7 @@ Dbdict::createHashMap_abortPrepare(Signa
 
 void
 Dbdict::packHashMapIntoPages(SimpleProperties::Writer & w,
-                             Ptr<HashMapRecord> hm_ptr)
+                             HashMapRecordPtr hm_ptr)
 {
   DictHashMapInfo::HashMap hm; hm.init();
 
@@ -29132,7 +29119,7 @@ Dbdict::check_consistency_index(TableRec
     break;
   }
 
-  Ptr<TriggerRecord> triggerPtr;
+  TriggerRecordPtr triggerPtr;
   triggerPtr.i = indexPtr.p->triggerId;
   ndbrequire(triggerPtr.i != RNIL);
   c_triggerRecordPool.getPtr(triggerPtr);

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp	2011-10-07 18:15:59 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp	2011-11-03 17:22:01 +0000
@@ -217,9 +217,9 @@ public:
     Uint32 prevList;
     Uint32 nextHash;
     Uint32 prevHash;
- 
+
     Uint32 hashValue() const { return attributeName.hashValue();}
-    bool equal(const AttributeRecord& obj) const { 
+    bool equal(const AttributeRecord& obj) const {
       if(obj.hashValue() == hashValue()){
 	ConstRope r(* m_key.m_pool, obj.attributeName);
 	return r.compare(m_key.m_name_ptr, m_key.m_name_len) == 0;
@@ -228,14 +228,25 @@ public:
     }
   };
   typedef Ptr<AttributeRecord> AttributeRecordPtr;
-  ArrayPool<AttributeRecord> c_attributeRecordPool;
-  DLHashTable<AttributeRecord> c_attributeRecordHash;
+  typedef ArrayPool<AttributeRecord> AttributeRecord_pool;
+  typedef DLHashTable<AttributeRecord,AttributeRecord,AttributeRecord_pool> AttributeRecord_hash;
+  typedef DLFifoList<AttributeRecord,AttributeRecord,AttributeRecord_pool> AttributeRecord_list;
+  typedef LocalDLFifoList<AttributeRecord,AttributeRecord,AttributeRecord_pool> LocalAttributeRecord_list;
+
+  AttributeRecord_pool c_attributeRecordPool;
+  AttributeRecord_hash c_attributeRecordHash;
   RSS_AP_SNAPSHOT(c_attributeRecordPool);
 
   /**
    * Shared table / index record.  Most of this is permanent data stored
    * on disk.  Index trigger ids are volatile.
    */
+  struct TableRecord;
+  typedef Ptr<TableRecord> TableRecordPtr;
+  typedef ArrayPool<TableRecord> TableRecord_pool;
+  typedef DLFifoList<TableRecord,TableRecord,TableRecord_pool> TableRecord_list;
+  typedef LocalDLFifoList<TableRecord,TableRecord,TableRecord_pool> LocalTableRecord_list;
+
   struct TableRecord {
     TableRecord(){ m_upgrade_trigger_handling.m_upgrade = false;}
     Uint32 maxRowsLow;
@@ -344,7 +355,7 @@ public:
     bool isNonUniqueIndex() const;
     bool isHashIndex() const;
     bool isOrderedIndex() const;
-    
+
     /****************************************************
      *    Support variables for table handling
      ****************************************************/
@@ -353,7 +364,7 @@ public:
     Uint32 filePtr[2];
 
     /**    Pointer to first attribute in table */
-    DLFifoList<AttributeRecord>::Head m_attributes;
+    AttributeRecord_list::Head m_attributes;
 
     Uint32 nextPool;
 
@@ -390,9 +401,9 @@ public:
       Uint32 updateTriggerId;
       Uint32 deleteTriggerId;
     } m_upgrade_trigger_handling;
-    
+
     Uint32 noOfNullBits;
-    
+
     /**  frm data for this table */
     RopeHandle frmData;
     RopeHandle ngData;
@@ -402,7 +413,7 @@ public:
     Uint32 m_tablespace_id;
 
     /** List of indexes attached to table */
-    DLFifoList<TableRecord>::Head m_indexes;
+    TableRecord_list::Head m_indexes;
     Uint32 nextList, prevList;
 
     /*
@@ -423,8 +434,7 @@ public:
     Uint32 indexStatBgRequest;
   };
 
-  typedef Ptr<TableRecord> TableRecordPtr;
-  ArrayPool<TableRecord> c_tableRecordPool;
+  TableRecord_pool c_tableRecordPool;
   RSS_AP_SNAPSHOT(c_tableRecordPool);
 
   /**  Node Group and Tablespace id+version + range or list data.
@@ -447,7 +457,7 @@ public:
     TriggerRecord() {}
 
     /** Trigger state */
-    enum TriggerState { 
+    enum TriggerState {
       TS_NOT_DEFINED = 0,
       TS_DEFINING = 1,
       TS_OFFLINE  = 2,   // created globally in DICT
@@ -458,7 +468,7 @@ public:
     };
     TriggerState triggerState;
 
-    /** Trigger name, used by DICT to identify the trigger */ 
+    /** Trigger name, used by DICT to identify the trigger */
     RopeHandle triggerName;
 
     /** Trigger id, used by TRIX, TC, LQH, and TUP to identify the trigger */
@@ -486,10 +496,12 @@ public:
     /** Pointer to the next attribute used by ArrayPool */
     Uint32 nextPool;
   };
-  
-  Uint32 c_maxNoOfTriggers;
+
   typedef Ptr<TriggerRecord> TriggerRecordPtr;
-  ArrayPool<TriggerRecord> c_triggerRecordPool;
+  typedef ArrayPool<TriggerRecord> TriggerRecord_pool;
+
+  Uint32 c_maxNoOfTriggers;
+  TriggerRecord_pool c_triggerRecordPool;
   RSS_AP_SNAPSHOT(c_triggerRecordPool);
 
   /**
@@ -527,9 +539,10 @@ public:
     /** Used by Array Pool for free list handling */
     Uint32 nextPool;
   };
-  
+
   typedef Ptr<FsConnectRecord> FsConnectRecordPtr;
-  ArrayPool<FsConnectRecord> c_fsConnectRecordPool;
+  typedef ArrayPool<FsConnectRecord> FsConnectRecord_pool;
+  FsConnectRecord_pool c_fsConnectRecordPool;
 
   /**
    * This record stores all the information about a node and all its attributes
@@ -567,11 +580,11 @@ public:
   typedef Ptr<NodeRecord> NodeRecordPtr;
   CArray<NodeRecord> c_nodes;
   NdbNodeBitmask c_aliveNodes;
-  
+
   struct PageRecord {
     Uint32 word[8192];
   };
-  
+
   typedef Ptr<PageRecord> PageRecordPtr;
   CArray<PageRecord> c_pageRecordArray;
 
@@ -591,7 +604,7 @@ public:
 
   struct File {
     File() {}
-    
+
     Uint32 key;
     Uint32 m_magic;
     Uint32 m_version;
@@ -601,7 +614,7 @@ public:
     Uint64 m_file_size;
     Uint64 m_file_free;
     RopeHandle m_path;
-    
+
     Uint32 nextList;
     union {
       Uint32 prevList;
@@ -617,14 +630,14 @@ public:
   typedef DLListImpl<File_pool, File> File_list;
   typedef LocalDLListImpl<File_pool, File> Local_file_list;
   typedef KeyTableImpl<File_pool, File> File_hash;
-  
+
   struct Filegroup {
     Filegroup(){}
 
     Uint32 key;
     Uint32 m_obj_ptr_i;
     Uint32 m_magic;
-    
+
     Uint32 m_type;
     Uint32 m_version;
     RopeHandle m_name;
@@ -634,13 +647,13 @@ public:
 	Uint32 m_extent_size;
 	Uint32 m_default_logfile_group_id;
       } m_tablespace;
-      
+
       struct {
 	Uint32 m_undo_buffer_size;
 	File_list::HeadPOD m_files;
       } m_logfilegroup;
     };
-    
+
     union {
       Uint32 nextPool;
       Uint32 nextList;
@@ -654,12 +667,12 @@ public:
   typedef Ptr<Filegroup> FilegroupPtr;
   typedef RecordPool<Filegroup, RWPool> Filegroup_pool;
   typedef KeyTableImpl<Filegroup_pool, Filegroup> Filegroup_hash;
-  
+
   File_pool c_file_pool;
   Filegroup_pool c_filegroup_pool;
   File_hash c_file_hash;
   Filegroup_hash c_filegroup_hash;
-  
+
   RopePool c_rope_pool;
   RSS_AP_SNAPSHOT(c_rope_pool);
 
@@ -671,7 +684,7 @@ public:
     Uint32 m_id;
     Uint32 m_type;
     Uint32 m_ref_count;
-    RopeHandle m_name;  
+    RopeHandle m_name;
     union {
       struct {
 	Uint32 m_name_len;
@@ -683,9 +696,9 @@ public:
     };
     Uint32 nextHash;
     Uint32 prevHash;
-    
+
     Uint32 hashValue() const { return m_name.hashValue();}
-    bool equal(const DictObject& obj) const { 
+    bool equal(const DictObject& obj) const {
       if(obj.hashValue() == hashValue()){
 	ConstRope r(* m_key.m_pool, obj.m_name);
 	return r.compare(m_key.m_name_ptr, m_key.m_name_len) == 0;
@@ -702,20 +715,23 @@ public:
   };
 
   typedef Ptr<DictObject> DictObjectPtr;
-  
-  DLHashTable<DictObject> c_obj_hash; // Name
-  ArrayPool<DictObject> c_obj_pool;
+  typedef ArrayPool<DictObject> DictObject_pool;
+  typedef DLHashTable<DictObject,DictObject,DictObject_pool> DictObject_hash;
+  typedef SLList<DictObject> DictObject_list;
+
+  DictObject_hash c_obj_hash; // Name
+  DictObject_pool c_obj_pool;
   RSS_AP_SNAPSHOT(c_obj_pool);
-  
+
   // 1
   DictObject * get_object(const char * name){
     return get_object(name, Uint32(strlen(name) + 1));
   }
-  
+
   DictObject * get_object(const char * name, Uint32 len){
-    return get_object(name, len, Rope::hash(name, len));
+    return get_object(name, len, LocalRope::hash(name, len));
   }
-  
+
   DictObject * get_object(const char * name, Uint32 len, Uint32 hash);
 
   //2
@@ -724,7 +740,7 @@ public:
   }
 
   bool get_object(DictObjectPtr& obj_ptr, const char * name, Uint32 len){
-    return get_object(obj_ptr, name, len, Rope::hash(name, len));
+    return get_object(obj_ptr, name, len, LocalRope::hash(name, len));
   }
 
   bool get_object(DictObjectPtr&, const char* name, Uint32 len, Uint32 hash);
@@ -732,7 +748,7 @@ public:
   void release_object(Uint32 obj_ptr_i){
     release_object(obj_ptr_i, c_obj_pool.getPtr(obj_ptr_i));
   }
-  
+
   void release_object(Uint32 obj_ptr_i, DictObject* obj_ptr_p);
 
   void increase_ref_count(Uint32 obj_ptr_i);
@@ -747,7 +763,7 @@ private:
 
   // Signal receivers
   void execDICTSTARTREQ(Signal* signal);
-  
+
   void execGET_TABINFOREQ(Signal* signal);
   void execGET_TABLEDID_REQ(Signal* signal);
   void execGET_TABINFO_REF(Signal* signal);
@@ -869,12 +885,12 @@ private:
   void execDROP_TRIG_IMPL_REF(Signal* signal);
 
   void execDROP_TABLE_REQ(Signal* signal);
-  
+
   void execPREP_DROP_TAB_REQ(Signal* signal);
-  void execPREP_DROP_TAB_REF(Signal* signal);  
+  void execPREP_DROP_TAB_REF(Signal* signal);
   void execPREP_DROP_TAB_CONF(Signal* signal);
 
-  void execDROP_TAB_REF(Signal* signal);  
+  void execDROP_TAB_REF(Signal* signal);
   void execDROP_TAB_CONF(Signal* signal);
 
   void execCREATE_TABLE_REQ(Signal* signal);
@@ -894,7 +910,7 @@ private:
   void execLQHADDATTREF(Signal* signal);
   void execLQHADDATTCONF(Signal* signal);
   void execCREATE_TAB_REF(Signal* signal);
-  void execCREATE_TAB_CONF(Signal* signal);  
+  void execCREATE_TAB_CONF(Signal* signal);
   void execALTER_TAB_REF(Signal* signal);
   void execALTER_TAB_CONF(Signal* signal);
   void execALTER_TABLE_REF(Signal* signal);
@@ -949,7 +965,7 @@ private:
    */
 
   /**
-   * This record stores all the state needed 
+   * This record stores all the state needed
    * when the schema page is being sent to other nodes
    ***************************************************************************/
   struct SendSchemaRecord {
@@ -960,7 +976,7 @@ private:
 
     Uint32 nodeId;
     SignalCounter m_SCHEMAINFO_Counter;
-    
+
     Uint32 noOfWordsCurrentlySent;
     Uint32 noOfSignalsSentSinceDelay;
 
@@ -969,7 +985,7 @@ private:
   SendSchemaRecord c_sendSchemaRecord;
 
   /**
-   * This record stores all the state needed 
+   * This record stores all the state needed
    * when a table file is being read from disk
    ****************************************************************************/
   struct ReadTableRecord {
@@ -979,14 +995,14 @@ private:
     Uint32 pageId;
     /** Table Id of read table */
     Uint32 tableId;
-    
+
     bool inUse;
     Callback m_callback;
   };
   ReadTableRecord c_readTableRecord;
 
   /**
-   * This record stores all the state needed 
+   * This record stores all the state needed
    * when a table file is being written to disk
    ****************************************************************************/
   struct WriteTableRecord {
@@ -1013,7 +1029,7 @@ private:
   WriteTableRecord c_writeTableRecord;
 
   /**
-   * This record stores all the state needed 
+   * This record stores all the state needed
    * when a schema file is being read from disk
    ****************************************************************************/
   struct ReadSchemaRecord {
@@ -1034,7 +1050,7 @@ private:
   ReadSchemaRecord c_readSchemaRecord;
 
   /**
-   * This record stores all the state needed 
+   * This record stores all the state needed
    * when a schema file is being written to disk
    ****************************************************************************/
   struct WriteSchemaRecord {
@@ -1055,7 +1071,7 @@ private:
   WriteSchemaRecord c_writeSchemaRecord;
 
   /**
-   * This record stores all the information needed 
+   * This record stores all the information needed
    * when a file is being read from disk
    ****************************************************************************/
   struct RestartRecord {
@@ -1081,20 +1097,20 @@ private:
   RestartRecord c_restartRecord;
 
   /**
-   * This record stores all the information needed 
+   * This record stores all the information needed
    * when a file is being read from disk
    ****************************************************************************/
   struct RetrieveRecord {
     RetrieveRecord(){ noOfWaiters = 0;}
-    
+
     /**    Only one retrieve table definition at a time       */
     bool busyState;
-    
+
     /**
      * No of waiting in time queue
      */
     Uint32 noOfWaiters;
-    
+
     /**    Block Reference of retriever       */
     BlockReference blockRef;
 
@@ -1129,9 +1145,9 @@ private:
   RetrieveRecord c_retrieveRecord;
 
   /**
-   * This record stores all the information needed 
+   * This record stores all the information needed
    * when a file is being read from disk
-   * 
+   *
    * This is the info stored in one entry of the schema
    * page. Each table has 4 words of info.
    * Word 1: Schema version (upper 16 bits)
@@ -1152,7 +1168,7 @@ private:
 
     /**    Old Schema file first page (used at node restart)    */
     Uint32 oldSchemaPage;
-    
+
     Callback m_callback;
   };
   SchemaRecord c_schemaRecord;
@@ -1198,7 +1214,7 @@ private:
   /* ----------------------------------------------------------------------- */
 
   struct PackTable {
-    
+
     enum PackTableState {
       PTS_IDLE = 0,
       PTS_GET_TAB = 3
@@ -1222,7 +1238,7 @@ private:
     DictTabInfo::RequestType requestType;
     Uint32 errorCode;
     Uint32 errorLine;
-    
+
     SimpleProperties::UnpackStatus status;
     Uint32 errorKey;
     TableRecordPtr tablePtr;
@@ -1235,7 +1251,7 @@ private:
   copyRope(RopeHandle& rh_dst, const RopeHandle& rh_src)
   {
     char buf[sz];
-    Rope r_dst(c_rope_pool, rh_dst);
+    LocalRope r_dst(c_rope_pool, rh_dst);
     ConstRope r_src(c_rope_pool, rh_src);
     ndbrequire(r_src.size() <= sz);
     r_src.copy(buf);
@@ -1256,7 +1272,7 @@ private:
     return str;
   }
 #endif
- 
+
   // Operation records
 
   /**
@@ -1318,7 +1334,7 @@ private:
                 Uint32 key = 0,
                 const char * name = 0);
 
-  void setError(ErrorInfo&, 
+  void setError(ErrorInfo&,
                 Uint32 code,
                 Uint32 line,
                 const char * name);
@@ -1504,7 +1520,7 @@ private:
       case OS_ABORTING_PARSE:
         return 4;
       //case OS_ABORTED_PARSE    = 9,  // Not used, op released
-        //return 3: 
+        //return 3:
       case OS_COMMITTING:
         return 10;
       case OS_COMMITTED:
@@ -1596,9 +1612,9 @@ private:
   };
 
   typedef RecordPool<SchemaOp,ArenaPool> SchemaOp_pool;
-  typedef LocalDLFifoList<SchemaOp,SchemaOp,SchemaOp_pool> LocalSchemaOp_list;
   typedef DLHashTable<SchemaOp,SchemaOp,SchemaOp_pool> SchemaOp_hash;
   typedef DLFifoList<SchemaOp,SchemaOp,SchemaOp_pool>::Head  SchemaOp_head;
+  typedef LocalDLFifoList<SchemaOp,SchemaOp,SchemaOp_pool> LocalSchemaOp_list;
 
   SchemaOp_pool c_schemaOpPool;
   SchemaOp_hash c_schemaOpHash;
@@ -1681,7 +1697,7 @@ private:
     /*
       Store node id in high 8 bits to make op_key globally unique
      */
-    Uint32 op_key = 
+    Uint32 op_key =
       (getOwnNodeId() << 24) +
       ((c_opRecordSequence + 1) & 0x00FFFFFF);
     if (seizeSchemaOp<T>(trans_ptr, op_ptr, op_key, linked)) {
@@ -1959,9 +1975,12 @@ private:
   Uint32 check_write_obj(Uint32, Uint32, SchemaFile::EntryState, ErrorInfo&);
 
   typedef RecordPool<SchemaTrans,ArenaPool> SchemaTrans_pool;
+  typedef DLHashTable<SchemaTrans,SchemaTrans,SchemaTrans_pool> SchemaTrans_hash;
+  typedef DLFifoList<SchemaTrans,SchemaTrans,SchemaTrans_pool> SchemaTrans_list;
+
   SchemaTrans_pool c_schemaTransPool;
-  DLHashTable<SchemaTrans,SchemaTrans,SchemaTrans_pool> c_schemaTransHash;
-  DLFifoList<SchemaTrans,SchemaTrans,SchemaTrans_pool> c_schemaTransList;
+  SchemaTrans_hash c_schemaTransHash;
+  SchemaTrans_list c_schemaTransList;
   Uint32 c_schemaTransCount;
 
   bool seizeSchemaTrans(SchemaTransPtr&, Uint32 trans_key);
@@ -2226,8 +2245,11 @@ private:
 #endif
   };
 
-  ArrayPool<TxHandle> c_txHandlePool;
-  DLHashTable<TxHandle> c_txHandleHash;
+  typedef ArrayPool<TxHandle> TxHandle_pool;
+  typedef DLHashTable<TxHandle,TxHandle,TxHandle_pool> TxHandle_hash;
+
+  TxHandle_pool c_txHandlePool;
+  TxHandle_hash c_txHandleHash;
 
   bool seizeTxHandle(TxHandlePtr&);
   bool findTxHandle(TxHandlePtr&, Uint32 tx_key);
@@ -2932,12 +2954,12 @@ private:
     bool equal(const HashMapRecord& obj) const { return key == obj.key;}
 
   };
-  typedef Ptr<HashMapRecord> HashMapPtr;
-  typedef ArrayPool<HashMapRecord> HashMap_pool;
-  typedef KeyTableImpl<HashMap_pool, HashMapRecord> HashMap_hash;
+  typedef Ptr<HashMapRecord> HashMapRecordPtr;
+  typedef ArrayPool<HashMapRecord> HashMapRecord_pool;
+  typedef KeyTableImpl<HashMapRecord_pool, HashMapRecord> HashMapRecord_hash;
 
-  HashMap_pool c_hash_map_pool;
-  HashMap_hash c_hash_map_hash;
+  HashMapRecord_pool c_hash_map_pool;
+  HashMapRecord_hash c_hash_map_hash;
   RSS_AP_SNAPSHOT(c_hash_map_pool);
   RSS_AP_SNAPSHOT(g_hash_map);
 
@@ -3612,18 +3634,23 @@ private:
     Uint32 u_opSignalUtil   [PTR_ALIGN(opSignalUtilSize)];
     Uint32 nextPool;
   };
-  ArrayPool<OpRecordUnion> c_opRecordPool;
-  
+  typedef ArrayPool<OpRecordUnion> OpRecordUnion_pool;
+  OpRecordUnion_pool c_opRecordPool;
+
   // Operation records
-  KeyTable2C<OpCreateEvent, OpRecordUnion> c_opCreateEvent;
-  KeyTable2C<OpSubEvent, OpRecordUnion> c_opSubEvent;
-  KeyTable2C<OpDropEvent, OpRecordUnion> c_opDropEvent;
-  KeyTable2C<OpSignalUtil, OpRecordUnion> c_opSignalUtil;
+  typedef KeyTable2C<OpCreateEvent, OpRecordUnion> OpCreateEvent_pool;
+  typedef KeyTable2C<OpSubEvent, OpRecordUnion> OpSubEvent_pool;
+  typedef KeyTable2C<OpDropEvent, OpRecordUnion> OpDropEvent_pool;
+  typedef KeyTable2C<OpSignalUtil, OpRecordUnion> OpSignalUtil_pool;
+  OpCreateEvent_pool c_opCreateEvent;
+  OpSubEvent_pool c_opSubEvent;
+  OpDropEvent_pool c_opDropEvent;
+  OpSignalUtil_pool c_opSignalUtil;
 
   // Unique key for operation  XXX move to some system table
   Uint32 c_opRecordSequence;
 
-  void handleNdbdFailureCallback(Signal* signal, 
+  void handleNdbdFailureCallback(Signal* signal,
                                  Uint32 failedNodeId,
                                  Uint32 ignoredRc);
   void handleApiFailureCallback(Signal* signal,
@@ -3637,17 +3664,17 @@ private:
   void sendSTTORRY(Signal* signal);
   void sendNDB_STTORRY(Signal* signal);
   void initSchemaFile(Signal* signal);
-  
+
   /* ------------------------------------------------------------ */
   // Drop Table Handling
   /* ------------------------------------------------------------ */
   void releaseTableObject(Uint32 tableId, bool removeFromHash = true);
-  
+
   /* ------------------------------------------------------------ */
   // General Stuff
   /* ------------------------------------------------------------ */
   Uint32 getFreeObjId(Uint32 minId, bool both = false);
-  Uint32 getFreeTableRecord(Uint32 primaryTableId);
+  Uint32 getFreeTableRecord();
   Uint32 getFreeTriggerRecord();
   bool getNewAttributeRecord(TableRecordPtr tablePtr,
 			     AttributeRecordPtr & attrPtr);
@@ -3658,13 +3685,13 @@ private:
 			      const Uint32 undo_free_hi,
 			      const Uint32 undo_free_lo);
   void packFileIntoPages(SimpleProperties::Writer &, FilePtr, const Uint32);
-  
+
   void sendGET_TABINFOREQ(Signal* signal,
                           Uint32 tableId);
   void sendTC_SCHVERREQ(Signal* signal,
                         Uint32 tableId,
                         BlockReference tcRef);
-  
+
   /* ------------------------------------------------------------ */
   // System Restart Handling
   /* ------------------------------------------------------------ */
@@ -3672,7 +3699,7 @@ private:
   void sendSchemaData(Signal* signal);
   Uint32 sendSCHEMA_INFO(Signal* signal, Uint32 nodeId, Uint32* pagePointer);
   void sendDIHSTARTTAB_REQ(Signal* signal);
-  
+
   /* ------------------------------------------------------------ */
   // Receive Table Handling
   /* ------------------------------------------------------------ */
@@ -3682,21 +3709,21 @@ private:
 			 bool checkExist = true);
   void handleTabInfo(SimpleProperties::Reader & it, ParseDictTabInfoRecord *,
 		     DictTabInfo::Table & tableDesc);
-  
+
   void handleAddTableFailure(Signal* signal,
                              Uint32 failureLine,
                              Uint32 tableId);
   bool verifyTableCorrect(Signal* signal, Uint32 tableId);
-  
+
   /* ------------------------------------------------------------ */
   // Add Fragment Handling
   /* ------------------------------------------------------------ */
   void sendLQHADDATTRREQ(Signal*, SchemaOpPtr, Uint32 attributePtrI);
-  
+
   /* ------------------------------------------------------------ */
   // Read/Write Schema and Table files
   /* ------------------------------------------------------------ */
-  void updateSchemaState(Signal* signal, Uint32 tableId, 
+  void updateSchemaState(Signal* signal, Uint32 tableId,
 			 SchemaFile::TableEntry*, Callback*,
                          bool savetodisk = 1, bool dicttrans = 0);
   void startWriteSchemaFile(Signal* signal);
@@ -3712,13 +3739,13 @@ private:
   void closeWriteSchemaConf(Signal* signal,
                                FsConnectRecordPtr fsPtr);
   void initSchemaFile_conf(Signal* signal, Uint32 i, Uint32 returnCode);
-  
-  void writeTableFile(Signal* signal, Uint32 tableId, 
+
+  void writeTableFile(Signal* signal, Uint32 tableId,
 		      SegmentedSectionPtr tabInfo, Callback*);
   void writeTableFile(Signal* signal, SchemaOpPtr op_ptr, Uint32 tableId,
 		      OpSection opSection, Callback*);
   void startWriteTableFile(Signal* signal, Uint32 tableId);
-  void openTableFile(Signal* signal, 
+  void openTableFile(Signal* signal,
                      Uint32 fileNo,
                      Uint32 fsPtr,
                      Uint32 tableId,
@@ -3754,12 +3781,12 @@ private:
   /* ------------------------------------------------------------ */
   // Get table definitions
   /* ------------------------------------------------------------ */
-  void sendGET_TABINFOREF(Signal* signal, 
+  void sendGET_TABINFOREF(Signal* signal,
 			  GetTabInfoReq*,
 			  GetTabInfoRef::ErrorCode errorCode,
                           Uint32 errorLine);
 
-  void sendGET_TABLEID_REF(Signal* signal, 
+  void sendGET_TABLEID_REF(Signal* signal,
 			   GetTableIdReq * req,
 			   GetTableIdRef::ErrorCode errorCode);
 
@@ -3780,19 +3807,19 @@ private:
   void rebuildIndex_fromEndTrans(Signal*, Uint32 tx_key, Uint32 ret);
 
   // Events
-  void 
+  void
   createEventUTIL_PREPARE(Signal* signal,
 			  Uint32 callbackData,
 			  Uint32 returnCode);
-  void 
-  createEventUTIL_EXECUTE(Signal *signal, 
+  void
+  createEventUTIL_EXECUTE(Signal *signal,
 			  Uint32 callbackData,
 			  Uint32 returnCode);
-  void 
+  void
   dropEventUTIL_PREPARE_READ(Signal* signal,
 			     Uint32 callbackData,
 			     Uint32 returnCode);
-  void 
+  void
   dropEventUTIL_EXECUTE_READ(Signal* signal,
 			     Uint32 callbackData,
 			     Uint32 returnCode);
@@ -3800,8 +3827,8 @@ private:
   dropEventUTIL_PREPARE_DELETE(Signal* signal,
 			       Uint32 callbackData,
 			       Uint32 returnCode);
-  void 
-  dropEventUTIL_EXECUTE_DELETE(Signal *signal, 
+  void
+  dropEventUTIL_EXECUTE_DELETE(Signal *signal,
 			       Uint32 callbackData,
 			       Uint32 returnCode);
   void
@@ -3814,10 +3841,10 @@ private:
 			  Uint32 returnCode);
   int
   sendSignalUtilReq(Callback *c,
-		    BlockReference ref, 
-		    GlobalSignalNumber gsn, 
-		    Signal* signal, 
-		    Uint32 length, 
+		    BlockReference ref,
+		    GlobalSignalNumber gsn,
+		    Signal* signal,
+		    Uint32 length,
 		    JobBufferLevel jbuf,
 		    LinearSectionPtr ptr[3],
 		    Uint32 noOfSections);
@@ -3827,7 +3854,7 @@ private:
   void completeSubStartReq(Signal* signal, Uint32 ptrI,	Uint32 returnCode);
   void completeSubStopReq(Signal* signal, Uint32 ptrI, Uint32 returnCode);
   void completeSubRemoveReq(Signal* signal, Uint32 ptrI, Uint32 returnCode);
-  
+
   void dropEvent_sendReply(Signal* signal,
 			   OpDropEventPtr evntRecPtr);
 
@@ -3867,7 +3894,7 @@ private:
 				 const Uint32 prepareId,
 				 UtilPrepareReq::OperationTypeValue prepReq);
   void executeTransaction(Callback *c,
-			  Signal* signal, 
+			  Signal* signal,
 			  Uint32 senderData,
 			  Uint32 prepareId,
 			  Uint32 noAttr,
@@ -3878,7 +3905,7 @@ private:
   bool upgrade_suma_NotStarted(Uint32 err, Uint32 ref) const;
 
   // support
-  void getTableKeyList(TableRecordPtr, 
+  void getTableKeyList(TableRecordPtr,
 		       Id_array<MAX_ATTRIBUTES_IN_INDEX+1>& list);
   void getIndexAttr(TableRecordPtr indexPtr, Uint32 itAttr, Uint32* id);
   void getIndexAttrList(TableRecordPtr indexPtr, IndexAttributeList& list);
@@ -3938,7 +3965,7 @@ private:
 public:
   void send_drop_file(Signal*, Uint32, Uint32, DropFileImplReq::RequestInfo);
   void send_drop_fg(Signal*, Uint32, Uint32, DropFilegroupImplReq::RequestInfo);
-  
+
   int checkSingleUserMode(Uint32 senderRef);
 
   friend NdbOut& operator<<(NdbOut& out, const ErrorInfo&);
@@ -3969,7 +3996,7 @@ public:
    */
   struct DictLockType;
   friend struct DictLockType;
-  
+
   struct DictLockType {
     DictLockReq::LockType lockType;
     const char* text;
@@ -3981,7 +4008,7 @@ public:
 
   Uint32 dict_lock_trylock(const DictLockReq* req);
   Uint32 dict_lock_unlock(Signal* signal, const DictLockReq* req);
-  
+
   LockQueue::Pool m_dict_lock_pool;
   LockQueue m_dict_lock;
 

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-10-23 08:38:06 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-11-03 17:22:01 +0000
@@ -2372,6 +2372,17 @@ void Dbdih::execSTART_PERMREQ(Signal* si
   CRASH_INSERTION(7122);
   ndbrequire(isMaster());
   ndbrequire(refToNode(retRef) == nodeId);
+  if (c_lcpMasterTakeOverState.state != LMTOS_IDLE)
+  {
+    jam();
+    infoEvent("DIH : Denied request for start permission from %u "
+              "while LCP Master takeover in progress.",
+              nodeId);
+    signal->theData[0] = nodeId;
+    signal->theData[1] = StartPermRef::ZNODE_START_DISALLOWED_ERROR;
+    sendSignal(retRef, GSN_START_PERMREF, signal, 2, JBB);
+    return;
+  }
   if ((c_nodeStartMaster.activeState) ||
       (c_nodeStartMaster.wait != ZFALSE) ||
       ERROR_INSERTED_CLEAR(7175)) {

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2011-10-20 19:52:11 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2011-11-03 17:22:01 +0000
@@ -18,6 +18,7 @@
 #ifndef DBLQH_H
 #define DBLQH_H
 
+#ifndef DBLQH_STATE_EXTRACT
 #include <pc.hpp>
 #include <ndb_limits.h>
 #include <SimulatedBlock.hpp>
@@ -41,6 +42,7 @@
 class Dbacc;
 class Dbtup;
 class Lgman;
+#endif // DBLQH_STATE_EXTRACT
 
 #ifdef DBLQH_C
 // Constants
@@ -410,10 +412,15 @@ class Lgman;
  *  - TEST 
  *  - LOG 
  */
-class Dblqh: public SimulatedBlock {
+class Dblqh 
+#ifndef DBLQH_STATE_EXTRACT
+  : public SimulatedBlock
+#endif
+{
   friend class DblqhProxy;
 
 public:
+#ifndef DBLQH_STATE_EXTRACT
   enum LcpCloseState {
     LCP_IDLE = 0,
     LCP_RUNNING = 1,       // LCP is running
@@ -1940,7 +1947,7 @@ public:
     Uint32 usageCountW; // writers
   }; // Size 100 bytes
   typedef Ptr<Tablerec> TablerecPtr;
-
+#endif // DBLQH_STATE_EXTRACT
   struct TcConnectionrec {
     enum ListState {
       NOT_IN_LIST = 0,
@@ -2021,6 +2028,7 @@ public:
       COPY_CONNECTED = 2,
       LOG_CONNECTED = 3
     };
+#ifndef DBLQH_STATE_EXTRACT
     ConnectState connectState;
     UintR copyCountWords;
     Uint32 keyInfoIVal;
@@ -2131,8 +2139,10 @@ public:
       Uint32 m_page_id[2];
       Local_key m_disk_ref[2];
     } m_nr_delete;
+#endif // DBLQH_STATE_EXTRACT
   }; /* p2c: size = 280 bytes */
-  
+
+#ifndef DBLQH_STATE_EXTRACT
   typedef Ptr<TcConnectionrec> TcConnectionrecPtr;
 
   struct TcNodeFailRecord {
@@ -3278,8 +3288,9 @@ public:
 
   void sendFireTrigConfTc(Signal* signal, BlockReference ref, Uint32 Tdata[]);
   bool check_fire_trig_pass(Uint32 op, Uint32 pass);
+#endif
 };
-
+#ifndef DBLQH_STATE_EXTRACT
 inline
 bool
 Dblqh::ScanRecord::check_scan_batch_completed() const
@@ -3402,5 +3413,5 @@ Dblqh::TRACE_OP_CHECK(const TcConnection
 	   regTcPtr->operation == ZDELETE)) ||
     ERROR_INSERTED(5713);
 }
-
+#endif
 #endif

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-10-20 19:52:11 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-11-03 17:22:01 +0000
@@ -13788,6 +13788,15 @@ void Dblqh::execBACKUP_FRAGMENT_REF(Sign
 void Dblqh::execBACKUP_FRAGMENT_CONF(Signal* signal) 
 {
   jamEntry();
+
+  if (ERROR_INSERTED(5073))
+  {
+    ndbout_c("Delaying BACKUP_FRAGMENT_CONF");
+    sendSignalWithDelay(reference(), GSN_BACKUP_FRAGMENT_CONF, signal, 500,
+                        signal->getLength());
+    return;
+  }
+
   //BackupFragmentConf* conf= (BackupFragmentConf*)signal->getDataPtr();
 
   lcpPtr.i = 0;

=== added file 'storage/ndb/src/kernel/blocks/dblqh/DblqhStateDesc.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhStateDesc.cpp	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhStateDesc.cpp	2011-10-28 09:56:57 +0000
@@ -0,0 +1,76 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#include <kernel/statedesc.hpp>
+#define DBLQH_STATE_EXTRACT
+#include "Dblqh.hpp"
+
+#define SDESC(a,b,c) { (unsigned)Dblqh::TcConnectionrec::a, #a, b, c }
+
+struct ndbkernel_state_desc g_dblqh_tcconnect_state_desc[] =
+{
+  SDESC(IDLE, "Idle", ""),
+  SDESC(WAIT_ACC, "WaitLock", ""),
+  SDESC(WAIT_TUPKEYINFO, "", ""),
+  SDESC(WAIT_ATTR, "WaitData", ""),
+  SDESC(WAIT_TUP, "WaitTup", ""),
+  SDESC(STOPPED, "Stopped", ""),
+  SDESC(LOG_QUEUED, "LogPrepare", ""),
+  SDESC(PREPARED, "Prepared", ""),
+  SDESC(LOG_COMMIT_WRITTEN_WAIT_SIGNAL, "", ""),
+  SDESC(LOG_COMMIT_QUEUED_WAIT_SIGNAL, "", ""),
+
+  // Commit in progress states
+  /* -------------------------------------------------------------------- */
+  SDESC(COMMIT_STOPPED, "CommittingStopped", ""),
+  SDESC(LOG_COMMIT_QUEUED, "Committing", ""),
+  SDESC(COMMIT_QUEUED, "Committing", ""),
+  SDESC(COMMITTED, "Committed", ""),
+  SDESC(WAIT_TUP_COMMIT, "Committing", ""),
+
+  /* -------------------------------------------------------------------- */
+  // Abort in progress states
+  /* -------------------------------------------------------------------- */
+  SDESC(WAIT_ACC_ABORT, "Aborting", ""),
+  SDESC(ABORT_QUEUED, "Aborting", ""),
+  SDESC(ABORT_STOPPED, "AbortingStopped", ""),
+  SDESC(WAIT_AI_AFTER_ABORT, "Aborting", ""),
+  SDESC(LOG_ABORT_QUEUED, "Aborting", ""),
+  SDESC(WAIT_TUP_TO_ABORT, "Aborting", ""),
+
+  /* -------------------------------------------------------------------- */
+  // Scan in progress states
+  /* -------------------------------------------------------------------- */
+  SDESC(WAIT_SCAN_AI, "Scanning", ""),
+  SDESC(SCAN_STATE_USED, "Scanning", ""),
+  SDESC(SCAN_FIRST_STOPPED, "Scanning", ""),
+  SDESC(SCAN_CHECK_STOPPED, "Scanning", ""),
+  SDESC(SCAN_STOPPED, "ScanningStopped", ""),
+  SDESC(SCAN_RELEASE_STOPPED, "ScanningStopped", ""),
+  SDESC(SCAN_CLOSE_STOPPED, "ScanningStopped", ""),
+  SDESC(COPY_CLOSE_STOPPED, "ScanningStopped", ""),
+  SDESC(COPY_FIRST_STOPPED, "ScanningStopped", ""),
+  SDESC(COPY_STOPPED, "ScanningStopped", ""),
+  SDESC(SCAN_TUPKEY, "Scanning", ""),
+  SDESC(COPY_TUPKEY, "NodeRecoveryScanning", ""),
+
+  SDESC(TC_NOT_CONNECTED, "Idle", ""),
+  SDESC(PREPARED_RECEIVED_COMMIT, "Committing", ""),
+  SDESC(LOG_COMMIT_WRITTEN, "Committing", ""),
+
+  { 0, 0, 0, 0 }
+};

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-10-23 08:38:06 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-11-03 17:22:01 +0000
@@ -2705,6 +2705,23 @@ Dbspj::lookup_build(Build_context& ctx,
   const QN_LookupParameters * param = (const QN_LookupParameters*)qp;
   do
   {
+    err = DbspjErr::InvalidTreeNodeSpecification;
+    if (unlikely(node->len < QN_LookupNode::NodeSize))
+    {
+      jam();
+      DEBUG_CRASH();
+      break;
+    }
+
+    err = DbspjErr::InvalidTreeParametersSpecification;
+    DEBUG("param len: " << param->len);
+    if (unlikely(param->len < QN_LookupParameters::NodeSize))
+    {
+      jam();
+      DEBUG_CRASH();
+      break;
+    }
+
     err = createNode(ctx, requestPtr, treeNodePtr);
     if (unlikely(err != 0))
     {
@@ -2756,13 +2773,6 @@ Dbspj::lookup_build(Build_context& ctx,
       dst->requestInfo = requestInfo;
     }
 
-    err = DbspjErr::InvalidTreeNodeSpecification;
-    if (unlikely(node->len < QN_LookupNode::NodeSize))
-    {
-      DEBUG_CRASH();
-      break;
-    }
-
     if (treeBits & QN_LookupNode::L_UNIQUE_INDEX)
     {
       jam();
@@ -2775,14 +2785,6 @@ Dbspj::lookup_build(Build_context& ctx,
     Uint32 tableSchemaVersion = tableId + ((schemaVersion << 16) & 0xFFFF0000);
     dst->tableSchemaVersion = tableSchemaVersion;
 
-    err = DbspjErr::InvalidTreeParametersSpecification;
-    DEBUG("param len: " << param->len);
-    if (unlikely(param->len < QN_LookupParameters::NodeSize))
-    {
-      DEBUG_CRASH();
-      break;
-    }
-
     ctx.m_resultData = param->resultData;
     treeNodePtr.p->m_lookup_data.m_api_resultRef = ctx.m_resultRef;
     treeNodePtr.p->m_lookup_data.m_api_resultData = param->resultData;
@@ -3765,6 +3767,24 @@ Dbspj::scanFrag_build(Build_context& ctx
 
   do
   {
+    err = DbspjErr::InvalidTreeNodeSpecification;
+    DEBUG("scanFrag_build: len=" << node->len);
+    if (unlikely(node->len < QN_ScanFragNode::NodeSize))
+    {
+      jam();
+      DEBUG_CRASH();
+      break;
+    }
+
+    err = DbspjErr::InvalidTreeParametersSpecification;
+    DEBUG("param len: " << param->len);
+    if (unlikely(param->len < QN_ScanFragParameters::NodeSize))
+    {
+      jam();
+      DEBUG_CRASH();
+      break;
+    }
+
     err = createNode(ctx, requestPtr, treeNodePtr);
     if (unlikely(err != 0))
       break;
@@ -3810,24 +3830,9 @@ Dbspj::scanFrag_build(Build_context& ctx
                                (treeBits & DABits::NI_LINKED_DISK) == 0 &&
                                (paramBits & DABits::PI_DISK_ATTR) == 0);
     dst->requestInfo = requestInfo;
-
-    err = DbspjErr::InvalidTreeNodeSpecification;
-    DEBUG("scanFrag_build: len=" << node->len);
-    if (unlikely(node->len < QN_ScanFragNode::NodeSize))
-      break;
-
     dst->tableId = node->tableId;
     dst->schemaVersion = node->tableVersion;
 
-    err = DbspjErr::InvalidTreeParametersSpecification;
-    DEBUG("param len: " << param->len);
-    if (unlikely(param->len < QN_ScanFragParameters::NodeSize))
-    {
-      jam();
-      DEBUG_CRASH();
-      break;
-    }
-
     ctx.m_resultData = param->resultData;
 
     /**
@@ -4315,6 +4320,24 @@ Dbspj::scanIndex_build(Build_context& ct
 
   do
   {
+    err = DbspjErr::InvalidTreeNodeSpecification;
+    DEBUG("scanIndex_build: len=" << node->len);
+    if (unlikely(node->len < QN_ScanIndexNode::NodeSize))
+    {
+      jam();
+      DEBUG_CRASH();
+      break;
+    }
+
+    err = DbspjErr::InvalidTreeParametersSpecification;
+    DEBUG("param len: " << param->len);
+    if (unlikely(param->len < QN_ScanIndexParameters::NodeSize))
+    {
+      jam();
+      DEBUG_CRASH();
+      break;
+    }
+
     err = createNode(ctx, requestPtr, treeNodePtr);
     if (unlikely(err != 0))
       break;
@@ -4355,24 +4378,9 @@ Dbspj::scanIndex_build(Build_context& ct
                                (paramBits & DABits::PI_DISK_ATTR) == 0);
     ScanFragReq::setCorrFactorFlag(requestInfo, 1);
     dst->requestInfo = requestInfo;
-
-    err = DbspjErr::InvalidTreeNodeSpecification;
-    DEBUG("scanIndex_build: len=" << node->len);
-    if (unlikely(node->len < QN_ScanIndexNode::NodeSize))
-      break;
-
     dst->tableId = node->tableId;
     dst->schemaVersion = node->tableVersion;
 
-    err = DbspjErr::InvalidTreeParametersSpecification;
-    DEBUG("param len: " << param->len);
-    if (unlikely(param->len < QN_ScanIndexParameters::NodeSize))
-    {
-      jam();
-      DEBUG_CRASH();
-      break;
-    }
-
     ctx.m_resultData = param->resultData;
 
     /**
@@ -5735,7 +5743,7 @@ Dbspj::scanIndex_execSCAN_NEXTREQ(Signal
 
       DEBUG("scanIndex_execSCAN_NEXTREQ to: " << hex
             << treeNodePtr.p->m_send.m_ref
-              << ", m_node_no=" << treeNodePtr.p->m_node_no
+            << ", m_node_no=" << treeNodePtr.p->m_node_no
             << ", senderData: " << req->senderData);
 
 #ifdef DEBUG_SCAN_FRAGREQ

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2011-10-20 19:52:11 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2011-11-03 17:22:01 +0000
@@ -18,6 +18,7 @@
 #ifndef DBTC_H
 #define DBTC_H
 
+#ifndef DBTC_STATE_EXTRACT
 #include <ndb_limits.h>
 #include <pc.hpp>
 #include <SimulatedBlock.hpp>
@@ -37,6 +38,7 @@
 #include <signaldata/EventReport.hpp>
 #include <trigger_definitions.h>
 #include <SignalCounter.hpp>
+#endif
 
 #ifdef DBTC_C
 /*
@@ -143,14 +145,20 @@
 #define ZTRANS_TOO_BIG 261
 #endif
 
-class Dbtc: public SimulatedBlock {
+class Dbtc
+#ifndef DBTC_STATE_EXTRACT
+  : public SimulatedBlock
+#endif
+{
 public:
 
+#ifndef DBTC_STATE_EXTRACT
   /**
    * Incase of mt-TC...only one instance will perform actual take-over
    *   let this be TAKE_OVER_INSTANCE
    */
   STATIC_CONST( TAKE_OVER_INSTANCE = 1 );
+#endif
 
   enum ConnectionState {
     CS_CONNECTED = 0,
@@ -188,6 +196,7 @@ public:
     CS_WAIT_FIRE_TRIG_REQ = 27
   };
 
+#ifndef DBTC_STATE_EXTRACT
   enum OperationState {
     OS_CONNECTED = 1,
     OS_OPERATING = 2,
@@ -2103,6 +2112,7 @@ private:
 #endif
   Uint32 m_deferred_enabled;
   Uint32 m_max_writes_per_trans;
+#endif
 };
 
 #endif

=== added file 'storage/ndb/src/kernel/blocks/dbtc/DbtcStateDesc.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcStateDesc.cpp	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcStateDesc.cpp	2011-10-28 09:56:57 +0000
@@ -0,0 +1,59 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#include <kernel/statedesc.hpp>
+#define DBTC_STATE_EXTRACT
+#include "Dbtc.hpp"
+
+#define SDESC(a,b,c) { (unsigned)Dbtc::a, #a, b, c }
+
+/**
+ * Value
+ * Friendly name
+ * Description
+ */
+struct ndbkernel_state_desc g_dbtc_apiconnect_state_desc[] =
+{
+  SDESC(CS_CONNECTED, "Connected",
+        "An allocated idle transaction object"),
+  SDESC(CS_DISCONNECTED, "Disconnected",
+        "An unallocated connection object"),
+  SDESC(CS_STARTED, "Started", "A started transaction"),
+  SDESC(CS_RECEIVING, "Receiving", "A transaction receiving operations"),
+  SDESC(CS_RESTART, "", ""),
+  SDESC(CS_ABORTING, "Aborting", "A transaction aborting"),
+  SDESC(CS_COMPLETING, "Completing", "A transaction completing"),
+  SDESC(CS_COMPLETE_SENT, "Completing", "A transaction completing"),
+  SDESC(CS_PREPARE_TO_COMMIT, "", ""),
+  SDESC(CS_COMMIT_SENT, "Committing", "A transaction committing"),
+  SDESC(CS_START_COMMITTING, "", ""),
+  SDESC(CS_COMMITTING, "Committing", "A transaction committing"),
+  SDESC(CS_REC_COMMITTING, "", ""),
+  SDESC(CS_WAIT_ABORT_CONF, "Aborting", ""),
+  SDESC(CS_WAIT_COMPLETE_CONF, "Completing", ""),
+  SDESC(CS_WAIT_COMMIT_CONF, "Committing", ""),
+  SDESC(CS_FAIL_ABORTING, "TakeOverAborting", ""),
+  SDESC(CS_FAIL_ABORTED, "TakeOverAborting", ""),
+  SDESC(CS_FAIL_PREPARED, "", ""),
+  SDESC(CS_FAIL_COMMITTING, "TakeOverCommitting", ""),
+  SDESC(CS_FAIL_COMMITTED, "TakeOverCommitting", ""),
+  SDESC(CS_FAIL_COMPLETED, "TakeOverCompleting", ""),
+  SDESC(CS_START_SCAN, "Scanning", ""),
+  SDESC(CS_SEND_FIRE_TRIG_REQ, "Precomitting", ""),
+  SDESC(CS_WAIT_FIRE_TRIG_REQ, "Precomitting", ""),
+  { 0, 0, 0, 0 }
+};

=== modified file 'storage/ndb/src/kernel/vm/Rope.cpp'
--- a/storage/ndb/src/kernel/vm/Rope.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/vm/Rope.cpp	2011-11-02 14:06:43 +0000
@@ -58,7 +58,7 @@ ConstRope::compare(const char * str, Uin
     int res = memcmp(str, (const char*)curr.p->data, 4 * getSegmentSize());
     if(res != 0){
       if(DEBUG_ROPE)
-	ndbout_c("ConstRope::compare(%s, %d, %s) -> %d", str, left, 
+	ndbout_c("ConstRope::compare(%s, %d, %s) -> %d", str, left,
 		 (const char*)curr.p->data, res);
       return res;
     }
@@ -83,10 +83,10 @@ ConstRope::compare(const char * str, Uin
 }
 
 void
-Rope::copy(char* buf) const {
+LocalRope::copy(char* buf) const {
   char * ptr = buf;
   if(DEBUG_ROPE)
-    ndbout_c("Rope::copy() head = [ %d 0x%x 0x%x ]",
+    ndbout_c("LocalRope::copy() head = [ %d 0x%x 0x%x ]",
 	     head.used, head.firstItem, head.lastItem);
   Uint32 left = head.used;
   Ptr<Segment> curr;
@@ -103,13 +103,13 @@ Rope::copy(char* buf) const {
     memcpy(buf, curr.p->data, left);
   }
   if(DEBUG_ROPE)
-    ndbout_c("Rope::copy()-> %s", ptr);
+    ndbout_c("LocalRope::copy()-> %s", ptr);
 }
 
 int
-Rope::compare(const char * str, Uint32 len) const {
+LocalRope::compare(const char * str, Uint32 len) const {
   if(DEBUG_ROPE)
-    ndbout_c("Rope::compare(%s, %d)", str, (int) len);
+    ndbout_c("LocalRope::compare(%s, %d)", str, (int) len);
   Uint32 left = head.used > len ? len : head.used;
   Ptr<Segment> curr;
   curr.i = head.firstItem;
@@ -118,7 +118,7 @@ Rope::compare(const char * str, Uint32 l
     int res = memcmp(str, (const char*)curr.p->data, 4 * getSegmentSize());
     if(res != 0){
       if(DEBUG_ROPE)
-	ndbout_c("Rope::compare(%s, %d, %s) -> %d", str, (int) len, 
+	ndbout_c("LocalRope::compare(%s, %d, %s) -> %d", str, (int) len,
 		 (const char*)curr.p->data, res);
       return res;
     }
@@ -133,19 +133,19 @@ Rope::compare(const char * str, Uint32 l
     int res = memcmp(str, (const char*)curr.p->data, left);
     if(res){
       if(DEBUG_ROPE)
-	ndbout_c("Rope::compare(%s, %d) -> %d", str, (int) len, res);
+	ndbout_c("LocalRope::compare(%s, %d) -> %d", str, (int) len, res);
       return res;
     }
   }
   if(DEBUG_ROPE)
-    ndbout_c("Rope::compare(%s, %d) -> %d", str, (int) len, head.used > len);
+    ndbout_c("LocalRope::compare(%s, %d) -> %d", str, (int) len, head.used > len);
   return head.used > len;
 }
 
 bool
-Rope::assign(const char * s, Uint32 len, Uint32 hash){
+LocalRope::assign(const char * s, Uint32 len, Uint32 hash){
   if(DEBUG_ROPE)
-    ndbout_c("Rope::assign(%s, %d, 0x%x)", s, (int) len, hash);
+    ndbout_c("LocalRope::assign(%s, %d, 0x%x)", s, (int) len, hash);
   m_hash = hash;
   head.used = (head.used + 3) / 4;
   release();
@@ -164,7 +164,7 @@ Rope::assign(const char * s, Uint32 len,
     }
     head.used = len;
     if(DEBUG_ROPE)
-      ndbout_c("Rope::assign(...) head = [ %d 0x%x 0x%x ]",
+      ndbout_c("LocalRope::assign(...) head = [ %d 0x%x 0x%x ]",
 	       head.used, head.firstItem, head.lastItem);
     return true;
   }
@@ -172,20 +172,20 @@ Rope::assign(const char * s, Uint32 len,
 }
 
 void
-Rope::erase(){
+LocalRope::erase(){
   head.used = (head.used + 3) / 4;
   release();
 }
 
 Uint32
-Rope::hash(const char * p, Uint32 len){
+LocalRope::hash(const char * p, Uint32 len){
   if(DEBUG_ROPE)
-    ndbout_c("Rope::hash(%s, %d)", p, len);
+    ndbout_c("LocalRope::hash(%s, %d)", p, len);
   Uint32 h = 0;
   for (; len > 0; len--)
     h = (h << 5) + h + (* p++);
   if(DEBUG_ROPE)
-    ndbout_c("Rope::hash(...) -> 0x%x", h);
+    ndbout_c("LocalRope::hash(...) -> 0x%x", h);
   return h;
 }
 

=== modified file 'storage/ndb/src/kernel/vm/Rope.hpp'
--- a/storage/ndb/src/kernel/vm/Rope.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/vm/Rope.hpp	2011-11-02 14:06:43 +0000
@@ -56,16 +56,16 @@ private:
   const RopeHandle & src;
 };
 
-class Rope : private RopeBase {
+class LocalRope : private RopeBase {
 public:
-  Rope(RopePool& thePool, RopeHandle& handle)  
+  LocalRope(RopePool& thePool, RopeHandle& handle)
     : RopeBase(thePool), src(handle)
   {
     this->head = src.m_head;
     m_hash = src.m_hash;
   }
   
-  ~Rope(){
+  ~LocalRope(){
     src.m_head = this->head;
     src.m_hash = m_hash;
   }
@@ -94,13 +94,13 @@ private:
 
 inline
 Uint32
-Rope::size() const {
+LocalRope::size() const {
   return head.used;
 }
 
 inline
 bool
-Rope::empty() const {
+LocalRope::empty() const {
   return head.used == 0;
 }
 

=== modified file 'storage/ndb/src/ndbapi/NdbQueryOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-10-23 07:47:05 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-11-03 17:22:01 +0000
@@ -2464,6 +2464,7 @@ NdbQueryImpl::handleBatchComplete(NdbRoo
            << ", finalBatchFrags=" << m_finalBatchFrags
            <<  endl;
   }
+  assert(rootFrag.isFragBatchComplete());
 
   /* May received fragment data after a SCANREF() (timeout?) 
    * terminated the scan.  We are about to close this query, 
@@ -2471,8 +2472,6 @@ NdbQueryImpl::handleBatchComplete(NdbRoo
    */
   if (likely(m_errorReceived == 0))
   {
-    assert(rootFrag.isFragBatchComplete());
-
     assert(m_pendingFrags > 0);                // Check against underflow.
     assert(m_pendingFrags <= m_rootFragCount); // .... and overflow
     m_pendingFrags--;
@@ -2489,6 +2488,16 @@ NdbQueryImpl::handleBatchComplete(NdbRoo
     rootFrag.setReceivedMore();
     return true;
   }
+  else if (!getQueryDef().isScanQuery())  // A failed lookup query
+  {
+    /**
+     * A lookup query will retrieve the rows as part of ::execute().
+     * -> Error must be visible through API before we return control
+     *    to the application.
+     */
+    setErrorCode(m_errorReceived);
+    return true;
+  }
 
   return false;
 } // NdbQueryImpl::handleBatchComplete
@@ -4970,12 +4979,12 @@ NdbQueryOperationImpl::execTCKEYREF(cons
   if (&getRoot() == this || 
       ref->errorCode != static_cast<Uint32>(Err_TupleNotFound))
   {
-    getQuery().setErrorCode(ref->errorCode);
     if (aSignal->getLength() == TcKeyRef::SignalLength)
     {
       // Signal may contain additional error data
       getQuery().m_error.details = (char *)UintPtr(ref->errorData);
     }
+    getQuery().setFetchTerminated(ref->errorCode,false);
   }
 
   NdbRootFragment& rootFrag = getQuery().m_rootFrags[0];

=== modified file 'storage/ndb/test/ndbapi/testNodeRestart.cpp'
--- a/storage/ndb/test/ndbapi/testNodeRestart.cpp	2011-10-17 18:13:57 +0000
+++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp	2011-11-03 17:22:01 +0000
@@ -4757,6 +4757,125 @@ int runSplitLatency25PctFail(NDBT_Contex
   return NDBT_OK;
 }
 
+int
+runMasterFailSlowLCP(NDBT_Context* ctx, NDBT_Step* step)
+{
+  /* Motivated by bug# 13323589 */
+  NdbRestarter res;
+
+  if (res.getNumDbNodes() < 4)
+  {
+    return NDBT_OK;
+  }
+
+  int master = res.getMasterNodeId();
+  int otherVictim = res.getRandomNodeOtherNodeGroup(master, rand());
+  int nextMaster = res.getNextMasterNodeId(master);
+  nextMaster = (nextMaster == otherVictim) ? res.getNextMasterNodeId(otherVictim) :
+    nextMaster;
+  assert(nextMaster != master);
+  assert(nextMaster != otherVictim);
+
+  /* Get a node which is not current or next master */
+  int slowNode= nextMaster;
+  while ((slowNode == nextMaster) ||
+         (slowNode == otherVictim) ||
+         (slowNode == master))
+  {
+    slowNode = res.getRandomNotMasterNodeId(rand());
+  }
+
+  ndbout_c("master: %d otherVictim : %d nextMaster: %d slowNode: %d",
+           master,
+           otherVictim,
+           nextMaster,
+           slowNode);
+
+  /* Steps :
+   * 1. Insert slow LCP frag error in slowNode
+   * 2. Start LCP
+   * 3. Wait for LCP to start
+   * 4. Kill at least two nodes including Master
+   * 5. Wait for killed nodes to attempt to rejoin
+   * 6. Remove slow LCP error
+   * 7. Allow system to stabilise + check no errors
+   */
+  // 5073 = Delay on handling BACKUP_FRAGMENT_CONF in LQH
+  if (res.insertErrorInNode(slowNode, 5073))
+  {
+    return NDBT_FAILED;
+  }
+
+  {
+    int req[1] = {DumpStateOrd::DihStartLcpImmediately};
+    if (res.dumpStateOneNode(master, req, 1))
+    {
+      return NDBT_FAILED;
+    }
+  }
+
+  ndbout_c("Giving LCP time to start...");
+
+  NdbSleep_SecSleep(10);
+
+  ndbout_c("Killing other victim node (%u)...", otherVictim);
+
+  if (res.restartOneDbNode(otherVictim, false, false, true))
+  {
+    return NDBT_FAILED;
+  }
+
+  ndbout_c("Killing Master node (%u)...", master);
+
+  if (res.restartOneDbNode(master, false, false, true))
+  {
+    return NDBT_FAILED;
+  }
+
+  /*
+     ndbout_c("Waiting for old Master node to enter NoStart state...");
+     if (res.waitNodesNoStart(&master, 1, 10))
+     return NDBT_FAILED;
+
+     ndbout_c("Starting old Master...");
+     if (res.startNodes(&master, 1))
+     return NDBT_FAILED;
+
+  */
+  ndbout_c("Waiting for some progress on old Master and other victim restart");
+  NdbSleep_SecSleep(15);
+
+  ndbout_c("Now removing error insert on slow node (%u)", slowNode);
+
+  if (res.insertErrorInNode(slowNode, 0))
+  {
+    return NDBT_FAILED;
+  }
+
+  ndbout_c("Now wait a while to check stability...");
+  NdbSleep_SecSleep(30);
+
+  if (res.getNodeStatus(master) == NDB_MGM_NODE_STATUS_NOT_STARTED)
+  {
+    ndbout_c("Old Master needs kick to restart");
+    if (res.startNodes(&master, 1))
+    {
+      return NDBT_FAILED;
+    }
+  }
+
+  ndbout_c("Wait for cluster recovery...");
+  if (res.waitClusterStarted())
+  {
+    return NDBT_FAILED;
+  }
+
+
+  ndbout_c("Done");
+  return NDBT_OK;
+}
+
+
 NDBT_TESTSUITE(testNodeRestart);
 TESTCASE("NoLoad", 
 	 "Test that one node at a time can be stopped and then restarted "\
@@ -5288,6 +5407,11 @@ TESTCASE("Bug57522", "")
 {
   INITIALIZER(runBug57522);
 }
+TESTCASE("MasterFailSlowLCP",
+         "DIH Master failure during a slow LCP can cause a crash.")
+{
+  INITIALIZER(runMasterFailSlowLCP);
+}
 TESTCASE("ForceStopAndRestart", "Test restart and stop -with force flag")
 {
   STEP(runForceStopAndRestart);

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2011-10-20 19:52:11 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2011-11-03 17:22:01 +0000
@@ -1835,3 +1835,8 @@ max-time 1800
 cmd: testNdbApi
 args: -n TestFragmentedSend T1
 
+max-time: 300
+cmd: testNodeRestart
+args: -nMasterFailSlowLCP T1
+
+

=== modified file 'storage/ndb/test/src/HugoQueries.cpp'
--- a/storage/ndb/test/src/HugoQueries.cpp	2011-10-21 12:36:44 +0000
+++ b/storage/ndb/test/src/HugoQueries.cpp	2011-11-03 17:22:01 +0000
@@ -220,11 +220,51 @@ HugoQueries::runLookupQuery(Ndb* pNdb,
       pTrans->close();
       return NDBT_FAILED;
     }
+#if 0
+    // Disabled, as this is incorrectly handled in SPJ API, will fix soon
+    else
+    {
+      /**
+       * If ::execute() didn't fail, there should not be an error on
+       * its NdbError object either:
+       */
+      const NdbError err = pTrans->getNdbError();
+      if (err.code)
+      {
+        ERR(err);
+        ndbout_c("API INCONSISTENCY: NdbTransaction returned NdbError even if ::execute() succeeded");
+        pTrans->close();
+        return NDBT_FAILED;
+      }
+    }
+#endif
 
+    bool retry = false;
     for (int b = 0; b<batch; b++)
     {
       NdbQuery * query = queries[b];
-      if (query->nextResult() == NdbQuery::NextResult_gotRow)
+
+      /**
+       * As NdbQuery is always 'dirty read' (impl. limitations), 'AbortOnError'
+       * is ignored and handled as 'IgnoreError'. We will therefore not get
+       * errors returned from ::execute() or set into 'pTrans->getNdbError()':
+       * Has to check for errors on the NdbQuery object instead:
+       */
+      const NdbError& err = query->getNdbError();
+      if (err.code)
+      {
+        ERR(err);
+        if (err.status == NdbError::TemporaryError){
+          pTrans->close();
+          retry = true;
+          break;
+        }
+        pTrans->close();
+        return NDBT_FAILED;
+      }
+
+      const NdbQuery::NextResultOutcome stat = query->nextResult();
+      if (stat == NdbQuery::NextResult_gotRow)
       {
         for (unsigned o = 0; o<m_ops.size(); o++)
         {
@@ -240,7 +280,26 @@ HugoQueries::runLookupQuery(Ndb* pNdb,
           }
         }
       }
+      else if (stat == NdbQuery::NextResult_error)
+      {
+        const NdbError& err = query->getNdbError();
+        ERR(err);
+        if (err.status == NdbError::TemporaryError){
+          pTrans->close();
+          retry = true;
+          break;
+        }
+        pTrans->close();
+        return NDBT_FAILED;
+      }
+    }
+    if (retry)
+    {
+      NdbSleep_MilliSleep(50);
+      retryAttempt++;
+      continue;
     }
+
     pTrans->close();
     r += batch;
 
@@ -313,6 +372,44 @@ HugoQueries::runScanQuery(Ndb * pNdb,
       pTrans->close();
       return NDBT_FAILED;
     }
+    else
+    {
+      // Disabled, as this is incorrectly handled in SPJ API, will fix soon
+#if 0
+      /**
+       * If ::execute() didn't fail, there should not be an error on
+       * its NdbError object either:
+       */
+      const NdbError err = pTrans->getNdbError();
+      if (err.code)
+      {
+        ERR(err);
+        ndbout_c("API INCONSISTENCY: NdbTransaction returned NdbError even if ::execute() succeeded");
+        pTrans->close();
+        return NDBT_FAILED;
+      }
+#endif
+
+      /**
+       * As NdbQuery is always 'dirty read' (impl. limitations), 'AbortOnError'
+       * is ignored and handled as 'IgnoreError'. We will therefore not get
+       * errors returned from ::execute() or set into 'pTrans->getNdbError()':
+       * Has to check for errors on the NdbQuery object instead:
+       */
+      NdbError err = query->getNdbError();
+      if (err.code)
+      {
+        ERR(err);
+        if (err.status == NdbError::TemporaryError){
+          pTrans->close();
+          NdbSleep_MilliSleep(50);
+          retryAttempt++;
+          continue;
+        }
+        pTrans->close();
+        return NDBT_FAILED;
+      }
+    }
 
     int r = rand() % 100;
     if (r < abort && ((r & 1) == 0))

=== modified file 'storage/ndb/tools/CMakeLists.txt'
--- a/storage/ndb/tools/CMakeLists.txt	2011-10-24 13:49:09 +0000
+++ b/storage/ndb/tools/CMakeLists.txt	2011-11-03 17:22:01 +0000
@@ -73,7 +73,11 @@ MYSQL_ADD_EXECUTABLE(ndb_config
 TARGET_LINK_LIBRARIES(ndb_config ndbclient_static)
 
 # Build ndbinfo_sql and run it to create ndbinfo.sql
-ADD_EXECUTABLE(ndbinfo_sql ndbinfo_sql.cpp)
+ADD_EXECUTABLE(ndbinfo_sql
+  ndbinfo_sql.cpp
+  ${CMAKE_SOURCE_DIR}/storage/ndb/src/kernel/blocks/dbtc/DbtcStateDesc.cpp
+  ${CMAKE_SOURCE_DIR}/storage/ndb/src/kernel/blocks/dblqh/DblqhStateDesc.cpp
+)
 TARGET_LINK_LIBRARIES(ndbinfo_sql ndbclient_static)
 GET_TARGET_PROPERTY(NDBINFO_SQL_EXE ndbinfo_sql LOCATION)
 ADD_CUSTOM_COMMAND(OUTPUT ${PROJECT_SOURCE_DIR}/storage/ndb/tools/ndbinfo.sql

=== modified file 'storage/ndb/tools/ndbinfo_sql.cpp'
--- a/storage/ndb/tools/ndbinfo_sql.cpp	2011-10-20 19:52:11 +0000
+++ b/storage/ndb/tools/ndbinfo_sql.cpp	2011-11-03 17:22:01 +0000
@@ -50,12 +50,12 @@ struct view {
     "used, total, high, entry_size, cp1.param_name AS param_name1, "
     "cp2.param_name AS param_name2, cp3.param_name AS param_name3, "
     "cp4.param_name AS param_name4 "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>pools p "
-    "LEFT JOIN <NDBINFO_DB>.blocks b ON p.block_number = b.block_number "
-    "LEFT JOIN <NDBINFO_DB>.config_params cp1 ON p.config_param1 = cp1.param_number "
-    "LEFT JOIN <NDBINFO_DB>.config_params cp2 ON p.config_param2 = cp2.param_number "
-    "LEFT JOIN <NDBINFO_DB>.config_params cp3 ON p.config_param3 = cp3.param_number "
-    "LEFT JOIN <NDBINFO_DB>.config_params cp4 ON p.config_param4 = cp4.param_number"
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>pools` p "
+    "LEFT JOIN `<NDBINFO_DB>`.blocks b ON p.block_number = b.block_number "
+    "LEFT JOIN `<NDBINFO_DB>`.config_params cp1 ON p.config_param1 = cp1.param_number "
+    "LEFT JOIN `<NDBINFO_DB>`.config_params cp2 ON p.config_param2 = cp2.param_number "
+    "LEFT JOIN `<NDBINFO_DB>`.config_params cp3 ON p.config_param3 = cp3.param_number "
+    "LEFT JOIN `<NDBINFO_DB>`.config_params cp4 ON p.config_param4 = cp4.param_number"
   },
 #endif
   { "transporters",
@@ -67,7 +67,7 @@ struct view {
     "  WHEN 3 THEN \"DISCONNECTING\""
     "  ELSE NULL "
     " END AS status "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>transporters"
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>transporters`"
   },
   { "logspaces",
     "SELECT node_id, "
@@ -77,7 +77,7 @@ struct view {
     "  ELSE NULL "
     " END AS log_type, "
     "log_id, log_part, total, used "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>logspaces"
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>logspaces`"
   },
   { "logbuffers",
     "SELECT node_id, "
@@ -87,7 +87,7 @@ struct view {
     "  ELSE \"<unknown>\" "
     " END AS log_type, "
     "log_id, log_part, total, used "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>logbuffers"
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>logbuffers`"
   },
   { "resources",
     "SELECT node_id, "
@@ -105,9 +105,9 @@ struct view {
     "  ELSE \"<unknown>\" "
     " END AS resource_name, "
     "reserved, used, max "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>resources"
-   },
-   { "counters",
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>resources`"
+  },
+  { "counters",
     "SELECT node_id, b.block_name, block_instance, "
     "counter_id, "
     "CASE counter_id"
@@ -137,11 +137,11 @@ struct view {
     "  ELSE \"<unknown>\" "
     " END AS counter_name, "
     "val "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>counters c "
-    "LEFT JOIN <NDBINFO_DB>.blocks b "
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>counters` c "
+    "LEFT JOIN `<NDBINFO_DB>`.blocks b "
     "ON c.block_number = b.block_number"
-   },
-   { "nodes",
+  },
+  { "nodes",
     "SELECT node_id, "
     "uptime, "
     "CASE status"
@@ -158,8 +158,8 @@ struct view {
     " END AS status, "
     "start_phase, "
     "config_generation "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>nodes"
-   },
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>nodes`"
+  },
   { "memoryusage",
     "SELECT node_id,"
     "  pool_name AS memory_type,"
@@ -167,17 +167,85 @@ struct view {
     "  SUM(used) AS used_pages,"
     "  SUM(total*entry_size) AS total,"
     "  SUM(total) AS total_pages "
-    "FROM <NDBINFO_DB>.<TABLE_PREFIX>pools "
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>pools` "
     "WHERE block_number IN (248, 254) AND "
     "  (pool_name = \"Index memory\" OR pool_name = \"Data memory\") "
     "GROUP BY node_id, memory_type"
   },
-   { "diskpagebuffer",
+  { "diskpagebuffer",
      "SELECT node_id, block_instance, "
      "pages_written, pages_written_lcp, pages_read, log_waits, "
      "page_requests_direct_return, page_requests_wait_queue, page_requests_wait_io "
-     "FROM <NDBINFO_DB>.<TABLE_PREFIX>diskpagebuffer"
-   }
+     "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>diskpagebuffer`"
+  },
+  { "threadblocks",
+    "SELECT t.node_id, t.thr_no, b.block_name, t.block_instance "
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>threadblocks` t "
+    "LEFT JOIN `<NDBINFO_DB>`.blocks b "
+    "ON t.block_number = b.block_number"
+  },
+  { "threadstat",
+    "SELECT * FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>threadstat`"
+  },
+  { "cluster_transactions",
+    "SELECT"
+    " t.node_id,"
+    " t.block_instance,"
+    " t.transid0 + (t.transid1 << 32) as transid,"
+    " s.state_friendly_name as state, "
+    " t.c_ops as count_operations, "
+    " t.outstanding as outstanding_operations, "
+    " t.timer as inactive_seconds, "
+    " (t.apiref & 65535) as client_node_id, "
+    " (t.apiref >> 16) as client_block_ref "
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>transactions` t"
+    " LEFT JOIN `<NDBINFO_DB>`.`<TABLE_PREFIX>dbtc_apiconnect_state` s"
+    "        ON s.state_int_value = t.state"
+  },
+  { "server_transactions",
+    "SELECT map.mysql_connection_id, t.* "
+    "FROM information_schema.ndb_transid_mysql_connection_map map "
+    "JOIN `<NDBINFO_DB>`.cluster_transactions t "
+    "  ON (map.ndb_transid >> 32) = (t.transid >> 32)"
+  },
+  { "cluster_operations",
+    "SELECT"
+    " o.node_id,"
+    " o.block_instance,"
+    " o.transid0 + (o.transid1 << 32) as transid,"
+    " case o.op "
+    " when 1 then \"READ\""
+    " when 2 then \"READ-SH\""
+    " when 3 then \"READ-EX\""
+    " when 4 then \"INSERT\""
+    " when 5 then \"UPDATE\""
+    " when 6 then \"DELETE\""
+    " when 7 then \"WRITE\""
+    " when 8 then \"UNLOCK\""
+    " when 9 then \"REFRESH\""
+    " when 257 then \"SCAN\""
+    " when 258 then \"SCAN-SH\""
+    " when 259 then \"SCAN-EX\""
+    " ELSE \"<unknown>\""
+    " END as operation_type, "
+    " s.state_friendly_name as state, "
+    " o.tableid, "
+    " o.fragmentid, "
+    " (o.apiref & 65535) as client_node_id, "
+    " (o.apiref >> 16) as client_block_ref, "
+    " (o.tcref & 65535) as tc_node_id, "
+    " ((o.tcref >> 16) & 511) as tc_block_no, "
+    " ((o.tcref >> (16 + 9)) & 127) as tc_block_instance "
+    "FROM `<NDBINFO_DB>`.`<TABLE_PREFIX>operations` o"
+    " LEFT JOIN `<NDBINFO_DB>`.`<TABLE_PREFIX>dblqh_tcconnect_state` s"
+    "        ON s.state_int_value = o.state"
+  },
+  { "server_operations",
+    "SELECT map.mysql_connection_id, o.* "
+    "FROM `<NDBINFO_DB>`.cluster_operations o "
+    "JOIN information_schema.ndb_transid_mysql_connection_map map"
+    "  ON (map.ndb_transid >> 32) = (o.transid >> 32)"
+  }
 };
 
 size_t num_views = sizeof(views)/sizeof(views[0]);
@@ -214,6 +288,38 @@ static void fill_blocks(BaseString& sql)
   }
 }
 
+#include "kernel/statedesc.hpp"
+
+static void fill_dbtc_apiconnect_state(BaseString& sql)
+{
+  const char* separator = "";
+  for (unsigned i = 0; g_dbtc_apiconnect_state_desc[i].name != 0; i++)
+  {
+    sql.appfmt("%s(%u, \"%s\", \"%s\", \"%s\")",
+               separator,
+               g_dbtc_apiconnect_state_desc[i].value,
+               g_dbtc_apiconnect_state_desc[i].name,
+               g_dbtc_apiconnect_state_desc[i].friendly_name,
+               g_dbtc_apiconnect_state_desc[i].description);
+    separator = ", ";
+  }
+}
+
+static void fill_dblqh_tcconnect_state(BaseString& sql)
+{
+  const char* separator = "";
+  for (unsigned i = 0; g_dblqh_tcconnect_state_desc[i].name != 0; i++)
+  {
+    sql.appfmt("%s(%u, \"%s\", \"%s\", \"%s\")",
+               separator,
+               g_dblqh_tcconnect_state_desc[i].value,
+               g_dblqh_tcconnect_state_desc[i].name,
+               g_dblqh_tcconnect_state_desc[i].friendly_name,
+               g_dblqh_tcconnect_state_desc[i].description);
+    separator = ", ";
+  }
+}
+
 struct lookup {
   const char* name;
   const char* columns;
@@ -224,12 +330,28 @@ struct lookup {
     "block_number INT UNSIGNED PRIMARY KEY, "
     "block_name VARCHAR(512)",
     &fill_blocks
-   },
+  },
   { "config_params",
     "param_number INT UNSIGNED PRIMARY KEY, "
     "param_name VARCHAR(512)",
     &fill_config_params
-   }
+  },
+  {
+    "<TABLE_PREFIX>dbtc_apiconnect_state",
+    "state_int_value  INT UNSIGNED PRIMARY KEY, "
+    "state_name VARCHAR(256), "
+    "state_friendly_name VARCHAR(256), "
+    "state_description VARCHAR(256)",
+    &fill_dbtc_apiconnect_state
+  },
+  {
+    "<TABLE_PREFIX>dblqh_tcconnect_state",
+    "state_int_value  INT UNSIGNED PRIMARY KEY, "
+    "state_name VARCHAR(256), "
+    "state_friendly_name VARCHAR(256), "
+    "state_description VARCHAR(256)",
+    &fill_dblqh_tcconnect_state
+  }
 };
 
 size_t num_lookups = sizeof(lookups)/sizeof(lookups[0]);
@@ -339,7 +461,7 @@ int main(int argc, char** argv){
   printf("# Drop any old views in %s\n", opt_ndbinfo_db);
   for (size_t i = 0; i < num_views; i++)
   {
-    sql.assfmt("DROP VIEW IF EXISTS %s.%s",
+    sql.assfmt("DROP VIEW IF EXISTS `%s`.`%s`",
                opt_ndbinfo_db, views[i].name);
     print_conditional_sql(sql);
   }
@@ -347,8 +469,10 @@ int main(int argc, char** argv){
   printf("# Drop any old lookup tables in %s\n", opt_ndbinfo_db);
   for (size_t i = 0; i < num_lookups; i++)
   {
-    sql.assfmt("DROP TABLE IF EXISTS %s.%s",
-               opt_ndbinfo_db, lookups[i].name);
+    BaseString table_name = replace_tags(lookups[i].name);
+
+    sql.assfmt("DROP TABLE IF EXISTS `%s`.`%s`",
+               opt_ndbinfo_db, table_name.c_str());
     print_conditional_sql(sql);
   }
 
@@ -409,16 +533,17 @@ int main(int argc, char** argv){
   for (size_t i = 0; i < num_lookups; i++)
   {
     lookup l = lookups[i];
-    printf("# %s.%s\n", opt_ndbinfo_db, l.name);
+    BaseString table_name = replace_tags(l.name);
+    printf("# %s.%s\n", opt_ndbinfo_db, table_name.c_str());
 
     /* Create lookup table */
     sql.assfmt("CREATE TABLE `%s`.`%s` (%s)",
-               opt_ndbinfo_db, l.name, l.columns);
+               opt_ndbinfo_db, table_name.c_str(), l.columns);
     print_conditional_sql(sql);
 
     /* Insert data */
     sql.assfmt("INSERT INTO `%s`.`%s` VALUES ",
-               opt_ndbinfo_db, l.name);
+               opt_ndbinfo_db, table_name.c_str());
     l.fill(sql);
     print_conditional_sql(sql);
   }

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-5.5-cluster branch (jonas.oreland:3623 to 3624) jonas oreland7 Nov