List: Commits
From: Pekka Nousiainen  Date: November 8 2011 7:53pm
Subject: bzr push into mysql-5.1-telco-7.0 branch (pekka.nousiainen:4629 to 4639)
 4639 jonas oreland	2011-11-08
      ndb - bump version to 7.0.29

    modified:
      configure.in
      storage/ndb/ndb_configure.m4
 4638 jonas oreland	2011-11-08 {clone-mysql-5.1.56-ndb-7.0.28-src-build} [merge]
      ndb - merge 63 to 70

 4637 Jonas Oreland	2011-11-07 [merge]
      ndb - merge 63 to 70

    added:
      mysql-test/suite/ndb/r/ndb_multi_update_delete.result
      mysql-test/suite/ndb/t/ndb_multi_update_delete.test
    modified:
      sql/sql_select.cc
 4636 Martin Skold	2011-11-07
      Bug#11765142  58073: CONCAT AND OR GIVE INCORRECT QUERY RESULTS: Fixed typo

    modified:
      sql/ha_ndbcluster_cond.cc
 4635 Martin Skold	2011-11-04
      Bug#11765142  58073: CONCAT AND OR GIVE INCORRECT QUERY RESULTS: When using the result of a const function returning a string for LIKE/NOTLIKE, we need to discard the length byte before passing the value to the scan filter

    modified:
      mysql-test/suite/ndb/r/ndb_condition_pushdown.result
      mysql-test/suite/ndb/t/ndb_condition_pushdown.test
      sql/ha_ndbcluster_cond.cc
      sql/ha_ndbcluster_cond.h
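
For context on 4635: when the pattern string is stored into a VARCHAR field via
save_in_field, the record buffer carries a length prefix in front of the
character data, while the NDB scan filter expects only the pattern bytes.
A minimal standalone sketch of that length-byte issue (the buffer layout and
names below are illustrative only, not the actual handler code):

  #include <cstddef>
  #include <cstdio>
  #include <cstring>

  // Illustrative only: a VARCHAR(255)-style buffer where byte 0 holds the
  // length and the character data follows, similar to how a short VARCHAR
  // value is laid out in the record buffer.
  struct VarcharBuf {
    unsigned char bytes[1 + 255];

    void store(const char* s) {
      size_t len = strlen(s);
      bytes[0] = (unsigned char)len;   // length prefix
      memcpy(bytes + 1, s, len);       // character data
    }
    const char* value() const { return (const char*)(bytes + 1); }
    size_t length() const { return bytes[0]; }
  };

  int main() {
    VarcharBuf v;
    v.store("%Text%");

    // Passing the raw buffer hands the filter the length byte (0x06 here)
    // as the first pattern character, so the LIKE comparison goes wrong.
    printf("raw first byte: 0x%02x\n", v.bytes[0]);

    // Passing value()/length() hands the filter only the pattern bytes.
    printf("pattern: %.*s\n", (int)v.length(), v.value());
    return 0;
  }

The actual fix (see the sql/ha_ndbcluster_cond.cc diff below) reads the value
back out of the field with val_str() and passes that string, together with the
length returned by save_in_field(), to NdbScanFilter::cmp().
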
 4634 Mauritz Sundell	2011-11-03
      ndb - some typedef in preparation for pool change
      
      No functional changes.
      
      Only introducing some typedefs to simplify the switch from ArrayPool
      to another pool in the near future.

    modified:
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
 4633 Mauritz Sundell	2011-11-02
      ndb - removed trailing white spaces

    modified:
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
 4632 Mauritz Sundell	2011-11-02
      ndb - removed some unused parameters and code in dbdict

    modified:
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
 4631 Mauritz Sundell	2011-11-02
      ndb - rename class Rope to LocalRope
      
      No functional changes.  
      
      Only renaming, to emphasize that the class shares some of its
      behaviour with the other "Local" classes in ndb.  Like them, it is
      intended to be defined as a local variable whose constructor copies
      some non-local object and whose destructor copies any changed state
      back to that non-local object on destruction.
      
      So beware: don't change or destroy the non-local object while the
      local copy is alive.

    modified:
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
      storage/ndb/src/kernel/vm/Rope.cpp
      storage/ndb/src/kernel/vm/Rope.hpp
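
The "Local" pattern described in 4631 (a stack-allocated object whose
constructor copies in some non-locally stored state and whose destructor
copies any changes back when it is destroyed) boils down to the following
minimal sketch (hypothetical names, not the actual Rope/LocalRope code):

  #include <cassert>
  #include <cstddef>

  // Hypothetical backing store living outside the current scope, e.g. a
  // head/size pair kept in some pooled record.
  struct StoredHandle {
    size_t size;
  };

  // Copy-in/copy-out "Local" wrapper: the constructor copies the stored
  // state into the local object, the destructor copies a possibly changed
  // state back.  While an instance is alive, the backing handle must not
  // be modified or destroyed through any other path.
  class LocalThing {
  public:
    explicit LocalThing(StoredHandle& src) : m_src(src), m_size(src.size) {}
    ~LocalThing() { m_src.size = m_size; }   // copy-back on destruction

    void append(size_t n) { m_size += n; }   // mutates only the local copy
    size_t size() const { return m_size; }

  private:
    StoredHandle& m_src;
    size_t m_size;
  };

  int main() {
    StoredHandle h = { 4 };
    {
      LocalThing local(h);   // copy-in
      local.append(3);
      assert(h.size == 4);   // backing store not yet updated
    }                        // copy-out: destructor writes back
    assert(h.size == 7);
    return 0;
  }

The warning in the commit message follows directly from the copy-back in the
destructor: any concurrent change to the backing object is silently
overwritten when the local copy goes out of scope.
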
 4630 Ole John Aske	2011-10-31
      SPJ: Small refactoring to improve error handling:
      
      Moved the error checks for incorrect treeNode and parameter lengths
      earlier in the code, to ensure they are actually checked
      *before* they are used.

    modified:
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
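
The 4630 change is the usual check-before-use ordering: validate the received
tree-node and parameter lengths before any code path consumes the data they
describe.  A minimal sketch of the idea (hypothetical request layout, not the
DBSPJ code):

  #include <cstdint>

  // Hypothetical request: claimed tree and parameter lengths, followed by
  // the words actually received.
  struct Request {
    uint32_t treeLen;        // words of tree data claimed by the sender
    uint32_t paramLen;       // words of parameter data claimed by the sender
    const uint32_t* payload;
    uint32_t payloadWords;   // words actually received
  };

  enum { ErrNone = 0, ErrBadLength = 1 };

  // Reject malformed lengths up front, before anything dereferences the
  // payload they describe.
  int handleRequest(const Request& req) {
    if (req.treeLen == 0 || req.treeLen + req.paramLen > req.payloadWords)
      return ErrBadLength;   // checked *before* first use

    // ... safe to walk req.payload[0 .. treeLen + paramLen) from here ...
    return ErrNone;
  }
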
 4629 Frazer Clement	2011-10-28 [merge]
      Merge 6.3->7.0

    modified:
      storage/ndb/src/kernel/blocks/ERROR_codes.txt
      storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/test/ndbapi/testNodeRestart.cpp
      storage/ndb/test/run-test/daily-basic-tests.txt
=== modified file 'configure.in'
--- a/configure.in	2011-09-22 17:10:09 +0000
+++ b/configure.in	2011-11-08 09:05:02 +0000
@@ -12,7 +12,7 @@ dnl
 dnl When changing the major version number please also check the switch
 dnl statement in mysqlbinlog::check_master_version().  You may also need
 dnl to update version.c in ndb.
-AC_INIT([MySQL Server], [5.1.56-ndb-7.0.28], [], [mysql])
+AC_INIT([MySQL Server], [5.1.56-ndb-7.0.29], [], [mysql])
 
 AC_CONFIG_SRCDIR([sql/mysqld.cc])
 AC_CANONICAL_SYSTEM

=== modified file 'mysql-test/suite/ndb/r/ndb_condition_pushdown.result'
--- a/mysql-test/suite/ndb/r/ndb_condition_pushdown.result	2011-06-23 12:19:32 +0000
+++ b/mysql-test/suite/ndb/r/ndb_condition_pushdown.result	2011-11-04 08:33:56 +0000
@@ -2292,5 +2292,59 @@ pk1	pk1	pk2	pk2
 3	3	0	0
 4	3	0	0
 drop table t;
+create table mytable(i int, s varchar(255) ) engine = ndb;
+insert into mytable values(0,"Text Hej"),(1, "xText aaja");
+set engine_condition_pushdown=1;
+select * from mytable where s like concat("%Text","%") or s like concat("%Text","%") order by i;
+i	s
+0	Text Hej
+1	xText aaja
+select * from mytable where s like concat("%Text","%") or s like "%Text%" order by i;
+i	s
+0	Text Hej
+1	xText aaja
+select * from mytable where s like concat("%Text","%") or s like concat("%Text1","%") order by i;
+i	s
+0	Text Hej
+1	xText aaja
+select * from mytable where s like concat("%Text","%") or s like "%Text1%" order by i;
+i	s
+0	Text Hej
+1	xText aaja
+select * from mytable where s not like concat("%Text","%") or s not like concat("%Text","%") order by i;
+i	s
+select * from mytable where s not like concat("%Text1","%") or s not like concat("%Text","%") order by i;
+i	s
+0	Text Hej
+1	xText aaja
+select * from mytable where s like concat("%Text","%") or s not like "%Text%" order by i;
+i	s
+0	Text Hej
+1	xText aaja
+select * from mytable where s like concat("%Text1","%") or s not like "%Text%" order by i;
+i	s
+select * from mytable where s like concat("%Text","%") and s like concat("%Text","%") order by i;
+i	s
+0	Text Hej
+1	xText aaja
+select * from mytable where s like concat("%Text","%") and s like "%Text%" order by i;
+i	s
+0	Text Hej
+1	xText aaja
+select * from mytable where s like concat("%Text","%") and s like concat("%Text1","%") order by i;
+i	s
+select * from mytable where s like concat("%Text","%") and s like "%Text1%" order by i;
+i	s
+select * from mytable where s not like concat("%Text","%") and s not like concat("%Text","%") order by i;
+i	s
+select * from mytable where s not like concat("%Text","%") and s not like concat("%Text1","%") order by i;
+i	s
+select * from mytable where s like concat("%Text","%") and s not like "%Text%" order by i;
+i	s
+select * from mytable where s like concat("%Text","%") and s not like "%Text1%" order by i;
+i	s
+0	Text Hej
+1	xText aaja
+drop table mytable;
 set engine_condition_pushdown = @old_ecpd;
 DROP TABLE t1,t2,t3,t4,t5;

=== added file 'mysql-test/suite/ndb/r/ndb_multi_update_delete.result'
--- a/mysql-test/suite/ndb/r/ndb_multi_update_delete.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb/r/ndb_multi_update_delete.result	2011-11-07 14:23:36 +0000
@@ -0,0 +1,68 @@
+
+# Bug#12728221 - MULTI-TABLE DELETE ON UNIQUE INDEX REMOVES WRONG ROWS IN CLUSTER
+
+CREATE TABLE t1 (
+a int(11) NOT NULL,
+b int(11) DEFAULT NULL,
+c varchar(64) DEFAULT NULL,
+PRIMARY KEY (a),
+UNIQUE KEY c (c),
+KEY b (b)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+CREATE TABLE t2 (
+a int(11) NOT NULL,
+b text,
+PRIMARY KEY (a)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+INSERT INTO t1 VALUES (1,3,'test3'),(2,2,'test2'),(3,1,'test1');
+INSERT INTO t2 VALUES (1,''),(2,''),(3,'');
+delete t1,t2 from t1,t2 where t1.b=t2.a and t1.c='test1';
+delete t1,t2 from t1,t2 where t1.b=t2.a and t1.c='test2';
+delete t1,t2 from t1,t2 where t1.b=t2.a and t1.c='test3';
+select * from t1,t2;
+a	b	c	a	b
+drop table t1,t2;
+
+# Bug#12718336 - 61705: TRIGGER WORKS IMPROPERLY IN MYSQL CLUSTER.
+
+CREATE TABLE t1 (a int(11) NOT NULL, b int(11) DEFAULT NULL, c
+varchar(64) DEFAULT NULL, d char(10), j int, PRIMARY KEY (a),
+UNIQUE KEY c (c),  KEY b (b)) ENGINE=ndbcluster DEFAULT
+CHARSET=latin1;
+CREATE TABLE t2 (  e int(11) NOT NULL,  f char(10),  PRIMARY KEY
+(e)) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+CREATE TABLE t3 (t3_id int(11) NOT NULL AUTO_INCREMENT,g int(11)
+DEFAULT NULL,  h char(10), PRIMARY KEY (t3_id)) ENGINE=ndbcluster
+DEFAULT CHARSET=latin1;
+CREATE TRIGGER trg1 AFTER UPDATE ON t1 FOR EACH ROW INSERT INTO
+t3(g,h) values (old.b,  old.d);
+INSERT INTO t1 VALUES (11,1,'test1', 'hei1',111),(22,2,'test2',
+'hei2',222),(33,3,'test3', 'hei3',333);
+INSERT INTO t2 VALUES (1,'xx'),(2,'yy'),(3,'zz');
+select * from t1;
+a	b	c	d	j
+11	1	test1	hei1	111
+22	2	test2	hei2	222
+33	3	test3	hei3	333
+select * from t2;
+e	f
+1	xx
+2	yy
+3	zz
+select * from t3;
+t3_id	g	h
+update t1,t2 set t1.d=t2.f where t1.b=t2.e and t1.c='test1';
+select * from t1;
+a	b	c	d	j
+11	1	test1	xx	111
+22	2	test2	hei2	222
+33	3	test3	hei3	333
+select * from t2;
+e	f
+1	xx
+2	yy
+3	zz
+select * from t3;
+t3_id	g	h
+1	1	hei1
+drop table t1,t2,t3;

=== modified file 'mysql-test/suite/ndb/t/ndb_condition_pushdown.test'
--- a/mysql-test/suite/ndb/t/ndb_condition_pushdown.test	2011-01-17 13:29:52 +0000
+++ b/mysql-test/suite/ndb/t/ndb_condition_pushdown.test	2011-11-04 08:33:56 +0000
@@ -2368,6 +2368,32 @@ select table1.pk1, table2.pk1, table1.pk
 
 drop table t;
 
+# Bug#11765142 58073: CONCAT AND OR GIVE INCORRECT QUERY RESULTS
+create table mytable(i int, s varchar(255) ) engine = ndb;
+insert into mytable values(0,"Text Hej"),(1, "xText aaja");
+set engine_condition_pushdown=1;
+
+ select * from mytable where s like concat("%Text","%") or s like concat("%Text","%") order by i;
+ select * from mytable where s like concat("%Text","%") or s like "%Text%" order by i;
+ select * from mytable where s like concat("%Text","%") or s like concat("%Text1","%") order by i;
+ select * from mytable where s like concat("%Text","%") or s like "%Text1%" order by i;
+ select * from mytable where s not like concat("%Text","%") or s not like concat("%Text","%") order by i;
+ select * from mytable where s not like concat("%Text1","%") or s not like concat("%Text","%") order by i;
+ select * from mytable where s like concat("%Text","%") or s not like "%Text%" order by i;
+ select * from mytable where s like concat("%Text1","%") or s not like "%Text%" order by i;
+
+
+ select * from mytable where s like concat("%Text","%") and s like concat("%Text","%") order by i;
+ select * from mytable where s like concat("%Text","%") and s like "%Text%" order by i;
+ select * from mytable where s like concat("%Text","%") and s like concat("%Text1","%") order by i;
+ select * from mytable where s like concat("%Text","%") and s like "%Text1%" order by i;
+ select * from mytable where s not like concat("%Text","%") and s not like concat("%Text","%") order by i;
+ select * from mytable where s not like concat("%Text","%") and s not like concat("%Text1","%") order by i;
+ select * from mytable where s like concat("%Text","%") and s not like "%Text%" order by i;
+ select * from mytable where s like concat("%Text","%") and s not like "%Text1%" order by i;
+
+drop table mytable;
+
 set engine_condition_pushdown = @old_ecpd;
 DROP TABLE t1,t2,t3,t4,t5;
 

=== added file 'mysql-test/suite/ndb/t/ndb_multi_update_delete.test'
--- a/mysql-test/suite/ndb/t/ndb_multi_update_delete.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb/t/ndb_multi_update_delete.test	2011-11-07 14:23:36 +0000
@@ -0,0 +1,64 @@
+-- source include/have_ndb.inc
+
+--echo
+--echo # Bug#12728221 - MULTI-TABLE DELETE ON UNIQUE INDEX REMOVES WRONG ROWS IN CLUSTER
+--echo
+
+CREATE TABLE t1 (
+  a int(11) NOT NULL,
+  b int(11) DEFAULT NULL,
+  c varchar(64) DEFAULT NULL,
+  PRIMARY KEY (a),
+  UNIQUE KEY c (c),
+  KEY b (b)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+
+CREATE TABLE t2 (
+  a int(11) NOT NULL,
+  b text,
+  PRIMARY KEY (a)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+
+INSERT INTO t1 VALUES (1,3,'test3'),(2,2,'test2'),(3,1,'test1');
+INSERT INTO t2 VALUES (1,''),(2,''),(3,'');
+
+delete t1,t2 from t1,t2 where t1.b=t2.a and t1.c='test1';
+delete t1,t2 from t1,t2 where t1.b=t2.a and t1.c='test2';
+delete t1,t2 from t1,t2 where t1.b=t2.a and t1.c='test3'; 
+select * from t1,t2;
+drop table t1,t2;
+
+
+--echo
+--echo # Bug#12718336 - 61705: TRIGGER WORKS IMPROPERLY IN MYSQL CLUSTER.
+--echo
+
+CREATE TABLE t1 (a int(11) NOT NULL, b int(11) DEFAULT NULL, c
+varchar(64) DEFAULT NULL, d char(10), j int, PRIMARY KEY (a),
+UNIQUE KEY c (c),  KEY b (b)) ENGINE=ndbcluster DEFAULT
+CHARSET=latin1;
+CREATE TABLE t2 (  e int(11) NOT NULL,  f char(10),  PRIMARY KEY
+(e)) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+CREATE TABLE t3 (t3_id int(11) NOT NULL AUTO_INCREMENT,g int(11)
+DEFAULT NULL,  h char(10), PRIMARY KEY (t3_id)) ENGINE=ndbcluster
+DEFAULT CHARSET=latin1;
+CREATE TRIGGER trg1 AFTER UPDATE ON t1 FOR EACH ROW INSERT INTO
+t3(g,h) values (old.b,  old.d);
+INSERT INTO t1 VALUES (11,1,'test1', 'hei1',111),(22,2,'test2',
+'hei2',222),(33,3,'test3', 'hei3',333);
+INSERT INTO t2 VALUES (1,'xx'),(2,'yy'),(3,'zz');
+--sorted_result
+select * from t1;
+--sorted_result
+select * from t2;
+--sorted_result
+select * from t3;
+update t1,t2 set t1.d=t2.f where t1.b=t2.e and t1.c='test1';
+--sorted_result
+select * from t1;
+--sorted_result
+select * from t2;
+--sorted_result
+select * from t3;
+
+drop table t1,t2,t3;

=== modified file 'sql/ha_ndbcluster_cond.cc'
--- a/sql/ha_ndbcluster_cond.cc	2011-06-30 15:59:25 +0000
+++ b/sql/ha_ndbcluster_cond.cc	2011-11-07 11:09:09 +0000
@@ -1209,18 +1209,27 @@ ha_ndbcluster_cond::build_scan_filter_pr
     case NDB_LIKE_FUNC:
     {
       if (!value || !field) break;
-      if ((value->qualification.value_type != Item::STRING_ITEM) &&
-          (value->qualification.value_type != Item::VARBIN_ITEM))
-          break;
+      bool is_string= (value->qualification.value_type == Item::STRING_ITEM);
+      bool is_varbin= (value->qualification.value_type == Item::VARBIN_ITEM);
+      if (!is_string && !is_varbin)
+        break;
       // Save value in right format for the field type
-      value->save_in_field(field);
+      uint32 len= value->save_in_field(field);
+      char buff[MAX_FIELD_WIDTH];
+      String str(buff,sizeof(buff),field->get_field_charset());
+      field->get_field_val_str(&str);
+      const char *val=
+        (value->is_const_func() && is_string)?
+        str.ptr()
+        : value->get_val();
       DBUG_PRINT("info", ("Generating LIKE filter: like(%d,%s,%d)", 
-                          field->get_field_no(), value->get_val(), 
-                          value->pack_length()));
+                          field->get_field_no(),
+                          val,
+                          len));
       if (filter->cmp(NdbScanFilter::COND_LIKE, 
                       field->get_field_no(),
-                      value->get_val(),
-                      value->pack_length()) == -1)
+                      val,
+                      len) == -1)
         DBUG_RETURN(1);
       cond= cond->next->next->next;
       DBUG_RETURN(0);
@@ -1228,18 +1237,27 @@ ha_ndbcluster_cond::build_scan_filter_pr
     case NDB_NOTLIKE_FUNC:
     {
       if (!value || !field) break;
-      if ((value->qualification.value_type != Item::STRING_ITEM) &&
-          (value->qualification.value_type != Item::VARBIN_ITEM))
-          break;
+      bool is_string= (value->qualification.value_type == Item::STRING_ITEM);
+      bool is_varbin= (value->qualification.value_type == Item::VARBIN_ITEM);
+      if (!is_string && !is_varbin)
+        break;
       // Save value in right format for the field type
-      value->save_in_field(field);
+      uint32 len= value->save_in_field(field);
+      char buff[MAX_FIELD_WIDTH];
+      String str(buff,sizeof(buff),field->get_field_charset());
+      field->get_field_val_str(&str);
+      const char *val=
+        (value->is_const_func() && is_string)?
+        str.ptr()
+        : value->get_val();
       DBUG_PRINT("info", ("Generating NOTLIKE filter: notlike(%d,%s,%d)", 
-                          field->get_field_no(), value->get_val(), 
-                          value->pack_length()));
+                          field->get_field_no(),
+                          (value->pack_length() > len)?value->get_val():val,
+                          (value->pack_length() > len)?value->pack_length():len));
       if (filter->cmp(NdbScanFilter::COND_NOT_LIKE, 
                       field->get_field_no(),
-                      value->get_val(),
-                      value->pack_length()) == -1)
+                      (value->pack_length() > len)?value->get_val():val,
+                      (value->pack_length() > len)?value->pack_length():len) == -1)
         DBUG_RETURN(1);
       cond= cond->next->next->next;
       DBUG_RETURN(0);

=== modified file 'sql/ha_ndbcluster_cond.h'
--- a/sql/ha_ndbcluster_cond.h	2011-06-30 15:59:25 +0000
+++ b/sql/ha_ndbcluster_cond.h	2011-11-04 08:33:56 +0000
@@ -201,6 +201,9 @@ public:
       break;
     case(NDB_FIELD):
       return (char*) value.field_value->field->ptr; 
+    case(NDB_FUNCTION):
+      if(qualification.value_type == Item::STRING_ITEM)
+        return value.item->str_value.ptr();
     default:
       break;
     }
@@ -208,8 +211,40 @@ public:
     return NULL;
   };
 
-  void save_in_field(Ndb_item *field_item)
+  CHARSET_INFO *get_field_charset()
   {
+    Field *field= get_field();
+    if (field)
+      return field->charset();
+
+    return NULL;
+  }
+
+  String *get_field_val_str(String *str)
+  {
+    Field *field= get_field();
+    if (field)
+      return field->val_str(str);
+
+    return NULL;
+  }
+
+  bool is_const_func()
+  {
+    const Item *item= value.item;
+
+    if (item->type() == Item::FUNC_ITEM)
+    {
+      Item_func *func_item= (Item_func *) item;
+      if (func_item->const_item())
+        return true;
+    }
+    return false;
+  };
+
+  uint32 save_in_field(Ndb_item *field_item)
+  {
+    uint32 length= 0;
     DBUG_ENTER("save_in_field");
     Field *field = field_item->value.field_value->field;
     const Item *item= value.item;
@@ -221,13 +256,16 @@ public:
       {
         DBUG_PRINT("info", ("Comparing field with longer value"));
         DBUG_PRINT("info", ("Field can store %u", field->field_length));
+        length= field->field_length;
       }
+      else
+        length= item->max_length;
       my_bitmap_map *old_map=
         dbug_tmp_use_all_columns(field->table, field->table->write_set);
       ((Item *)item)->save_in_field(field, FALSE);
       dbug_tmp_restore_column_map(field->table->write_set, old_map);
     }
-    DBUG_VOID_RETURN;
+    DBUG_RETURN(length);
   };
 
   static NDB_FUNC_TYPE item_func_to_ndb_func(Item_func::Functype fun)

=== modified file 'sql/sql_select.cc'
--- a/sql/sql_select.cc	2011-08-16 10:20:19 +0000
+++ b/sql/sql_select.cc	2011-11-07 14:46:47 +0000
@@ -12013,6 +12013,36 @@ join_read_const_table(JOIN_TAB *tab, POS
   table->const_table=1;
   table->null_row=0;
   table->status=STATUS_NO_RECORD;
+
+#ifndef MCP_BUG_12728221
+  MY_BITMAP * const save_read_set= table->read_set;
+  if (table->reginfo.lock_type >= TL_WRITE_ALLOW_WRITE)
+  {
+    const enum_sql_command sql_command= tab->join->thd->lex->sql_command;
+    if (sql_command == SQLCOM_UPDATE_MULTI ||
+        sql_command == SQLCOM_DELETE_MULTI)
+    {
+      /*
+        In a multi-UPDATE, if we represent "depends on" with "->", we have:
+        "what columns to read (read_set)" ->
+        "whether table will be updated on-the-fly or with tmp table" ->
+        "whether to-be-updated columns are used by access path"
+        "access path to table (range, ref, scan...)" ->
+        "query execution plan" ->
+        "what tables are const" ->
+        "reading const tables" ->
+        "what columns to read (read_set)".
+        To break this loop, we always read all columns of a constant table if
+        it is going to be updated.
+        Another case is in multi-UPDATE and multi-DELETE, when the table has a
+        trigger: bits of columns needed by the trigger are turned on in
+        result->initialize_tables(), which has not yet been called when we do
+        the reading now, so we must read all columns.
+      */
+      table->column_bitmaps_set(&table->s->all_set, table->write_set);
+    }
+  }
+#endif
   
   if (tab->type == JT_SYSTEM)
   {
@@ -12027,7 +12057,12 @@ join_read_const_table(JOIN_TAB *tab, POS
 #else
       if (!table->maybe_null || error > 0)
 #endif
+      {
+#ifndef MCP_BUG_12728221
+        table->column_bitmaps_set(save_read_set, table->write_set);
+#endif
 	DBUG_RETURN(error);
+      }
     }
   }
   else
@@ -12052,7 +12087,12 @@ join_read_const_table(JOIN_TAB *tab, POS
 #else
       if (!table->maybe_null || error > 0)
 #endif
+      {
+#ifndef MCP_BUG_12728221
+        table->column_bitmaps_set(save_read_set, table->write_set);
+#endif
 	DBUG_RETURN(error);
+      }
     }
   }
   if (*tab->on_expr_ref && !table->null_row)
@@ -12083,6 +12123,9 @@ join_read_const_table(JOIN_TAB *tab, POS
            embedding->nested_join->join_list.head() == embedded);
   }
 
+#ifndef MCP_BUG_12728221
+  table->column_bitmaps_set(save_read_set, table->write_set);
+#endif
   DBUG_RETURN(0);
 }
 

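The sql_select.cc hunks above wrap the const-table read in a save / widen /
restore of the table's read_set: if the table may later be updated by a
multi-table UPDATE or DELETE, all columns are read, and the original bitmap is
restored on every exit path.  A minimal standalone sketch of that pattern,
using a plain bitset instead of MySQL's MY_BITMAP:

  #include <bitset>
  #include <cassert>

  // Stand-in for a table's column bitmaps; a std::bitset is enough to show
  // the save/widen/restore pattern.
  struct FakeTable {
    std::bitset<8> read_set;   // columns the access path plans to read
    std::bitset<8> all_set;    // every column of the table

    FakeTable() { all_set.set(); read_set.set(0); }  // plan reads only col 0
  };

  // Read a "const" table row: if the table may be updated later (multi-table
  // UPDATE/DELETE, triggers), temporarily read all columns, then restore the
  // original read_set so the rest of the plan is unaffected.
  void readConstTable(FakeTable& t, bool willBeUpdated) {
    const std::bitset<8> save_read_set = t.read_set;
    if (willBeUpdated)
      t.read_set = t.all_set;    // break the read_set dependency loop

    // ... fetch the row here, honouring t.read_set ...

    t.read_set = save_read_set;  // restore on every exit path
  }

  int main() {
    FakeTable t;
    readConstTable(t, true);
    assert(t.read_set.count() == 1);   // original read_set restored
    return 0;
  }
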
=== modified file 'storage/ndb/ndb_configure.m4'
--- a/storage/ndb/ndb_configure.m4	2011-09-22 17:08:17 +0000
+++ b/storage/ndb/ndb_configure.m4	2011-11-08 09:05:02 +0000
@@ -2,7 +2,7 @@
 # Should be updated when creating a new NDB version
 NDB_VERSION_MAJOR=7
 NDB_VERSION_MINOR=0
-NDB_VERSION_BUILD=28
+NDB_VERSION_BUILD=29
 NDB_VERSION_STATUS=""
 
 dnl for build ndb docs

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-10-21 08:59:23 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-11-03 08:40:19 +0000
@@ -65,7 +65,7 @@
 #include <signaldata/UtilPrepare.hpp>
 #include <signaldata/UtilExecute.hpp>
 #include <signaldata/UtilRelease.hpp>
-#include <signaldata/SumaImpl.hpp> 
+#include <signaldata/SumaImpl.hpp>
 
 #include <signaldata/LqhFrag.hpp>
 #include <signaldata/DictStart.hpp>
@@ -216,7 +216,7 @@ Dbdict::execDUMP_STATE_ORD(Signal* signa
     sendSignal(DBDICT_REF, GSN_DROP_TABLE_REQ, signal,
 	       DropTableReq::SignalLength, JBB);
   }
-#endif  
+#endif
 #define MEMINFO(x, y) infoEvent(x ": %d %d", y.getSize(), y.getNoOfFree())
   if(signal->theData[0] == 1226){
     MEMINFO("c_obj_pool", c_obj_pool);
@@ -226,20 +226,20 @@ Dbdict::execDUMP_STATE_ORD(Signal* signa
 
   if (signal->theData[0] == 1227)
   {
-    DLHashTable<DictObject>::Iterator iter;
+    DictObject_hash::Iterator iter;
     bool ok = c_obj_hash.first(iter);
     for(; ok; ok = c_obj_hash.next(iter))
     {
-      Rope name(c_rope_pool, iter.curr.p->m_name);
+      LocalRope name(c_rope_pool, iter.curr.p->m_name);
       char buf[1024];
       name.copy(buf);
-      ndbout_c("%s m_ref_count: %d", buf, iter.curr.p->m_ref_count); 
+      ndbout_c("%s m_ref_count: %d", buf, iter.curr.p->m_ref_count);
       if (iter.curr.p->m_trans_key != 0)
         ndbout_c("- m_trans_key: %u m_op_ref_count: %u",
                  iter.curr.p->m_trans_key, iter.curr.p->m_op_ref_count);
     }
-  }    
-  
+  }
+
   if (signal->theData[0] == 8004)
   {
     infoEvent("DICT: c_counterMgr size: %u free: %u",
@@ -380,7 +380,7 @@ void Dbdict::execDBINFO_SCANREQ(Signal *
 // processes.
 /* ---------------------------------------------------------------- */
 /* ---------------------------------------------------------------- */
-void Dbdict::execCONTINUEB(Signal* signal) 
+void Dbdict::execCONTINUEB(Signal* signal)
 {
   jamEntry();
   switch (signal->theData[0]) {
@@ -500,7 +500,7 @@ void Dbdict::packTableIntoPages(Signal*
   c_pageRecordArray.getPtr(pagePtr, pageId);
 
   memset(&pagePtr.p->word[0], 0, 4 * ZPAGE_HEADER_SIZE);
-  LinearWriter w(&pagePtr.p->word[ZPAGE_HEADER_SIZE], 
+  LinearWriter w(&pagePtr.p->word[ZPAGE_HEADER_SIZE],
 		 ZMAX_PAGES_OF_TABLE_DEFINITION * ZSIZE_OF_PAGES_IN_WORDS);
   w.first();
   switch((DictTabInfo::TableType)type) {
@@ -533,7 +533,7 @@ void Dbdict::packTableIntoPages(Signal*
       ref->senderData = c_retrieveRecord.m_senderData;
       ref->errorCode = err;
       Uint32 dstRef = c_retrieveRecord.blockRef;
-      sendSignal(dstRef, GSN_GET_TABINFOREF, signal, 
+      sendSignal(dstRef, GSN_GET_TABINFOREF, signal,
                  GetTabInfoRef::SignalLength, JBB);
       initRetrieveRecord(0,0,0);
       return;
@@ -563,7 +563,7 @@ void Dbdict::packTableIntoPages(Signal*
     break;
   }
   case DictTabInfo::HashMap:{
-    Ptr<HashMapRecord> hm_ptr;
+    HashMapRecordPtr hm_ptr;
     ndbrequire(c_hash_map_hash.find(hm_ptr, tableId));
     packHashMapIntoPages(w, hm_ptr);
     break;
@@ -577,12 +577,12 @@ void Dbdict::packTableIntoPages(Signal*
   case DictTabInfo::ReorgTrigger:
     ndbrequire(false);
   }
-  
+
   Uint32 wordsOfTable = w.getWordsUsed();
   Uint32 pagesUsed = WORDS2PAGES(wordsOfTable);
-  pagePtr.p->word[ZPOS_CHECKSUM] = 
+  pagePtr.p->word[ZPOS_CHECKSUM] =
     computeChecksum(&pagePtr.p->word[0], pagesUsed * ZSIZE_OF_PAGES_IN_WORDS);
-  
+
   switch (c_packTable.m_state) {
   case PackTable::PTS_IDLE:
     ndbrequire(false);
@@ -603,7 +603,7 @@ void
 Dbdict::packTableIntoPages(SimpleProperties::Writer & w,
 			       TableRecordPtr tablePtr,
 			       Signal* signal){
-  
+
   union {
     char tableName[MAX_TAB_NAME_SIZE];
     char frmData[MAX_FRM_DATA_SIZE];
@@ -622,18 +622,18 @@ Dbdict::packTableIntoPages(SimplePropert
   w.add(DictTabInfo::NoOfNullable, tablePtr.p->noOfNullAttr);
   w.add(DictTabInfo::NoOfVariable, (Uint32)0);
   w.add(DictTabInfo::KeyLength, tablePtr.p->tupKeyLength);
-  
-  w.add(DictTabInfo::TableLoggedFlag, 
+
+  w.add(DictTabInfo::TableLoggedFlag,
 	!!(tablePtr.p->m_bits & TableRecord::TR_Logged));
-  w.add(DictTabInfo::RowGCIFlag, 
+  w.add(DictTabInfo::RowGCIFlag,
 	!!(tablePtr.p->m_bits & TableRecord::TR_RowGCI));
-  w.add(DictTabInfo::RowChecksumFlag, 
+  w.add(DictTabInfo::RowChecksumFlag,
 	!!(tablePtr.p->m_bits & TableRecord::TR_RowChecksum));
-  w.add(DictTabInfo::TableTemporaryFlag, 
+  w.add(DictTabInfo::TableTemporaryFlag,
 	!!(tablePtr.p->m_bits & TableRecord::TR_Temporary));
   w.add(DictTabInfo::ForceVarPartFlag,
 	!!(tablePtr.p->m_bits & TableRecord::TR_ForceVarPart));
-  
+
   w.add(DictTabInfo::MinLoadFactor, tablePtr.p->minLoadFactor);
   w.add(DictTabInfo::MaxLoadFactor, tablePtr.p->maxLoadFactor);
   w.add(DictTabInfo::TableKValue, tablePtr.p->kValue);
@@ -655,7 +655,7 @@ Dbdict::packTableIntoPages(SimplePropert
 
   if (tablePtr.p->hashMapObjectId != RNIL)
   {
-    HashMapPtr hm_ptr;
+    HashMapRecordPtr hm_ptr;
     ndbrequire(c_hash_map_hash.find(hm_ptr, tablePtr.p->hashMapObjectId));
     w.add(DictTabInfo::HashMapVersion, hm_ptr.p->m_object_version);
   }
@@ -674,7 +674,7 @@ Dbdict::packTableIntoPages(SimplePropert
       err = CreateFragmentationRef::InvalidPrimaryTable;
     }
     if (unlikely(err != 0))
-    { 
+    {
       jam();
       signal->theData[0] = err;
       return;
@@ -693,7 +693,7 @@ Dbdict::packTableIntoPages(SimplePropert
     /* This part is run at CREATE_TABLEREQ, ALTER_TABLEREQ */
     ;
   }
-  
+
   if (tablePtr.p->primaryTableId != RNIL)
   {
     jam();
@@ -736,7 +736,7 @@ Dbdict::packTableIntoPages(SimplePropert
   }
 
   AttributeRecordPtr attrPtr;
-  LocalDLFifoList<AttributeRecord> list(c_attributeRecordPool, 
+  LocalAttributeRecord_list list(c_attributeRecordPool,
 				    tablePtr.p->m_attributes);
   for(list.first(attrPtr); !attrPtr.isNull(); list.next(attrPtr)){
     jam();
@@ -747,7 +747,7 @@ Dbdict::packTableIntoPages(SimplePropert
     w.add(DictTabInfo::AttributeName, attributeName);
     w.add(DictTabInfo::AttributeId, attrPtr.p->attributeId);
     w.add(DictTabInfo::AttributeKeyFlag, attrPtr.p->tupleKey > 0);
-    
+
     const Uint32 desc = attrPtr.p->attributeDescriptor;
     const Uint32 attrType = AttributeDescriptor::getType(desc);
     const Uint32 attrSize = AttributeDescriptor::getSize(desc);
@@ -757,7 +757,7 @@ Dbdict::packTableIntoPages(SimplePropert
     const Uint32 DKey = AttributeDescriptor::getDKey(desc);
     const Uint32 disk= AttributeDescriptor::getDiskBased(desc);
     const Uint32 dynamic= AttributeDescriptor::getDynamic(desc);
-    
+
 
     // AttributeType deprecated
     w.add(DictTabInfo::AttributeSize, attrSize);
@@ -770,14 +770,14 @@ Dbdict::packTableIntoPages(SimplePropert
     w.add(DictTabInfo::AttributeExtPrecision, attrPtr.p->extPrecision);
     w.add(DictTabInfo::AttributeExtScale, attrPtr.p->extScale);
     w.add(DictTabInfo::AttributeExtLength, attrPtr.p->extLength);
-    w.add(DictTabInfo::AttributeAutoIncrement, 
+    w.add(DictTabInfo::AttributeAutoIncrement,
 	  (Uint32)attrPtr.p->autoIncrement);
 
     if(disk)
       w.add(DictTabInfo::AttributeStorageType, (Uint32)NDB_STORAGETYPE_DISK);
     else
       w.add(DictTabInfo::AttributeStorageType, (Uint32)NDB_STORAGETYPE_MEMORY);
-    
+
     ConstRope def(c_rope_pool, attrPtr.p->defaultValue);
     def.copy(defaultValue);
 
@@ -790,9 +790,9 @@ Dbdict::packTableIntoPages(SimplePropert
       memcpy(&a, defaultValue, sizeof(Uint32));
       a = htonl(a);
       memcpy(defaultValue, &a, sizeof(Uint32));
-      
+
       Uint32 remainBytes = def.size() - sizeof(Uint32);
-      
+
       if (remainBytes)
         NdbSqlUtil::convertByteOrder(attrType,
                                      attrSize,
@@ -806,7 +806,7 @@ Dbdict::packTableIntoPages(SimplePropert
     w.add(DictTabInfo::AttributeDefaultValue, defaultValue, def.size());
     w.add(DictTabInfo::AttributeEnd, 1);
   }
-  
+
   w.add(DictTabInfo::TableEnd, 1);
 }
 
@@ -815,7 +815,7 @@ Dbdict::packFilegroupIntoPages(SimplePro
 			       FilegroupPtr fg_ptr,
 			       const Uint32 undo_free_hi,
 			       const Uint32 undo_free_lo){
-  
+
   DictFilegroupInfo::Filegroup fg; fg.init();
   ConstRope r(c_rope_pool, fg_ptr.p->m_name);
   r.copy(fg.FilegroupName);
@@ -842,20 +842,20 @@ Dbdict::packFilegroupIntoPages(SimplePro
   default:
     ndbrequire(false);
   }
-  
+
   SimpleProperties::UnpackStatus s;
-  s = SimpleProperties::pack(w, 
+  s = SimpleProperties::pack(w,
 			     &fg,
-			     DictFilegroupInfo::Mapping, 
+			     DictFilegroupInfo::Mapping,
 			     DictFilegroupInfo::MappingSize, true);
-  
+
   ndbrequire(s == SimpleProperties::Eof);
 }
 
 void
 Dbdict::packFileIntoPages(SimpleProperties::Writer & w,
 			  FilePtr f_ptr, const Uint32 free_extents){
-  
+
   DictFilegroupInfo::File f; f.init();
   ConstRope r(c_rope_pool, f_ptr.p->m_path);
   r.copy(f.FileName);
@@ -873,11 +873,11 @@ Dbdict::packFileIntoPages(SimpleProperti
   f.FilegroupVersion = lfg_ptr.p->m_version;
 
   SimpleProperties::UnpackStatus s;
-  s = SimpleProperties::pack(w, 
+  s = SimpleProperties::pack(w,
 			     &f,
-			     DictFilegroupInfo::FileMapping, 
+			     DictFilegroupInfo::FileMapping,
 			     DictFilegroupInfo::FileMappingSize, true);
-  
+
   ndbrequire(s == SimpleProperties::Eof);
 }
 
@@ -936,7 +936,7 @@ Dbdict::execCREATE_FRAGMENTATION_REQ(Sig
 /* ---------------------------------------------------------------- */
 // A file was successfully closed.
 /* ---------------------------------------------------------------- */
-void Dbdict::execFSCLOSECONF(Signal* signal) 
+void Dbdict::execFSCLOSECONF(Signal* signal)
 {
   FsConnectRecordPtr fsPtr;
   FsConf * const fsConf = (FsConf *)&signal->theData[0];
@@ -976,7 +976,7 @@ void Dbdict::execFSCLOSECONF(Signal* sig
 /* ---------------------------------------------------------------- */
 // A file was successfully opened.
 /* ---------------------------------------------------------------- */
-void Dbdict::execFSOPENCONF(Signal* signal) 
+void Dbdict::execFSOPENCONF(Signal* signal)
 {
   FsConnectRecordPtr fsPtr;
   jamEntry();
@@ -1026,7 +1026,7 @@ void Dbdict::execFSOPENCONF(Signal* sign
 /* ---------------------------------------------------------------- */
 // An open file was refused.
 /* ---------------------------------------------------------------- */
-void Dbdict::execFSOPENREF(Signal* signal) 
+void Dbdict::execFSOPENREF(Signal* signal)
 {
   jamEntry();
   FsRef * const fsRef = (FsRef *)&signal->theData[0];
@@ -1054,7 +1054,7 @@ void Dbdict::execFSOPENREF(Signal* signa
 /* ---------------------------------------------------------------- */
 // A file was successfully read.
 /* ---------------------------------------------------------------- */
-void Dbdict::execFSREADCONF(Signal* signal) 
+void Dbdict::execFSREADCONF(Signal* signal)
 {
   jamEntry();
   FsConf * const fsConf = (FsConf *)&signal->theData[0];
@@ -1091,7 +1091,7 @@ void Dbdict::execFSREADCONF(Signal* sign
 /* ---------------------------------------------------------------- */
 // A read file was refused.
 /* ---------------------------------------------------------------- */
-void Dbdict::execFSREADREF(Signal* signal) 
+void Dbdict::execFSREADREF(Signal* signal)
 {
   jamEntry();
   FsRef * const fsRef = (FsRef *)&signal->theData[0];
@@ -1119,7 +1119,7 @@ void Dbdict::execFSREADREF(Signal* signa
 /* ---------------------------------------------------------------- */
 // A file was successfully written.
 /* ---------------------------------------------------------------- */
-void Dbdict::execFSWRITECONF(Signal* signal) 
+void Dbdict::execFSWRITECONF(Signal* signal)
 {
   FsConf * const fsConf = (FsConf *)&signal->theData[0];
   FsConnectRecordPtr fsPtr;
@@ -1145,11 +1145,11 @@ void Dbdict::execFSWRITECONF(Signal* sig
 // Routines to handle Read/Write of Table Files
 /* ---------------------------------------------------------------- */
 void
-Dbdict::writeTableFile(Signal* signal, Uint32 tableId, 
+Dbdict::writeTableFile(Signal* signal, Uint32 tableId,
 		       SegmentedSectionPtr tabInfoPtr, Callback* callback){
-  
+
   ndbrequire(c_writeTableRecord.tableWriteState == WriteTableRecord::IDLE);
-  
+
   Uint32 pages = WORDS2PAGES(tabInfoPtr.sz);
   c_writeTableRecord.no_of_words = tabInfoPtr.sz;
   c_writeTableRecord.tableWriteState = WriteTableRecord::TWR_CALLBACK;
@@ -1157,16 +1157,16 @@ Dbdict::writeTableFile(Signal* signal, U
 
   c_writeTableRecord.pageId = 0;
   ndbrequire(pages == 1);
-  
+
   PageRecordPtr pageRecPtr;
   c_pageRecordArray.getPtr(pageRecPtr, c_writeTableRecord.pageId);
   copy(&pageRecPtr.p->word[ZPAGE_HEADER_SIZE], tabInfoPtr);
-  
+
   memset(&pageRecPtr.p->word[0], 0, 4 * ZPAGE_HEADER_SIZE);
-  pageRecPtr.p->word[ZPOS_CHECKSUM] = 
-    computeChecksum(&pageRecPtr.p->word[0], 
+  pageRecPtr.p->word[ZPOS_CHECKSUM] =
+    computeChecksum(&pageRecPtr.p->word[0],
 		    pages * ZSIZE_OF_PAGES_IN_WORDS);
-  
+
   startWriteTableFile(signal, tableId);
 }
 
@@ -1220,7 +1220,7 @@ void Dbdict::openTableFile(Signal* signa
                            Uint32 fileNo,
                            Uint32 fsConPtr,
                            Uint32 tableId,
-                           bool   writeFlag) 
+                           bool   writeFlag)
 {
   FsOpenReq * const fsOpenReq = (FsOpenReq *)&signal->theData[0];
 
@@ -1228,10 +1228,10 @@ void Dbdict::openTableFile(Signal* signa
   fsOpenReq->userPointer = fsConPtr;
   if (writeFlag) {
     jam();
-    fsOpenReq->fileFlags = 
-      FsOpenReq::OM_WRITEONLY | 
-      FsOpenReq::OM_TRUNCATE | 
-      FsOpenReq::OM_CREATE | 
+    fsOpenReq->fileFlags =
+      FsOpenReq::OM_WRITEONLY |
+      FsOpenReq::OM_TRUNCATE |
+      FsOpenReq::OM_CREATE |
       FsOpenReq::OM_SYNC;
   } else {
     jam();
@@ -1255,7 +1255,7 @@ void Dbdict::openTableFile(Signal* signa
   sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA);
 }//openTableFile()
 
-void Dbdict::writeTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr) 
+void Dbdict::writeTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
 {
   FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
 
@@ -1264,7 +1264,7 @@ void Dbdict::writeTableFile(Signal* sign
   fsRWReq->userPointer = fsConPtr;
   fsRWReq->operationFlag = 0; // Initialise before bit changes
   FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 1);
-  FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag, 
+  FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
                                 FsReadWriteReq::fsFormatArrayOfPages);
   fsRWReq->varIndex = ZBAT_TABLE_FILE;
   fsRWReq->numberOfPages = WORDS2PAGES(c_writeTableRecord.no_of_words);
@@ -1290,7 +1290,7 @@ void Dbdict::closeWriteTableConf(Signal*
     fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_TAB_FILE;
     openTableFile(signal, 1, fsPtr.i, c_writeTableRecord.tableId, true);
     return;
-  } 
+  }
   ndbrequire(c_writeTableRecord.noOfTableFilesHandled == 2);
   c_fsConnectRecordPool.release(fsPtr);
   WriteTableRecord::TableWriteState state = c_writeTableRecord.tableWriteState;
@@ -1315,7 +1315,7 @@ void Dbdict::startReadTableFile(Signal*
 {
   //globalSignalLoggers.log(number(), "startReadTableFile");
   ndbrequire(!c_readTableRecord.inUse);
-  
+
   FsConnectRecordPtr fsPtr;
   c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord());
   c_readTableRecord.inUse = true;
@@ -1325,14 +1325,14 @@ void Dbdict::startReadTableFile(Signal*
 }//Dbdict::startReadTableFile()
 
 void Dbdict::openReadTableRef(Signal* signal,
-                              FsConnectRecordPtr fsPtr) 
+                              FsConnectRecordPtr fsPtr)
 {
   fsPtr.p->fsState = FsConnectRecord::OPEN_READ_TAB_FILE2;
   openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false);
   return;
 }//Dbdict::openReadTableConf()
 
-void Dbdict::readTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr) 
+void Dbdict::readTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
 {
   FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
 
@@ -1341,7 +1341,7 @@ void Dbdict::readTableFile(Signal* signa
   fsRWReq->userPointer = fsConPtr;
   fsRWReq->operationFlag = 0; // Initialise before bit changes
   FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 0);
-  FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag, 
+  FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
                                 FsReadWriteReq::fsFormatArrayOfPages);
   fsRWReq->varIndex = ZBAT_TABLE_FILE;
   fsRWReq->numberOfPages = WORDS2PAGES(c_readTableRecord.no_of_words);
@@ -1367,10 +1367,10 @@ void Dbdict::readTableConf(Signal* signa
 
   PageRecordPtr tmpPagePtr;
   c_pageRecordArray.getPtr(tmpPagePtr, c_readTableRecord.pageId);
-  Uint32 sz = 
+  Uint32 sz =
     WORDS2PAGES(c_readTableRecord.no_of_words)*ZSIZE_OF_PAGES_IN_WORDS;
   Uint32 chk = computeChecksum((const Uint32*)tmpPagePtr.p, sz);
-  
+
   ndbrequire((chk == 0) || !crashInd);
   if(chk != 0){
     jam();
@@ -1378,7 +1378,7 @@ void Dbdict::readTableConf(Signal* signa
     readTableRef(signal, fsPtr);
     return;
   }//if
-  
+
   fsPtr.p->fsState = FsConnectRecord::CLOSE_READ_TAB_FILE;
   closeFile(signal, fsPtr.p->filePtr, fsPtr.i);
   return;
@@ -1400,7 +1400,7 @@ void Dbdict::closeReadTableConf(Signal*
 {
   c_fsConnectRecordPool.release(fsPtr);
   c_readTableRecord.inUse = false;
-  
+
   execute(signal, c_readTableRecord.m_callback, 0);
   return;
 }//Dbdict::closeReadTableConf()
@@ -1411,7 +1411,7 @@ void Dbdict::closeReadTableConf(Signal*
 NdbOut& operator<<(NdbOut& out, const SchemaFile::TableEntry entry);
 
 void
-Dbdict::updateSchemaState(Signal* signal, Uint32 tableId, 
+Dbdict::updateSchemaState(Signal* signal, Uint32 tableId,
 			  SchemaFile::TableEntry* te, Callback* callback,
                           bool savetodisk, bool dicttrans)
 {
@@ -1435,11 +1435,11 @@ Dbdict::updateSchemaState(Signal* signal
   * tableEntry = * te;
   computeChecksum(xsf, tableId / NDB_SF_PAGE_ENTRIES);
 #else
-  SchemaFile::TableState newState = 
+  SchemaFile::TableState newState =
     (SchemaFile::TableState)te->m_tableState;
-  SchemaFile::TableState oldState = 
+  SchemaFile::TableState oldState =
     (SchemaFile::TableState)tableEntry->m_tableState;
-  
+
   Uint32 newVersion = te->m_tableVersion;
   Uint32 oldVersion = tableEntry->m_tableVersion;
 
@@ -1507,7 +1507,7 @@ Dbdict::updateSchemaState(Signal* signal
     ndbrequire((oldState == SchemaFile::ADD_STARTED));
   }//if
   ndbrequire(ok);
-  
+
   * tableEntry = * te;
   computeChecksum(xsf, tableId / NDB_SF_PAGE_ENTRIES);
 
@@ -1515,13 +1515,13 @@ Dbdict::updateSchemaState(Signal* signal
   {
     ndbrequire(c_writeSchemaRecord.inUse == false);
     c_writeSchemaRecord.inUse = true;
-    
+
     c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
     c_writeSchemaRecord.newFile = false;
     c_writeSchemaRecord.firstPage = tableId / NDB_SF_PAGE_ENTRIES;
     c_writeSchemaRecord.noOfPages = 1;
     c_writeSchemaRecord.m_callback = * callback;
-    
+
     startWriteSchemaFile(signal);
   }
   else
@@ -1555,12 +1555,12 @@ void Dbdict::openSchemaFile(Signal* sign
   fsOpenReq->userPointer = fsConPtr;
   if (writeFlag) {
     jam();
-    fsOpenReq->fileFlags = 
-      FsOpenReq::OM_WRITEONLY | 
+    fsOpenReq->fileFlags =
+      FsOpenReq::OM_WRITEONLY |
       FsOpenReq::OM_SYNC;
     if (newFile)
       fsOpenReq->fileFlags |=
-        FsOpenReq::OM_TRUNCATE | 
+        FsOpenReq::OM_TRUNCATE |
         FsOpenReq::OM_CREATE;
   } else {
     jam();
@@ -1582,7 +1582,7 @@ void Dbdict::openSchemaFile(Signal* sign
   sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA);
 }//openSchemaFile()
 
-void Dbdict::writeSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr) 
+void Dbdict::writeSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
 {
   FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
 
@@ -1597,7 +1597,7 @@ void Dbdict::writeSchemaFile(Signal* sig
   fsRWReq->userPointer = fsConPtr;
   fsRWReq->operationFlag = 0; // Initialise before bit changes
   FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 1);
-  FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag, 
+  FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
                                 FsReadWriteReq::fsFormatArrayOfPages);
   fsRWReq->varIndex = ZBAT_SCHEMA_FILE;
   fsRWReq->numberOfPages = wr.noOfPages;
@@ -1615,7 +1615,7 @@ void Dbdict::writeSchemaConf(Signal* sig
   return;
 }//Dbdict::writeSchemaConf()
 
-void Dbdict::closeFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr) 
+void Dbdict::closeFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
 {
   FsCloseReq * const fsCloseReq = (FsCloseReq *)&signal->theData[0];
   fsCloseReq->filePointer = filePtr;
@@ -1635,9 +1635,9 @@ void Dbdict::closeWriteSchemaConf(Signal
     fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_SCHEMA;
     openSchemaFile(signal, 1, fsPtr.i, true, c_writeSchemaRecord.newFile);
     return;
-  } 
+  }
   ndbrequire(c_writeSchemaRecord.noOfSchemaFilesHandled == 2);
-  
+
   c_fsConnectRecordPool.release(fsPtr);
 
   c_writeSchemaRecord.inUse = false;
@@ -1655,13 +1655,13 @@ void Dbdict::startReadSchemaFile(Signal*
 }//Dbdict::startReadSchemaFile()
 
 void Dbdict::openReadSchemaRef(Signal* signal,
-                               FsConnectRecordPtr fsPtr) 
+                               FsConnectRecordPtr fsPtr)
 {
   fsPtr.p->fsState = FsConnectRecord::OPEN_READ_SCHEMA2;
   openSchemaFile(signal, 1, fsPtr.i, false, false);
 }//Dbdict::openReadSchemaRef()
 
-void Dbdict::readSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr) 
+void Dbdict::readSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
 {
   FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
 
@@ -1676,7 +1676,7 @@ void Dbdict::readSchemaFile(Signal* sign
   fsRWReq->userPointer = fsConPtr;
   fsRWReq->operationFlag = 0; // Initialise before bit changes
   FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 0);
-  FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag, 
+  FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
                                 FsReadWriteReq::fsFormatArrayOfPages);
   fsRWReq->varIndex = ZBAT_SCHEMA_FILE;
   fsRWReq->numberOfPages = rr.noOfPages;
@@ -1736,8 +1736,8 @@ void Dbdict::readSchemaConf(Signal* sign
     return;
   }
 
-  if (sf0->NdbVersion < NDB_MAKE_VERSION(6,4,0) && 
-      ! convertSchemaFileTo_6_4(xsf)) 
+  if (sf0->NdbVersion < NDB_MAKE_VERSION(6,4,0) &&
+      ! convertSchemaFileTo_6_4(xsf))
   {
     jam();
     ndbrequire(! crashInd);
@@ -1812,7 +1812,7 @@ void Dbdict::closeReadSchemaConf(Signal*
     jam();
     {
       // write back both copies
-      
+
       ndbrequire(c_writeSchemaRecord.inUse == false);
       XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0 ];
       Uint32 noOfPages =
@@ -1826,7 +1826,7 @@ void Dbdict::closeReadSchemaConf(Signal*
       c_writeSchemaRecord.firstPage = 0;
       c_writeSchemaRecord.noOfPages = xsf->noOfPages;
 
-      c_writeSchemaRecord.m_callback.m_callbackFunction = 
+      c_writeSchemaRecord.m_callback.m_callbackFunction =
         safe_cast(&Dbdict::initSchemaFile_conf);
 
       startWriteSchemaFile(signal);
@@ -1901,7 +1901,7 @@ Dbdict::convertSchemaFileTo_6_4(XSchemaF
     {
       Uint32 n = i * NDB_SF_PAGE_ENTRIES + j;
       SchemaFile::TableEntry * transEntry = getTableEntry(xsf, n);
-      
+
       switch(SchemaFile::Old::TableState(transEntry->m_tableState)) {
       case SchemaFile::Old::INIT:
         transEntry->m_tableState = SchemaFile::SF_UNUSED;
@@ -1962,7 +1962,7 @@ Dbdict::Dbdict(Block_context& ctx):
   c_opRecordSequence(0)
 {
   BLOCK_CONSTRUCTOR(Dbdict);
-  
+
   // Transit signals
   addRecSignal(GSN_DUMP_STATE_ORD, &Dbdict::execDUMP_STATE_ORD);
   addRecSignal(GSN_GET_TABINFOREQ, &Dbdict::execGET_TABINFOREQ);
@@ -2095,11 +2095,11 @@ Dbdict::Dbdict(Block_context& ctx):
   addRecSignal(GSN_LIST_TABLES_REQ, &Dbdict::execLIST_TABLES_REQ);
 
   addRecSignal(GSN_DROP_TABLE_REQ, &Dbdict::execDROP_TABLE_REQ);
-  
+
   addRecSignal(GSN_PREP_DROP_TAB_REQ, &Dbdict::execPREP_DROP_TAB_REQ);
   addRecSignal(GSN_PREP_DROP_TAB_REF, &Dbdict::execPREP_DROP_TAB_REF);
   addRecSignal(GSN_PREP_DROP_TAB_CONF, &Dbdict::execPREP_DROP_TAB_CONF);
-  
+
   addRecSignal(GSN_DROP_TAB_REF, &Dbdict::execDROP_TAB_REF);
   addRecSignal(GSN_DROP_TAB_CONF, &Dbdict::execDROP_TAB_CONF);
 
@@ -2173,7 +2173,7 @@ Dbdict::Dbdict(Block_context& ctx):
   addRecSignal(GSN_INDEX_STAT_REP, &Dbdict::execINDEX_STAT_REP);
 }//Dbdict::Dbdict()
 
-Dbdict::~Dbdict() 
+Dbdict::~Dbdict()
 {
 }//Dbdict::~Dbdict()
 
@@ -2193,7 +2193,7 @@ Dbdict::getParam(const char * name, Uint
   return false;
 }
 
-void Dbdict::initCommonData() 
+void Dbdict::initCommonData()
 {
 /* ---------------------------------------------------------------- */
 // Initialise all common variables.
@@ -2231,7 +2231,7 @@ void Dbdict::initCommonData()
 
 }//Dbdict::initCommonData()
 
-void Dbdict::initRecords() 
+void Dbdict::initRecords()
 {
   initNodeRecords();
   initPageRecords();
@@ -2239,7 +2239,7 @@ void Dbdict::initRecords()
   initTriggerRecords();
 }//Dbdict::initRecords()
 
-void Dbdict::initSendSchemaRecord() 
+void Dbdict::initSendSchemaRecord()
 {
   c_sendSchemaRecord.noOfWords = (Uint32)-1;
   c_sendSchemaRecord.pageId = RNIL;
@@ -2249,7 +2249,7 @@ void Dbdict::initSendSchemaRecord()
   //c_sendSchemaRecord.sendSchemaState = SendSchemaRecord::IDLE;
 }//initSendSchemaRecord()
 
-void Dbdict::initReadTableRecord() 
+void Dbdict::initReadTableRecord()
 {
   c_readTableRecord.no_of_words= 0;
   c_readTableRecord.pageId = RNIL;
@@ -2257,7 +2257,7 @@ void Dbdict::initReadTableRecord()
   c_readTableRecord.inUse = false;
 }//initReadTableRecord()
 
-void Dbdict::initWriteTableRecord() 
+void Dbdict::initWriteTableRecord()
 {
   c_writeTableRecord.no_of_words= 0;
   c_writeTableRecord.pageId = RNIL;
@@ -2266,20 +2266,20 @@ void Dbdict::initWriteTableRecord()
   c_writeTableRecord.tableWriteState = WriteTableRecord::IDLE;
 }//initWriteTableRecord()
 
-void Dbdict::initReadSchemaRecord() 
+void Dbdict::initReadSchemaRecord()
 {
   c_readSchemaRecord.pageId = RNIL;
   c_readSchemaRecord.schemaReadState = ReadSchemaRecord::IDLE;
 }//initReadSchemaRecord()
 
-void Dbdict::initWriteSchemaRecord() 
+void Dbdict::initWriteSchemaRecord()
 {
   c_writeSchemaRecord.inUse = false;
   c_writeSchemaRecord.pageId = RNIL;
   c_writeSchemaRecord.noOfSchemaFilesHandled = 3;
 }//initWriteSchemaRecord()
 
-void Dbdict::initRetrieveRecord(Signal* signal, Uint32 i, Uint32 returnCode) 
+void Dbdict::initRetrieveRecord(Signal* signal, Uint32 i, Uint32 returnCode)
 {
   c_retrieveRecord.busyState = false;
   c_retrieveRecord.blockRef = 0;
@@ -2291,13 +2291,13 @@ void Dbdict::initRetrieveRecord(Signal*
   c_retrieveRecord.m_useLongSig = false;
 }//initRetrieveRecord()
 
-void Dbdict::initSchemaRecord() 
+void Dbdict::initSchemaRecord()
 {
   c_schemaRecord.schemaPage = RNIL;
   c_schemaRecord.oldSchemaPage = RNIL;
 }//Dbdict::initSchemaRecord()
 
-void Dbdict::initNodeRecords() 
+void Dbdict::initNodeRecords()
 {
   jam();
   for (unsigned i = 1; i < MAX_NDB_NODES; i++) {
@@ -2309,7 +2309,7 @@ void Dbdict::initNodeRecords()
   }//for
 }//Dbdict::initNodeRecords()
 
-void Dbdict::initPageRecords() 
+void Dbdict::initPageRecords()
 {
   c_retrieveRecord.retrievePage =  ZMAX_PAGES_OF_TABLE_DEFINITION;
   ndbrequire(ZNUMBER_OF_PAGES >= (ZMAX_PAGES_OF_TABLE_DEFINITION + 1));
@@ -2317,7 +2317,7 @@ void Dbdict::initPageRecords()
   c_schemaRecord.oldSchemaPage = NDB_SF_MAX_PAGES;
 }//Dbdict::initPageRecords()
 
-void Dbdict::initTableRecords() 
+void Dbdict::initTableRecords()
 {
   TableRecordPtr tablePtr;
   while (1) {
@@ -2332,7 +2332,7 @@ void Dbdict::initTableRecords()
   }//while
 }//Dbdict::initTableRecords()
 
-void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr) 
+void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr)
 {
   new (tablePtr.p) TableRecord();
   tablePtr.p->filePtr[0] = RNIL;
@@ -2401,7 +2401,7 @@ void Dbdict::initialiseTriggerRecord(Tri
   triggerPtr.p->indexId = RNIL;
 }
 
-Uint32 Dbdict::getFsConnRecord() 
+Uint32 Dbdict::getFsConnRecord()
 {
   FsConnectRecordPtr fsPtr;
   c_fsConnectRecordPool.seize(fsPtr);
@@ -2439,13 +2439,8 @@ Uint32 Dbdict::getFreeObjId(Uint32 minId
   return RNIL;
 }
 
-Uint32 Dbdict::getFreeTableRecord(Uint32 primaryTableId) 
+Uint32 Dbdict::getFreeTableRecord()
 {
-  Uint32 minId = (primaryTableId == RNIL ? 0 : primaryTableId + 1);
-  if (ERROR_INSERTED(6012) && minId < 4096){
-    minId = 4096;
-    CLEAR_ERROR_INSERT_VALUE;
-  }
   Uint32 i = getFreeObjId(0);
   if (i == RNIL) {
     jam();
@@ -2541,7 +2536,7 @@ Dbdict::check_write_obj(Uint32 objId, Ui
       jam();
       return GetTabInfoRef::TableNotDefined;
     }
-    
+
     if (te->m_transId == 0 || te->m_transId == transId)
     {
       jam();
@@ -2583,7 +2578,7 @@ Dbdict::check_write_obj(Uint32 objId, Ui
 /* ---------------------------------------------------------------- */
 // This is sent as the first signal during start/restart.
 /* ---------------------------------------------------------------- */
-void Dbdict::execSTTOR(Signal* signal) 
+void Dbdict::execSTTOR(Signal* signal)
 {
   jamEntry();
   c_startPhase = signal->theData[1];
@@ -2624,7 +2619,7 @@ void Dbdict::sendSTTORRY(Signal* signal)
 /* ---------------------------------------------------------------- */
 // We receive information about sizes of records.
 /* ---------------------------------------------------------------- */
-void Dbdict::execREAD_CONFIG_REQ(Signal* signal) 
+void Dbdict::execREAD_CONFIG_REQ(Signal* signal)
 {
   const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
   Uint32 ref = req->senderRef;
@@ -2632,13 +2627,13 @@ void Dbdict::execREAD_CONFIG_REQ(Signal*
   ndbrequire(req->noOfParameters == 0);
 
   jamEntry();
- 
-  const ndb_mgm_configuration_iterator * p = 
+
+  const ndb_mgm_configuration_iterator * p =
     m_ctx.m_config.getOwnConfigIterator();
   ndbrequire(p != 0);
-  
+
   Uint32 attributesize, tablerecSize;
-  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TRIGGERS, 
+  ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TRIGGERS,
 					&c_maxNoOfTriggers));
   ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_ATTRIBUTE,&attributesize));
   ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_TABLE, &tablerecSize));
@@ -2740,7 +2735,7 @@ void Dbdict::execREAD_CONFIG_REQ(Signal*
   ndb_mgm_get_int_parameter(p, CFG_DB_STRING_MEMORY, &sm);
   if (sm == 0)
     sm = 25;
-  
+
   Uint64 sb = 0;
   if (sm <= 100)
   {
@@ -2750,12 +2745,12 @@ void Dbdict::execREAD_CONFIG_REQ(Signal*
   {
     sb = sm;
   }
-  
-  sb /= (Rope::getSegmentSize() * sizeof(Uint32));
+
+  sb /= (LocalRope::getSegmentSize() * sizeof(Uint32));
   sb += 100; // more safty
   ndbrequire(sb < (Uint64(1) << 32));
   c_rope_pool.setSize(Uint32(sb));
-  
+
   // Initialize BAT for interface to file system
   NewVARIABLE* bat = allocateBat(2);
   bat[0].WA = &c_schemaPageRecordArray.getPtr(0)->word[0];
@@ -2775,12 +2770,12 @@ void Dbdict::execREAD_CONFIG_REQ(Signal*
   ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
   conf->senderRef = reference();
   conf->senderData = senderData;
-  sendSignal(ref, GSN_READ_CONFIG_CONF, signal, 
+  sendSignal(ref, GSN_READ_CONFIG_CONF, signal,
 	     ReadConfigConf::SignalLength, JBB);
 
   {
-    Ptr<DictObject> ptr;
-    SLList<DictObject> objs(c_obj_pool);
+    DictObjectPtr ptr;
+    DictObject_list objs(c_obj_pool);
     while(objs.seize(ptr))
       new (ptr.p) DictObject();
     objs.release();
@@ -2803,7 +2798,7 @@ void Dbdict::execREAD_CONFIG_REQ(Signal*
 // Start phase signals sent by CNTR. We reply with NDB_STTORRY when
 // we completed this phase.
 /* ---------------------------------------------------------------- */
-void Dbdict::execNDB_STTOR(Signal* signal) 
+void Dbdict::execNDB_STTOR(Signal* signal)
 {
   jamEntry();
   c_startPhase = signal->theData[2];
@@ -2851,7 +2846,7 @@ void Dbdict::execNDB_STTOR(Signal* signa
   }//switch
 }//execNDB_STTOR()
 
-void Dbdict::sendNDB_STTORRY(Signal* signal) 
+void Dbdict::sendNDB_STTORRY(Signal* signal)
 {
   signal->theData[0] = reference();
   sendSignal(NDBCNTR_REF, GSN_NDB_STTORRY, signal, 1, JBB);
@@ -2861,7 +2856,7 @@ void Dbdict::sendNDB_STTORRY(Signal* sig
 /* ---------------------------------------------------------------- */
 // We receive the information about which nodes that are up and down.
 /* ---------------------------------------------------------------- */
-void Dbdict::execREAD_NODESCONF(Signal* signal) 
+void Dbdict::execREAD_NODESCONF(Signal* signal)
 {
   jamEntry();
 
@@ -2898,7 +2893,7 @@ void Dbdict::execREAD_NODESCONF(Signal*
   sendNDB_STTORRY(signal);
 }//execREAD_NODESCONF()
 
-void Dbdict::initSchemaFile(Signal* signal) 
+void Dbdict::initSchemaFile(Signal* signal)
 {
   XSchemaFile * xsf = &c_schemaFile[SchemaRecord::NEW_SCHEMA_FILE];
   xsf->noOfPages = (c_tableRecordPool.getSize() + NDB_SF_PAGE_ENTRIES - 1)
@@ -2908,8 +2903,8 @@ void Dbdict::initSchemaFile(Signal* sign
   XSchemaFile * oldxsf = &c_schemaFile[SchemaRecord::OLD_SCHEMA_FILE];
   oldxsf->noOfPages = xsf->noOfPages;
   memcpy(&oldxsf->schemaPage[0], &xsf->schemaPage[0], xsf->schemaPage[0].FileSize);
-  
-  if (c_initialStart || c_initialNodeRestart) {    
+
+  if (c_initialStart || c_initialNodeRestart) {
     jam();
     ndbrequire(c_writeSchemaRecord.inUse == false);
     c_writeSchemaRecord.inUse = true;
@@ -2918,9 +2913,9 @@ void Dbdict::initSchemaFile(Signal* sign
     c_writeSchemaRecord.firstPage = 0;
     c_writeSchemaRecord.noOfPages = xsf->noOfPages;
 
-    c_writeSchemaRecord.m_callback.m_callbackFunction = 
+    c_writeSchemaRecord.m_callback.m_callbackFunction =
       safe_cast(&Dbdict::initSchemaFile_conf);
-    
+
     startWriteSchemaFile(signal);
   } else if (c_systemRestart || c_nodeRestart) {
     jam();
@@ -3106,7 +3101,7 @@ Dbdict::activateIndex_fromEndTrans(Signa
   {
     DictObjectPtr obj_ptr;
     c_obj_pool.getPtr(obj_ptr, indexPtr.p->m_obj_ptr_i);
-    Rope name(c_rope_pool, obj_ptr.p->m_name);
+    LocalRope name(c_rope_pool, obj_ptr.p->m_name);
     name.copy(indexName);
   }
 
@@ -3267,7 +3262,7 @@ Dbdict::rebuildIndex_fromEndTrans(Signal
   {
     DictObjectPtr obj_ptr;
     c_obj_pool.getPtr(obj_ptr, indexPtr.p->m_obj_ptr_i);
-    Rope name(c_rope_pool, obj_ptr.p->m_name);
+    LocalRope name(c_rope_pool, obj_ptr.p->m_name);
     name.copy(indexName);
   }
 
@@ -3307,7 +3302,7 @@ Dbdict::rebuildIndex_fromEndTrans(Signal
 // tables that should be started as part of this system restart.
 // DICT will also activate the tables in TC as part of this process.
 /* ---------------------------------------------------------------- */
-void Dbdict::execDICTSTARTREQ(Signal* signal) 
+void Dbdict::execDICTSTARTREQ(Signal* signal)
 {
   jamEntry();
   c_restartRecord.gciToRestart = signal->theData[0];
@@ -3319,7 +3314,7 @@ void Dbdict::execDICTSTARTREQ(Signal* si
     c_restartRecord.m_senderData = 0;
   }
   if (c_nodeRestart || c_initialNodeRestart) {
-    jam();   
+    jam();
 
     CRASH_INSERTION(6000);
 
@@ -3332,7 +3327,7 @@ void Dbdict::execDICTSTARTREQ(Signal* si
   ndbrequire(c_masterNodeId == getOwnNodeId());
 
   c_schemaRecord.m_callback.m_callbackData = 0;
-  c_schemaRecord.m_callback.m_callbackFunction = 
+  c_schemaRecord.m_callback.m_callbackFunction =
     safe_cast(&Dbdict::masterRestart_checkSchemaStatusComplete);
 
   /**
@@ -3373,7 +3368,7 @@ Dbdict::masterRestart_checkSchemaStatusC
   Callback c = { 0, 0 };
   sendFragmentedSignal(rg,
 		       GSN_SCHEMA_INFO,
-		       signal, 
+		       signal,
 		       1, //SchemaInfo::SignalLength,
 		       JBB,
 		       ptr,
@@ -3389,27 +3384,27 @@ Dbdict::masterRestart_checkSchemaStatusC
   sendSignal(reference(), GSN_SCHEMA_INFOCONF, signal, 1, JBB);
 }
 
-void 
+void
 Dbdict::execGET_SCHEMA_INFOREQ(Signal* signal){
 
   const Uint32 ref = signal->getSendersBlockRef();
   //const Uint32 senderData = signal->theData[0];
-  
+
   ndbrequire(c_sendSchemaRecord.inUse == false);
   c_sendSchemaRecord.inUse = true;
 
   LinearSectionPtr ptr[3];
-  
+
   XSchemaFile * xsf = &c_schemaFile[SchemaRecord::NEW_SCHEMA_FILE];
   ndbrequire(xsf->noOfPages != 0);
-  
+
   ptr[0].p = (Uint32*)&xsf->schemaPage[0];
   ptr[0].sz = xsf->noOfPages * NDB_SF_PAGE_SIZE_IN_WORDS;
 
   Callback c = { safe_cast(&Dbdict::sendSchemaComplete), 0 };
   sendFragmentedSignal(ref,
 		       GSN_SCHEMA_INFO,
-		       signal, 
+		       signal,
 		       1, //GetSchemaInfoConf::SignalLength,
 		       JBB,
 		       ptr,
@@ -3418,7 +3413,7 @@ Dbdict::execGET_SCHEMA_INFOREQ(Signal* s
 }//Dbdict::execGET_SCHEMA_INFOREQ()
 
 void
-Dbdict::sendSchemaComplete(Signal * signal, 
+Dbdict::sendSchemaComplete(Signal * signal,
 			   Uint32 callbackData,
 			   Uint32 returnCode){
   ndbrequire(c_sendSchemaRecord.inUse == true);
@@ -3431,7 +3426,7 @@ Dbdict::sendSchemaComplete(Signal * sign
 // We receive the schema info from master as part of all restarts
 // except the initial start where no tables exist.
 /* ---------------------------------------------------------------- */
-void Dbdict::execSCHEMA_INFO(Signal* signal) 
+void Dbdict::execSCHEMA_INFO(Signal* signal)
 {
   jamEntry();
   if(!assembleFragments(signal)){
@@ -3463,13 +3458,13 @@ void Dbdict::execSCHEMA_INFO(Signal* sig
   xsf->noOfPages = schemaDataPtr.sz / NDB_SF_PAGE_SIZE_IN_WORDS;
   copy((Uint32*)&xsf->schemaPage[0], schemaDataPtr);
   releaseSections(handle);
-  
+
   SchemaFile * sf0 = &xsf->schemaPage[0];
   if (sf0->NdbVersion < NDB_SF_VERSION_5_0_6) {
     bool ok = convertSchemaFileTo_5_0_6(xsf);
     ndbrequire(ok);
   }
-    
+
   if (sf0->NdbVersion < NDB_MAKE_VERSION(6,4,0))
   {
     jam();
@@ -3484,7 +3479,7 @@ void Dbdict::execSCHEMA_INFO(Signal* sig
   resizeSchemaFile(xsf, ownxsf->noOfPages);
 
   ndbrequire(signal->getSendersBlockRef() != reference());
-    
+
   /* ---------------------------------------------------------------- */
   // Synchronise our view on data with other nodes in the cluster.
   // This is an important part of restart handling where we will handle
@@ -3498,7 +3493,7 @@ void Dbdict::execSCHEMA_INFO(Signal* sig
 }//execSCHEMA_INFO()
 
 void
-Dbdict::restart_checkSchemaStatusComplete(Signal * signal, 
+Dbdict::restart_checkSchemaStatusComplete(Signal * signal,
 					  Uint32 callbackData,
 					  Uint32 returnCode)
 {
@@ -3512,14 +3507,14 @@ Dbdict::restart_checkSchemaStatusComplet
 	       signal, 1, JBB);
     return;
   }
-  
+
   ndbrequire(c_restartRecord.m_op_cnt == 0);
   ndbrequire(c_nodeRestart || c_initialNodeRestart);
   activateIndexes(signal, 0);
   return;
 }
 
-void Dbdict::execSCHEMA_INFOCONF(Signal* signal) 
+void Dbdict::execSCHEMA_INFOCONF(Signal* signal)
 {
   jamEntry();
   ndbrequire(signal->getNoOfSections() == 0);
@@ -3539,7 +3534,7 @@ void Dbdict::execSCHEMA_INFOCONF(Signal*
   activateIndexes(signal, 0);
 }//execSCHEMA_INFOCONF()
 
-static bool 
+static bool
 checkSchemaStatus(Uint32 tableType, Uint32 pass)
 {
   switch(tableType){
@@ -3622,14 +3617,14 @@ void Dbdict::initRestartRecord(Uint32 st
  * Pass 3 Create old HashMap
  * Pass 4 Create old Table           // NOT DONE DUE TO DIH
  * Pass 5 Create old Index           // NOT DONE DUE TO DIH
- 
+
  * Pass 6 Drop old Index             // NOT DONE DUE TO DIH
  * Pass 7 Drop old Table             // NOT DONE DUE TO DIH
  * Pass 8 Drop old HashMap
  * Pass 9 Drop old Datafile/Undofile
  * Pass 10 Drop old Tablespace
  * Pass 11 Drop old Logfilegroup
- 
+
  * Pass 12 Create new LogfileGroup
  * Pass 13 Create new Tablespace
  * Pass 14 Create new Datafile/Undofile
@@ -3638,7 +3633,7 @@ void Dbdict::initRestartRecord(Uint32 st
  * Pass 17 Create new Index
  */
 
-void Dbdict::checkSchemaStatus(Signal* signal) 
+void Dbdict::checkSchemaStatus(Signal* signal)
 {
   // masterxsf == schema file of master (i.e what's currently in cluster)
   // ownxsf = schema file read from disk
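
checkSchemaStatus(tableType, pass), shown a few hunks above, together with the pass list in the preceding comment decides which object types get handled in which restart pass: old objects are dropped in reverse dependency order before new ones are created in dependency order. A compilable toy version of that kind of per-pass filter is sketched here; the pass numbers and type names are made up for illustration and do not correspond to the real DICT constants:

    #include <cstdio>

    enum ObjType { LogfileGroup, Tablespace, Datafile, Table, Index };

    // Toy pass filter: old objects are dropped in reverse dependency order,
    // then new ones are created in dependency order (cf. the pass list above).
    static bool handledInPass(ObjType t, unsigned pass) {
      switch (t) {
      case Index:        return pass == 0 || pass == 9;  // dropped first, created last
      case Table:        return pass == 1 || pass == 8;
      case Datafile:     return pass == 2 || pass == 7;
      case Tablespace:   return pass == 3 || pass == 6;
      case LogfileGroup: return pass == 4 || pass == 5;  // dropped last, created first
      }
      return false;
    }

    int main() {
      const ObjType objs[] = { Table, Datafile, Index, LogfileGroup, Tablespace };
      for (unsigned pass = 0; pass < 10; pass++)
        for (ObjType t : objs)
          if (handledInPass(t, pass))
            std::printf("pass %u handles object type %d\n", pass, int(t));
      return 0;
    }
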
@@ -3754,7 +3749,7 @@ void Dbdict::checkSchemaStatus(Signal* s
     jam();
 
     c_restartRecord.m_op_cnt = 0;
-    
+
     TxHandlePtr tx_ptr;
     c_txHandleHash.getPtr(tx_ptr, c_restartRecord.m_tx_ptr_i);
 
@@ -3781,7 +3776,7 @@ Dbdict::checkPendingSchemaTrans(XSchemaF
         transEntry->m_transId != 0)
     {
       jam();
-  
+
       bool commit = false;
       switch(transEntry->m_tableState){
       case SchemaFile::SF_STARTED:
@@ -3896,7 +3891,7 @@ void
 Dbdict::restart_fromBeginTrans(Signal* signal, Uint32 tx_key, Uint32 ret)
 {
   ndbrequire(ret == 0);
-  
+
   TxHandlePtr tx_ptr;
   findTxHandle(tx_ptr, tx_key);
   ndbrequire(!tx_ptr.isNull());
@@ -3928,7 +3923,7 @@ Dbdict::restart_nextOp(Signal* signal, b
     jam();
     c_restartRecord.m_op_cnt = 0;
 
-    Ptr<TxHandle> tx_ptr;
+    TxHandlePtr tx_ptr;
     c_txHandleHash.getPtr(tx_ptr, c_restartRecord.m_tx_ptr_i);
 
     Callback c = {
@@ -4061,19 +4056,19 @@ Dbdict::restartNextPass(Signal* signal)
      * Complete last trans
      */
     jam();
-    
-    c_restartRecord.m_pass--;    
+
+    c_restartRecord.m_pass--;
     c_restartRecord.m_op_cnt = 0;
 
-    Ptr<TxHandle> tx_ptr;
+    TxHandlePtr tx_ptr;
     c_txHandleHash.getPtr(tx_ptr, c_restartRecord.m_tx_ptr_i);
-    
-    Callback c = { 
+
+    Callback c = {
       safe_cast(&Dbdict::restartEndPass_fromEndTrans),
       tx_ptr.p->tx_key
     };
     tx_ptr.p->m_callback = c;
-    
+
     Uint32 flags = 0;
     endSchemaTrans(signal, tx_ptr, flags);
     return;
@@ -4128,7 +4123,7 @@ Dbdict::execGET_TABINFO_CONF(Signal* sig
     jam();
     return;
   }
-  
+
   GetTabInfoConf * const conf = (GetTabInfoConf*)signal->getDataPtr();
 
   switch(conf->tableType){
@@ -4202,12 +4197,12 @@ Dbdict::execGET_TABINFO_CONF(Signal* sig
  * Create Obj during NR/SR
  */
 void
-Dbdict::restartCreateObj(Signal* signal, 
-			 Uint32 tableId, 
+Dbdict::restartCreateObj(Signal* signal,
+			 Uint32 tableId,
 			 const SchemaFile::TableEntry * new_entry,
 			 bool file){
   jam();
-  
+
 
 #ifdef PRINT_SCHEMA_RESTART
   ndbout_c("restartCreateObj table: %u file: %u", tableId, Uint32(file));
@@ -4219,9 +4214,9 @@ Dbdict::restartCreateObj(Signal* signal,
     c_readTableRecord.no_of_words = new_entry->m_info_words;
     c_readTableRecord.pageId = 0;
     c_readTableRecord.m_callback.m_callbackData = tableId;
-    c_readTableRecord.m_callback.m_callbackFunction = 
+    c_readTableRecord.m_callback.m_callbackFunction =
       safe_cast(&Dbdict::restartCreateObj_readConf);
-    
+
     ndbout_c("restartCreateObj(%u) file: %u", tableId, file);
     startReadTableFile(signal, tableId);
   }
@@ -4256,7 +4251,7 @@ Dbdict::restartCreateObj_getTabInfoConf(
 
 void
 Dbdict::restartCreateObj_readConf(Signal* signal,
-				  Uint32 callbackData, 
+				  Uint32 callbackData,
 				  Uint32 returnCode)
 {
   jam();
@@ -4264,7 +4259,7 @@ Dbdict::restartCreateObj_readConf(Signal
 
   PageRecordPtr pageRecPtr;
   c_pageRecordArray.getPtr(pageRecPtr, c_readTableRecord.pageId);
-  
+
   Uint32 sz = c_readTableRecord.no_of_words;
 
   Ptr<SectionSegment> ptr;
@@ -4279,12 +4274,12 @@ Dbdict::restartCreateObj_parse(Signal* s
                                bool file)
 {
   jam();
-  Ptr<SchemaOp> op_ptr;
-  
-  Ptr<TxHandle> tx_ptr;
+  SchemaOpPtr op_ptr;
+
+  TxHandlePtr tx_ptr;
   c_txHandleHash.getPtr(tx_ptr, c_restartRecord.m_tx_ptr_i);
 
-  Ptr<SchemaTrans> trans_ptr;
+  SchemaTransPtr trans_ptr;
   findSchemaTrans(trans_ptr, tx_ptr.p->m_transKey);
 
   switch(c_restartRecord.m_entry.m_tableType){
@@ -4295,27 +4290,27 @@ Dbdict::restartCreateObj_parse(Signal* s
   case DictTabInfo::UniqueOrderedIndex:
   case DictTabInfo::OrderedIndex:
   {
-    Ptr<CreateTableRec> opRecPtr;
+    CreateTableRecPtr opRecPtr;
     seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     break;
   }
   case DictTabInfo::Undofile:
   case DictTabInfo::Datafile:
   {
-    Ptr<CreateFileRec> opRecPtr;
+    CreateFileRecPtr opRecPtr;
     seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     break;
   }
   case DictTabInfo::Tablespace:
   case DictTabInfo::LogfileGroup:
   {
-    Ptr<CreateFilegroupRec> opRecPtr;
+    CreateFilegroupRecPtr opRecPtr;
     seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     break;
   }
   case DictTabInfo::HashMap:
   {
-    Ptr<CreateHashMapRec> opRecPtr;
+    CreateHashMapRecPtr opRecPtr;
     seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     break;
   }
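
This hunk, like many others in the patch, replaces spelled-out Ptr<SchemaOp> / Ptr<TxHandle> / Ptr<SchemaTrans> / Ptr<Create...Rec> declarations with SchemaOpPtr, TxHandlePtr and similar names at the call sites. A self-contained sketch of what such an alias looks like follows; Ptr, TxHandle, TxHandlePtr and readTxKey are simplified stand-ins, not the kernel definitions, and the real aliases are assumed to live in the corresponding headers:

    #include <cstdint>

    // Stand-in for the kernel's Ptr<T>: a raw pointer plus a pool index.
    template <typename T>
    struct Ptr {
      T* p;
      uint32_t i;
      bool isNull() const { return p == 0; }
    };

    struct TxHandle { uint32_t tx_key; };

    // One alias per record type: call sites write TxHandlePtr instead of
    // Ptr<TxHandle>, so the aliased type can be redefined in a single place.
    typedef Ptr<TxHandle> TxHandlePtr;

    static uint32_t readTxKey(TxHandlePtr tx_ptr) {
      return tx_ptr.isNull() ? 0 : tx_ptr.p->tx_key;
    }

    int main() {
      TxHandle h;
      h.tx_key = 7;
      TxHandlePtr tx_ptr;
      tx_ptr.p = &h;
      tx_ptr.i = 0;
      return readTxKey(tx_ptr) == 7 ? 0 : 1;
    }
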
@@ -4323,7 +4318,7 @@ Dbdict::restartCreateObj_parse(Signal* s
 
   op_ptr.p->m_restart = file ? 1 : 2;
   op_ptr.p->m_state = SchemaOp::OS_PARSE_MASTER;
-  
+
   SectionHandle handle(this, ptr.i);
   ErrorInfo error;
   const OpInfo& info = getOpInfo(op_ptr);
@@ -4355,20 +4350,20 @@ Dbdict::restartCreateObj_parse(Signal* s
  * Drop object during NR/SR
  */
 void
-Dbdict::restartDropObj(Signal* signal, 
-                       Uint32 tableId, 
+Dbdict::restartDropObj(Signal* signal,
+                       Uint32 tableId,
                        const SchemaFile::TableEntry * entry)
 {
   jam();
   c_restartRecord.m_entry = *entry;
 
   jam();
-  Ptr<SchemaOp> op_ptr;
+  SchemaOpPtr op_ptr;
 
-  Ptr<TxHandle> tx_ptr;
+  TxHandlePtr tx_ptr;
   c_txHandleHash.getPtr(tx_ptr, c_restartRecord.m_tx_ptr_i);
 
-  Ptr<SchemaTrans> trans_ptr;
+  SchemaTransPtr trans_ptr;
   findSchemaTrans(trans_ptr, tx_ptr.p->m_transKey);
 
   switch(c_restartRecord.m_entry.m_tableType){
@@ -4378,14 +4373,14 @@ Dbdict::restartDropObj(Signal* signal,
   case DictTabInfo::HashIndex:
   case DictTabInfo::UniqueOrderedIndex:
   case DictTabInfo::OrderedIndex:
-    Ptr<DropTableRec> opRecPtr;
+    DropTableRecPtr opRecPtr;
     seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     ndbrequire(false);
     break;
   case DictTabInfo::Undofile:
   case DictTabInfo::Datafile:
   {
-    Ptr<DropFileRec> opRecPtr;
+    DropFileRecPtr opRecPtr;
     seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     opRecPtr.p->m_request.file_id = tableId;
     opRecPtr.p->m_request.file_version = entry->m_tableVersion;
@@ -4394,7 +4389,7 @@ Dbdict::restartDropObj(Signal* signal,
   case DictTabInfo::Tablespace:
   case DictTabInfo::LogfileGroup:
   {
-    Ptr<DropFilegroupRec> opRecPtr;
+    DropFilegroupRecPtr opRecPtr;
     seizeSchemaOp(trans_ptr, op_ptr, opRecPtr);
     opRecPtr.p->m_request.filegroup_id = tableId;
     opRecPtr.p->m_request.filegroup_version = entry->m_tableVersion;
@@ -4403,10 +4398,10 @@ Dbdict::restartDropObj(Signal* signal,
   }
 
   ndbout_c("restartDropObj(%u)", tableId);
-  
+
   op_ptr.p->m_restart = 1; //
   op_ptr.p->m_state = SchemaOp::OS_PARSE_MASTER;
-  
+
   SectionHandle handle(this);
   ErrorInfo error;
   const OpInfo& info = getOpInfo(op_ptr);
@@ -4443,12 +4438,12 @@ Dbdict::restartDropObj(Signal* signal,
 /* ---------------------------------------------------------------- */
 /* **************************************************************** */
 
-void Dbdict::handleApiFailureCallback(Signal* signal, 
+void Dbdict::handleApiFailureCallback(Signal* signal,
                                       Uint32 failedNodeId,
                                       Uint32 ignoredRc)
 {
   jamEntry();
-  
+
   signal->theData[0] = failedNodeId;
   signal->theData[1] = reference();
   sendSignal(QMGR_REF, GSN_API_FAILCONF, signal, 2, JBB);
@@ -4457,7 +4452,7 @@ void Dbdict::handleApiFailureCallback(Si
 /* ---------------------------------------------------------------- */
 // We receive a report of an API that failed.
 /* ---------------------------------------------------------------- */
-void Dbdict::execAPI_FAILREQ(Signal* signal) 
+void Dbdict::execAPI_FAILREQ(Signal* signal)
 {
   jamEntry();
   Uint32 failedApiNode = signal->theData[0];
@@ -4476,8 +4471,8 @@ void Dbdict::execAPI_FAILREQ(Signal* sig
   handleApiFail(signal, failedApiNode);
 }//execAPI_FAILREQ()
 
-void Dbdict::handleNdbdFailureCallback(Signal* signal, 
-                                       Uint32 failedNodeId, 
+void Dbdict::handleNdbdFailureCallback(Signal* signal,
+                                       Uint32 failedNodeId,
                                        Uint32 ignoredRc)
 {
   jamEntry();
@@ -4487,14 +4482,14 @@ void Dbdict::handleNdbdFailureCallback(S
   nfCompRep->blockNo      = DBDICT;
   nfCompRep->nodeId       = getOwnNodeId();
   nfCompRep->failedNodeId = failedNodeId;
-  sendSignal(DBDIH_REF, GSN_NF_COMPLETEREP, signal, 
+  sendSignal(DBDIH_REF, GSN_NF_COMPLETEREP, signal,
              NFCompleteRep::SignalLength, JBB);
 }
 
 /* ---------------------------------------------------------------- */
 // We receive a report of one or more node failures of kernel nodes.
 /* ---------------------------------------------------------------- */
-void Dbdict::execNODE_FAILREP(Signal* signal) 
+void Dbdict::execNODE_FAILREP(Signal* signal)
 {
   jamEntry();
   NodeFailRep nodeFailRep = *(NodeFailRep *)&signal->theData[0];
@@ -4583,11 +4578,11 @@ void Dbdict::handle_master_takeover(Sign
     pending schema transactions.
     Ask all slave nodes about state of any pending
     transactions
-  */      
+  */
   jam();
   NodeRecordPtr masterNodePtr;
   c_nodes.getPtr(masterNodePtr, c_masterNodeId);
-  
+
   masterNodePtr.p->m_nodes = c_aliveNodes;
   NodeReceiverGroup rg(DBDICT, masterNodePtr.p->m_nodes);
   {
@@ -4616,7 +4611,7 @@ void Dbdict::handle_master_takeover(Sign
 // Include a starting node in list of nodes to be part of adding
 // and dropping tables.
 /* ---------------------------------------------------------------- */
-void Dbdict::execINCL_NODEREQ(Signal* signal) 
+void Dbdict::execINCL_NODEREQ(Signal* signal)
 {
   jamEntry();
   NodeRecordPtr nodePtr;
@@ -4648,15 +4643,15 @@ void Dbdict::execINCL_NODEREQ(Signal* si
 inline
 void Dbdict::printTables()
 {
-  DLHashTable<DictObject>::Iterator iter;
+  DictObject_hash::Iterator iter;
   bool moreTables = c_obj_hash.first(iter);
   printf("OBJECTS IN DICT:\n");
   char name[PATH_MAX];
   while (moreTables) {
-    Ptr<DictObject> tablePtr = iter.curr;
+    DictObjectPtr tablePtr = iter.curr;
     ConstRope r(c_rope_pool, tablePtr.p->m_name);
     r.copy(name);
-    printf("%s ", name); 
+    printf("%s ", name);
     moreTables = c_obj_hash.next(iter);
   }
   printf("\n");
@@ -4674,7 +4669,7 @@ void Dbdict::printTables()
 
 Dbdict::DictObject *
 Dbdict::get_object(const char * name, Uint32 len, Uint32 hash){
-  Ptr<DictObject> old_ptr;
+  DictObjectPtr old_ptr;
   if (get_object(old_ptr, name, len, hash))
   {
     return old_ptr.p;
@@ -4695,10 +4690,10 @@ Dbdict::get_object(DictObjectPtr& obj_pt
 
 void
 Dbdict::release_object(Uint32 obj_ptr_i, DictObject* obj_ptr_p){
-  Rope name(c_rope_pool, obj_ptr_p->m_name);
+  LocalRope name(c_rope_pool, obj_ptr_p->m_name);
   name.erase();
 
-  Ptr<DictObject> ptr = { obj_ptr_p, obj_ptr_i };
+  DictObjectPtr ptr = { obj_ptr_p, obj_ptr_i };
   c_obj_hash.release(ptr);
 }
 
@@ -4706,7 +4701,7 @@ void
 Dbdict::increase_ref_count(Uint32 obj_ptr_i)
 {
   DictObject* ptr = c_obj_pool.getPtr(obj_ptr_i);
-  ptr->m_ref_count++;  
+  ptr->m_ref_count++;
 }
 
 void
@@ -4714,13 +4709,13 @@ Dbdict::decrease_ref_count(Uint32 obj_pt
 {
   DictObject* ptr = c_obj_pool.getPtr(obj_ptr_i);
   ndbrequire(ptr->m_ref_count);
-  ptr->m_ref_count--;  
+  ptr->m_ref_count--;
 }
 
 void Dbdict::handleTabInfoInit(Signal * signal, SchemaTransPtr & trans_ptr,
                                SimpleProperties::Reader & it,
 			       ParseDictTabInfoRecord * parseP,
-			       bool checkExist) 
+			       bool checkExist)
 {
 /* ---------------------------------------------------------------- */
 // We always start by handling table name since this must be the first
@@ -4732,11 +4727,11 @@ void Dbdict::handleTabInfoInit(Signal *
 
   SimpleProperties::UnpackStatus status;
   c_tableDesc.init();
-  status = SimpleProperties::unpack(it, &c_tableDesc, 
-				    DictTabInfo::TableMapping, 
-				    DictTabInfo::TableMappingSize, 
+  status = SimpleProperties::unpack(it, &c_tableDesc,
+				    DictTabInfo::TableMapping,
+				    DictTabInfo::TableMappingSize,
 				    true, true);
-  
+
   if(status != SimpleProperties::Break){
     parseP->errorCode = CreateTableRef::InvalidFormat;
     parseP->status    = status;
@@ -4746,24 +4741,24 @@ void Dbdict::handleTabInfoInit(Signal *
   }
 
   if(parseP->requestType == DictTabInfo::AlterTableFromAPI)
-  {  
+  {
     ndbrequire(!checkExist);
   }
   if(!checkExist)
   {
     ndbrequire(parseP->requestType == DictTabInfo::AlterTableFromAPI);
   }
-  
+
   /* ---------------------------------------------------------------- */
   // Verify that table name is an allowed table name.
   // TODO
   /* ---------------------------------------------------------------- */
   const Uint32 tableNameLength = Uint32(strlen(c_tableDesc.TableName) + 1);
-  const Uint32 name_hash = Rope::hash(c_tableDesc.TableName, tableNameLength);
+  const Uint32 name_hash = LocalRope::hash(c_tableDesc.TableName, tableNameLength);
 
   if(checkExist){
     jam();
-    tabRequire(get_object(c_tableDesc.TableName, tableNameLength) == 0, 
+    tabRequire(get_object(c_tableDesc.TableName, tableNameLength) == 0,
 	       CreateTableRef::TableAlreadyExist);
   }
 
@@ -4772,7 +4767,7 @@ void Dbdict::handleTabInfoInit(Signal *
     jam();
     parseP->requestType = DictTabInfo::AddTableFromDict;
   }
-  
+
   TableRecordPtr tablePtr;
   switch (parseP->requestType) {
   case DictTabInfo::CreateTableFromAPI: {
@@ -4780,12 +4775,12 @@ void Dbdict::handleTabInfoInit(Signal *
   }
   case DictTabInfo::AlterTableFromAPI:{
     jam();
-    tablePtr.i = getFreeTableRecord(c_tableDesc.PrimaryTableId);
+    tablePtr.i = getFreeTableRecord();
     /* ---------------------------------------------------------------- */
     // Check if no free tables existed.
     /* ---------------------------------------------------------------- */
     tabRequire(tablePtr.i != RNIL, CreateTableRef::NoMoreTableRecords);
-    
+
     c_tableRecordPool.getPtr(tablePtr);
     break;
   }
@@ -4797,16 +4792,16 @@ void Dbdict::handleTabInfoInit(Signal *
 // Get table id and check that table doesn't already exist
 /* ---------------------------------------------------------------- */
     tablePtr.i = c_tableDesc.TableId;
-    
+
     if (parseP->requestType == DictTabInfo::ReadTableFromDiskSR) {
       ndbrequire(tablePtr.i == c_restartRecord.activeTable);
     }//if
     if (parseP->requestType == DictTabInfo::GetTabInfoConf) {
       ndbrequire(tablePtr.i == c_restartRecord.activeTable);
     }//if
-    
+
     c_tableRecordPool.getPtr(tablePtr);
-    
+
     //Uint32 oldTableVersion = tablePtr.p->tableVersion;
     initialiseTableRecord(tablePtr);
 
@@ -4815,21 +4810,21 @@ void Dbdict::handleTabInfoInit(Signal *
 /* ---------------------------------------------------------------- */
     Uint32 tableVersion = c_tableDesc.TableVersion;
     tablePtr.p->tableVersion = tableVersion;
-    
+
     break;
   }
   default:
     ndbrequire(false);
     break;
   }//switch
-  
-  { 
-    Rope name(c_rope_pool, tablePtr.p->tableName);
+
+  {
+    LocalRope name(c_rope_pool, tablePtr.p->tableName);
     tabRequire(name.assign(c_tableDesc.TableName, tableNameLength, name_hash),
 	       CreateTableRef::OutOfStringBuffer);
   }
 
-  Ptr<DictObject> obj_ptr;
+  DictObjectPtr obj_ptr;
   if (parseP->requestType != DictTabInfo::AlterTableFromAPI) {
     jam();
     ndbrequire(c_obj_hash.seize(obj_ptr));
@@ -4843,8 +4838,8 @@ void Dbdict::handleTabInfoInit(Signal *
 
     if (g_trace)
     {
-      g_eventLogger->info("Dbdict: create name=%s,id=%u,obj_ptr_i=%d", 
-                          c_tableDesc.TableName, 
+      g_eventLogger->info("Dbdict: create name=%s,id=%u,obj_ptr_i=%d",
+                          c_tableDesc.TableName,
                           tablePtr.i, tablePtr.p->m_obj_ptr_i);
     }
     send_event(signal, trans_ptr,
@@ -4854,20 +4849,20 @@ void Dbdict::handleTabInfoInit(Signal *
                c_tableDesc.TableType);
   }
   parseP->tablePtr = tablePtr;
-  
+
   // Disallow logging of a temporary table.
   tabRequire(!(c_tableDesc.TableTemporaryFlag && c_tableDesc.TableLoggedFlag),
              CreateTableRef::NoLoggingTemporaryTable);
 
   tablePtr.p->noOfAttributes = c_tableDesc.NoOfAttributes;
-  tablePtr.p->m_bits |= 
+  tablePtr.p->m_bits |=
     (c_tableDesc.TableLoggedFlag ? TableRecord::TR_Logged : 0);
-  tablePtr.p->m_bits |= 
+  tablePtr.p->m_bits |=
     (c_tableDesc.RowChecksumFlag ? TableRecord::TR_RowChecksum : 0);
-  tablePtr.p->m_bits |= 
+  tablePtr.p->m_bits |=
     (c_tableDesc.RowGCIFlag ? TableRecord::TR_RowGCI : 0);
 #if DOES_NOT_WORK_CURRENTLY
-  tablePtr.p->m_bits |= 
+  tablePtr.p->m_bits |=
     (c_tableDesc.TableTemporaryFlag ? TableRecord::TR_Temporary : 0);
 #endif
   tablePtr.p->m_bits |=
@@ -4878,13 +4873,13 @@ void Dbdict::handleTabInfoInit(Signal *
   tablePtr.p->tableType = (DictTabInfo::TableType)c_tableDesc.TableType;
   tablePtr.p->kValue = c_tableDesc.TableKValue;
   tablePtr.p->fragmentCount = c_tableDesc.FragmentCount;
-  tablePtr.p->m_tablespace_id = c_tableDesc.TablespaceId; 
-  tablePtr.p->maxRowsLow = c_tableDesc.MaxRowsLow; 
-  tablePtr.p->maxRowsHigh = c_tableDesc.MaxRowsHigh; 
+  tablePtr.p->m_tablespace_id = c_tableDesc.TablespaceId;
+  tablePtr.p->maxRowsLow = c_tableDesc.MaxRowsLow;
+  tablePtr.p->maxRowsHigh = c_tableDesc.MaxRowsHigh;
   tablePtr.p->minRowsLow = c_tableDesc.MinRowsLow;
   tablePtr.p->minRowsHigh = c_tableDesc.MinRowsHigh;
-  tablePtr.p->defaultNoPartFlag = c_tableDesc.DefaultNoPartFlag; 
-  tablePtr.p->linearHashFlag = c_tableDesc.LinearHashFlag; 
+  tablePtr.p->defaultNoPartFlag = c_tableDesc.DefaultNoPartFlag;
+  tablePtr.p->linearHashFlag = c_tableDesc.LinearHashFlag;
   tablePtr.p->singleUserMode = c_tableDesc.SingleUserMode;
   tablePtr.p->hashMapObjectId = c_tableDesc.HashMapObjectId;
   tablePtr.p->hashMapVersion = c_tableDesc.HashMapVersion;
@@ -4913,7 +4908,7 @@ void Dbdict::handleTabInfoInit(Signal *
     if (dictObj && dictObj->m_type == DictTabInfo::HashMap)
     {
       jam();
-      HashMapPtr hm_ptr;
+      HashMapRecordPtr hm_ptr;
       ndbrequire(c_hash_map_hash.find(hm_ptr, dictObj->m_id));
       tablePtr.p->hashMapObjectId = hm_ptr.p->m_object_id;
       tablePtr.p->hashMapVersion = hm_ptr.p->m_object_version;
@@ -4923,7 +4918,7 @@ void Dbdict::handleTabInfoInit(Signal *
   if (tablePtr.p->fragmentType == DictTabInfo::HashMapPartition)
   {
     jam();
-    HashMapPtr hm_ptr;
+    HashMapRecordPtr hm_ptr;
     tabRequire(c_hash_map_hash.find(hm_ptr, tablePtr.p->hashMapObjectId),
                CreateTableRef::InvalidHashMap);
 
@@ -4944,21 +4939,21 @@ void Dbdict::handleTabInfoInit(Signal *
                  CreateTableRef::InvalidHashMap);
     }
   }
-  
+
   {
-    Rope frm(c_rope_pool, tablePtr.p->frmData);
+    LocalRope frm(c_rope_pool, tablePtr.p->frmData);
     tabRequire(frm.assign(c_tableDesc.FrmData, c_tableDesc.FrmLen),
 	       CreateTableRef::OutOfStringBuffer);
-    Rope range(c_rope_pool, tablePtr.p->rangeData);
+    LocalRope range(c_rope_pool, tablePtr.p->rangeData);
     tabRequire(range.assign((const char*)c_tableDesc.RangeListData,
                c_tableDesc.RangeListDataLen),
 	      CreateTableRef::OutOfStringBuffer);
-    Rope fd(c_rope_pool, tablePtr.p->ngData);
+    LocalRope fd(c_rope_pool, tablePtr.p->ngData);
     tabRequire(fd.assign((const char*)c_tableDesc.FragmentData,
                          c_tableDesc.FragmentDataLen),
 	       CreateTableRef::OutOfStringBuffer);
   }
-  
+
   c_fragDataLen = c_tableDesc.FragmentDataLen;
   memcpy(c_fragData, c_tableDesc.FragmentData,
          c_tableDesc.FragmentDataLen);
@@ -5023,9 +5018,9 @@ void Dbdict::handleTabInfoInit(Signal *
     tablePtr.p->triggerId = RNIL;
   }
   tablePtr.p->buildTriggerId = RNIL;
-  
+
   handleTabInfo(it, parseP, c_tableDesc);
-  
+
   if(parseP->errorCode != 0)
   {
     /**
@@ -5048,7 +5043,7 @@ void Dbdict::handleTabInfoInit(Signal *
 }//handleTabInfoInit()
 
 void
-Dbdict::upgrade_seizeTrigger(Ptr<TableRecord> tabPtr,
+Dbdict::upgrade_seizeTrigger(TableRecordPtr tabPtr,
                              Uint32 insertTriggerId,
                              Uint32 updateTriggerId,
                              Uint32 deleteTriggerId)
@@ -5081,11 +5076,11 @@ Dbdict::upgrade_seizeTrigger(Ptr<TableRe
       BaseString::snprintf(buf, sizeof(buf),
                            "UPG_UPD_NDB$INDEX_%u_UI", tabPtr.i);
       {
-        Rope name(c_rope_pool, triggerPtr.p->triggerName);
+        LocalRope name(c_rope_pool, triggerPtr.p->triggerName);
         name.assign(buf);
       }
 
-      Ptr<DictObject> obj_ptr;
+      DictObjectPtr obj_ptr;
       bool ok = c_obj_hash.seize(obj_ptr);
       ndbrequire(ok);
       new (obj_ptr.p) DictObject();
@@ -5119,11 +5114,11 @@ Dbdict::upgrade_seizeTrigger(Ptr<TableRe
                            "UPG_DEL_NDB$INDEX_%u_UI", tabPtr.i);
 
       {
-        Rope name(c_rope_pool, triggerPtr.p->triggerName);
+        LocalRope name(c_rope_pool, triggerPtr.p->triggerName);
         name.assign(buf);
       }
 
-      Ptr<DictObject> obj_ptr;
+      DictObjectPtr obj_ptr;
       bool ok = c_obj_hash.seize(obj_ptr);
       ndbrequire(ok);
       new (obj_ptr.p) DictObject();
@@ -5144,9 +5139,9 @@ void Dbdict::handleTabInfo(SimplePropert
 			   DictTabInfo::Table &tableDesc)
 {
   TableRecordPtr tablePtr = parseP->tablePtr;
-  
+
   SimpleProperties::UnpackStatus status;
-  
+
   Uint32 keyCount = 0;
   Uint32 keyLength = 0;
   Uint32 attrCount = tablePtr.p->noOfAttributes;
@@ -5158,21 +5153,21 @@ void Dbdict::handleTabInfo(SimplePropert
   AttributeRecordPtr attrPtr;
   c_attributeRecordHash.removeAll();
 
-  LocalDLFifoList<AttributeRecord> list(c_attributeRecordPool, 
+  LocalAttributeRecord_list list(c_attributeRecordPool,
 					tablePtr.p->m_attributes);
 
   Uint32 counts[] = {0,0,0,0,0};
-  
+
   for(Uint32 i = 0; i<attrCount; i++){
     /**
      * Attribute Name
      */
     DictTabInfo::Attribute attrDesc; attrDesc.init();
-    status = SimpleProperties::unpack(it, &attrDesc, 
-				      DictTabInfo::AttributeMapping, 
-				      DictTabInfo::AttributeMappingSize, 
+    status = SimpleProperties::unpack(it, &attrDesc,
+				      DictTabInfo::AttributeMapping,
+				      DictTabInfo::AttributeMappingSize,
 				      true, true);
-    
+
     if(status != SimpleProperties::Break){
       parseP->errorCode = CreateTableRef::InvalidFormat;
       parseP->status    = status;
@@ -5185,38 +5180,38 @@ void Dbdict::handleTabInfo(SimplePropert
      * Check that attribute is not defined twice
      */
     const Uint32 len = Uint32(strlen(attrDesc.AttributeName)+1);
-    const Uint32 name_hash = Rope::hash(attrDesc.AttributeName, len);
+    const Uint32 name_hash = LocalRope::hash(attrDesc.AttributeName, len);
     {
       AttributeRecord key;
       key.m_key.m_name_ptr = attrDesc.AttributeName;
       key.m_key.m_name_len = len;
       key.attributeName.m_hash = name_hash;
       key.m_key.m_pool = &c_rope_pool;
-      Ptr<AttributeRecord> old_ptr;
+      AttributeRecordPtr old_ptr;
       c_attributeRecordHash.find(old_ptr, key);
-      
+
       if(old_ptr.i != RNIL){
 	parseP->errorCode = CreateTableRef::AttributeNameTwice;
 	return;
       }
     }
-    
+
     list.seize(attrPtr);
     if(attrPtr.i == RNIL){
       jam();
       parseP->errorCode = CreateTableRef::NoMoreAttributeRecords;
       return;
     }
-    
+
     new (attrPtr.p) AttributeRecord();
     attrPtr.p->attributeDescriptor = 0x00012255; //Default value
     attrPtr.p->tupleKey = 0;
-    
+
     /**
      * TmpAttrib to Attribute mapping
      */
     {
-      Rope name(c_rope_pool, attrPtr.p->attributeName);
+      LocalRope name(c_rope_pool, attrPtr.p->attributeName);
       if (!name.assign(attrDesc.AttributeName, len, name_hash))
       {
 	jam();
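
The loop above rejects a duplicate column by hashing the attribute name, probing c_attributeRecordHash with a key built from that name, and failing with AttributeNameTwice before a new record is seized; handleTabInfoInit makes the equivalent check for the table name through get_object(). The same reject-duplicates-before-allocating logic, reduced to a self-contained sketch with a standard container standing in for the kernel hash table (addAttributeName is a made-up name):

    #include <cstdio>
    #include <string>
    #include <unordered_set>

    // Returns false (the AttributeNameTwice-style error path) when the name
    // has already been registered; otherwise records it and returns true.
    static bool addAttributeName(std::unordered_set<std::string>& seen,
                                 const std::string& name) {
      if (seen.find(name) != seen.end())
        return false;        // duplicate: reject before allocating a record
      seen.insert(name);     // analogous to c_attributeRecordHash.add(attrPtr)
      return true;
    }

    int main() {
      std::unordered_set<std::string> seen;
      std::printf("%d\n", addAttributeName(seen, "a")); // 1
      std::printf("%d\n", addAttributeName(seen, "b")); // 1
      std::printf("%d\n", addAttributeName(seen, "a")); // 0 -> duplicate
      return 0;
    }
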
@@ -5228,7 +5223,7 @@ void Dbdict::handleTabInfo(SimplePropert
     attrPtr.p->attributeId = i;
     //attrPtr.p->attributeId = attrDesc.AttributeId;
     attrPtr.p->tupleKey = (keyCount + 1) * attrDesc.AttributeKeyFlag;
-    
+
     attrPtr.p->extPrecision = attrDesc.AttributeExtPrecision;
     attrPtr.p->extScale = attrDesc.AttributeExtScale;
     attrPtr.p->extLength = attrDesc.AttributeExtLength;
@@ -5275,14 +5270,6 @@ void Dbdict::handleTabInfo(SimplePropert
       parseP->errorLine = __LINE__;
       return;
     }
-    
-    // XXX old test option, remove
-    if(!attrDesc.AttributeKeyFlag && 
-       tablePtr.i > 1 &&
-       !tablePtr.p->isIndex())
-    {
-      //attrDesc.AttributeStorageType= NDB_STORAGETYPE_DISK;
-    }
 
     Uint32 desc = 0;
     AttributeDescriptor::setType(desc, attrDesc.AttributeExtType);
@@ -5298,14 +5285,14 @@ void Dbdict::handleTabInfo(SimplePropert
     attrPtr.p->autoIncrement = attrDesc.AttributeAutoIncrement;
     {
       char defaultValueBuf [MAX_ATTR_DEFAULT_VALUE_SIZE];
-      
+
       if (attrDesc.AttributeDefaultValueLen)
       {
         ndbrequire(attrDesc.AttributeDefaultValueLen >= sizeof(Uint32));
 
         memcpy(defaultValueBuf, attrDesc.AttributeDefaultValue,
                attrDesc.AttributeDefaultValueLen);
-        
+
         /* Table meta-info is normally stored in network byte order by
          * SimpleProperties.
          * For the default value, we convert as necessary here
@@ -5315,9 +5302,9 @@ void Dbdict::handleTabInfo(SimplePropert
         memcpy(&a, defaultValueBuf, sizeof(Uint32));
         a = ntohl(a);
         memcpy(defaultValueBuf, &a, sizeof(Uint32));
-        
+
         Uint32 remainBytes = attrDesc.AttributeDefaultValueLen - sizeof(Uint32);
-        
+
         if (remainBytes)
         {
           /* Convert attribute */
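
The block above copies the packed default value into a local buffer and, as the comment at the end of the preceding hunk explains, converts the leading 32-bit word from network to host byte order before the remaining bytes get their type-specific conversion. A stand-alone sketch of just that first step, using POSIX ntohl; convertDefaultHeader and the buffer layout are illustrative assumptions, not the DICT code itself:

    #include <arpa/inet.h>   // ntohl (POSIX)
    #include <cstring>
    #include <cstdint>

    // Convert the leading 32-bit word of a packed default value from network
    // to host byte order; returns how many bytes are left for per-type handling.
    static size_t convertDefaultHeader(const unsigned char* src, size_t len,
                                       unsigned char* dst) {
      if (len < sizeof(uint32_t))
        return 0;                        // too short to carry the header word
      std::memcpy(dst, src, len);
      uint32_t a;
      std::memcpy(&a, dst, sizeof(uint32_t));
      a = ntohl(a);
      std::memcpy(dst, &a, sizeof(uint32_t));
      return len - sizeof(uint32_t);
    }

    int main() {
      const unsigned char src[8] = { 0, 0, 0, 42, 1, 2, 3, 4 };
      unsigned char dst[8];
      return convertDefaultHeader(src, sizeof(src), dst) == 4 ? 0 : 1;
    }
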
@@ -5330,26 +5317,26 @@ void Dbdict::handleTabInfo(SimplePropert
         }
       }
 
-      Rope defaultValue(c_rope_pool, attrPtr.p->defaultValue);
+      LocalRope defaultValue(c_rope_pool, attrPtr.p->defaultValue);
       defaultValue.assign(defaultValueBuf,
                           attrDesc.AttributeDefaultValueLen);
     }
-    
+
     keyCount += attrDesc.AttributeKeyFlag;
     nullCount += attrDesc.AttributeNullableFlag;
-    
+
     const Uint32 aSz = (1 << attrDesc.AttributeSize);
     Uint32 sz;
     if(aSz != 1)
     {
       sz = ((aSz * attrDesc.AttributeArraySize) + 31) >> 5;
-    }    
+    }
     else
     {
       sz = 0;
-      nullBits += attrDesc.AttributeArraySize;      
+      nullBits += attrDesc.AttributeArraySize;
     }
-    
+
     if(attrDesc.AttributeArraySize == 0)
     {
       parseP->errorCode = CreateTableRef::InvalidArraySize;
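
In the size bookkeeping above, aSz = 1 << AttributeSize is the element width in bits and sz = ((aSz * AttributeArraySize) + 31) >> 5 rounds the attribute up to whole 32-bit words, while single-bit elements (aSz == 1) are counted into nullBits instead of the word total. A small worked example of that arithmetic, assuming that reading of the fields:

    #include <cstdio>

    // Words needed for an element of (1 << attributeSize) bits repeated arraySize times.
    static unsigned wordsForAttr(unsigned attributeSize, unsigned arraySize) {
      const unsigned bits = (1u << attributeSize) * arraySize;
      return (bits + 31) >> 5;
    }

    int main() {
      std::printf("%u\n", wordsForAttr(5, 1));  // 32-bit element        -> 1 word
      std::printf("%u\n", wordsForAttr(6, 1));  // 64-bit element        -> 2 words
      std::printf("%u\n", wordsForAttr(3, 20)); // 20 x 8-bit = 160 bits -> 5 words
      return 0;
    }
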
@@ -5358,7 +5345,7 @@ void Dbdict::handleTabInfo(SimplePropert
       parseP->errorLine = __LINE__;
       return;
     }
-    
+
     recordLength += sz;
     if(attrDesc.AttributeKeyFlag){
       keyLength += sz;
@@ -5371,7 +5358,7 @@ void Dbdict::handleTabInfo(SimplePropert
 	return;
       }
     }
-    
+
     c_attributeRecordHash.add(attrPtr);
 
     int a= AttributeDescriptor::getDiskBased(desc);
@@ -5387,25 +5374,25 @@ void Dbdict::handleTabInfo(SimplePropert
       parseP->errorLine = __LINE__;
       return;
     }
-    
+
     if(!it.next())
       break;
-    
+
     if(it.getKey() != DictTabInfo::AttributeName)
       break;
   }//while
-  
+
   tablePtr.p->noOfPrimkey = keyCount;
   tablePtr.p->noOfNullAttr = nullCount;
   tablePtr.p->noOfCharsets = noOfCharsets;
   tablePtr.p->tupKeyLength = keyLength;
   tablePtr.p->noOfNullBits = nullCount + nullBits;
 
-  tabRequire(recordLength<= MAX_TUPLE_SIZE_IN_WORDS, 
+  tabRequire(recordLength<= MAX_TUPLE_SIZE_IN_WORDS,
 	     CreateTableRef::RecordTooBig);
-  tabRequire(keyLength <= MAX_KEY_SIZE_IN_WORDS, 
+  tabRequire(keyLength <= MAX_KEY_SIZE_IN_WORDS,
 	     CreateTableRef::InvalidPrimaryKeySize);
-  tabRequire(keyLength > 0, 
+  tabRequire(keyLength > 0,
 	     CreateTableRef::InvalidPrimaryKeySize);
   tabRequire(CHECK_SUMA_MESSAGE_SIZE(keyCount, keyLength, attrCount, recordLength),
              CreateTableRef::RecordTooBig);
@@ -5438,12 +5425,12 @@ void Dbdict::handleTabInfo(SimplePropert
     {
       tabRequire(false, CreateTableRef::InvalidTablespace);
     }
-    
+
     if(tablespacePtr.p->m_type != DictTabInfo::Tablespace)
     {
       tabRequire(false, CreateTableRef::NotATablespace);
     }
-    
+
     if(tablespacePtr.p->m_version != tableDesc.TablespaceVersion)
     {
       tabRequire(false, CreateTableRef::InvalidTablespaceVersion);
@@ -5504,7 +5491,7 @@ Dbdict::wait_gcp(Signal* signal, SchemaO
              WaitGCPReq::SignalLength, JBB);
 }
 
-void Dbdict::execWAIT_GCP_CONF(Signal* signal) 
+void Dbdict::execWAIT_GCP_CONF(Signal* signal)
 {
   WaitGCPConf* conf = (WaitGCPConf*)signal->getDataPtr();
   handleDictConf(signal, conf);
@@ -5513,7 +5500,7 @@ void Dbdict::execWAIT_GCP_CONF(Signal* s
 /* ---------------------------------------------------------------- */
 // Refused new global checkpoint.
 /* ---------------------------------------------------------------- */
-void Dbdict::execWAIT_GCP_REF(Signal* signal) 
+void Dbdict::execWAIT_GCP_REF(Signal* signal)
 {
   jamEntry();
   WaitGCPRef* ref = (WaitGCPRef*)signal->getDataPtr();
@@ -5669,7 +5656,7 @@ Dbdict::create_fragmentation(Signal* sig
   if (tabPtr.p->hashMapObjectId != RNIL)
   {
     jam();
-    HashMapPtr hm_ptr;
+    HashMapRecordPtr hm_ptr;
     ndbrequire(c_hash_map_hash.find(hm_ptr, tabPtr.p->hashMapObjectId));
     frag_req->map_ptr_i = hm_ptr.p->m_map_ptr_i;
   }
@@ -5766,8 +5753,8 @@ Dbdict::createTable_parse(Signal* signal
 
     if (parseRecord.errorCode == 0)
     {
-      if (ERROR_INSERTED(6200) || 
-          (ERROR_INSERTED(6201) && 
+      if (ERROR_INSERTED(6200) ||
+          (ERROR_INSERTED(6201) &&
            DictTabInfo::isIndex(parseRecord.tablePtr.p->tableType)))
       {
         jam();
@@ -5776,7 +5763,7 @@ Dbdict::createTable_parse(Signal* signal
       }
     }
 
-    if (parseRecord.errorCode != 0) 
+    if (parseRecord.errorCode != 0)
     {
       jam();
       if (!parseRecord.tablePtr.isNull())
@@ -5809,8 +5796,8 @@ Dbdict::createTable_parse(Signal* signal
     impl_req->tableId = tabPtr.i;
     impl_req->tableVersion = tabPtr.p->tableVersion;
 
-    if (ERROR_INSERTED(6202) || 
-        (ERROR_INSERTED(6203) && 
+    if (ERROR_INSERTED(6202) ||
+        (ERROR_INSERTED(6203) &&
          DictTabInfo::isIndex(parseRecord.tablePtr.p->tableType)))
     {
       jam();
@@ -6162,8 +6149,8 @@ Dbdict::createTab_local(Signal* signal,
     }
 
     Uint32 key = 0;
-    Ptr<AttributeRecord> attrPtr;
-    LocalDLFifoList<AttributeRecord> list(c_attributeRecordPool,
+    AttributeRecordPtr attrPtr;
+    LocalAttributeRecord_list list(c_attributeRecordPool,
                                           tabPtr.p->m_attributes);
     for(list.first(attrPtr); !attrPtr.isNull(); list.next(attrPtr))
     {
@@ -6301,7 +6288,7 @@ Dbdict::sendLQHADDATTRREQ(Signal* signal
       {
         jam();
         AttributeDescriptor::clearArrayType(entry.attrDescriptor);
-        AttributeDescriptor::setArrayType(entry.attrDescriptor, 
+        AttributeDescriptor::setArrayType(entry.attrDescriptor,
                                           NDB_ARRAYTYPE_NONE_VAR);
       }
     }
@@ -6400,7 +6387,7 @@ Dbdict::createTab_dih(Signal* signal, Sc
 
   if (tabPtr.p->hashMapObjectId != RNIL)
   {
-    HashMapPtr hm_ptr;
+    HashMapRecordPtr hm_ptr;
     ndbrequire(c_hash_map_hash.find(hm_ptr, tabPtr.p->hashMapObjectId));
     req->hashMapPtrI = hm_ptr.p->m_map_ptr_i;
   }
@@ -6580,7 +6567,7 @@ Dbdict::execLQHFRAGCONF(Signal * signal)
   {
     jam();
     SchemaOpPtr op_ptr;
-    Ptr<TableRecord> tabPtr;
+    TableRecordPtr tabPtr;
     c_tableRecordPool.getPtr(tabPtr, tableId);
     if (DictTabInfo::isTable(tabPtr.p->tableType))
     {
@@ -6633,7 +6620,7 @@ Dbdict::execLQHFRAGREF(Signal * signal)
   {
     jam();
     SchemaOpPtr op_ptr;
-    Ptr<TableRecord> tabPtr;
+    TableRecordPtr tabPtr;
     c_tableRecordPool.getPtr(tabPtr, tableId);
     if (DictTabInfo::isTable(tabPtr.p->tableType))
     {
@@ -6832,10 +6819,10 @@ Dbdict::createTable_commit(Signal* signa
 
   if (DictTabInfo::isIndex(tabPtr.p->tableType))
   {
-    Ptr<TableRecord> basePtr;
+    TableRecordPtr basePtr;
     c_tableRecordPool.getPtr(basePtr, tabPtr.p->primaryTableId);
 
-    LocalDLFifoList<TableRecord> list(c_tableRecordPool, basePtr.p->m_indexes);
+    LocalTableRecord_list list(c_tableRecordPool, basePtr.p->m_indexes);
     list.add(tabPtr);
   }
 }
@@ -7075,7 +7062,7 @@ void Dbdict::execCREATE_TABLE_REF(Signal
   handleDictRef(signal, ref);
 }
 
-void Dbdict::releaseTableObject(Uint32 tableId, bool removeFromHash) 
+void Dbdict::releaseTableObject(Uint32 tableId, bool removeFromHash)
 {
   TableRecordPtr tablePtr;
   c_tableRecordPool.getPtr(tablePtr, tableId);
@@ -7087,31 +7074,31 @@ void Dbdict::releaseTableObject(Uint32 t
   }
   else
   {
-    Rope tmp(c_rope_pool, tablePtr.p->tableName);
+    LocalRope tmp(c_rope_pool, tablePtr.p->tableName);
     tmp.erase();
   }
-  
+
   {
-    Rope tmp(c_rope_pool, tablePtr.p->frmData);
+    LocalRope tmp(c_rope_pool, tablePtr.p->frmData);
     tmp.erase();
   }
 
   {
-    Rope tmp(c_rope_pool, tablePtr.p->ngData);
+    LocalRope tmp(c_rope_pool, tablePtr.p->ngData);
     tmp.erase();
   }
 
   {
-    Rope tmp(c_rope_pool, tablePtr.p->rangeData);
+    LocalRope tmp(c_rope_pool, tablePtr.p->rangeData);
     tmp.erase();
   }
 
-  LocalDLFifoList<AttributeRecord> list(c_attributeRecordPool, 
+  LocalAttributeRecord_list list(c_attributeRecordPool,
 					tablePtr.p->m_attributes);
   AttributeRecordPtr attrPtr;
   for(list.first(attrPtr); !attrPtr.isNull(); list.next(attrPtr)){
-    Rope name(c_rope_pool, attrPtr.p->attributeName);
-    Rope def(c_rope_pool, attrPtr.p->defaultValue);
+    LocalRope name(c_rope_pool, attrPtr.p->attributeName);
+    LocalRope def(c_rope_pool, attrPtr.p->defaultValue);
     name.erase();
     def.erase();
   }
@@ -7443,7 +7430,7 @@ Dbdict::dropTable_commit(Signal* signal,
   // from a newer execDROP_TAB_REQ version
   {
     char buf[1024];
-    Rope name(c_rope_pool, tablePtr.p->tableName);
+    LocalRope name(c_rope_pool, tablePtr.p->tableName);
     name.copy(buf);
     g_eventLogger->info("Dbdict: drop name=%s,id=%u,obj_id=%u", buf, tablePtr.i,
                         tablePtr.p->m_obj_ptr_i);
@@ -7457,10 +7444,10 @@ Dbdict::dropTable_commit(Signal* signal,
 
   if (DictTabInfo::isIndex(tablePtr.p->tableType))
   {
-    Ptr<TableRecord> basePtr;
+    TableRecordPtr basePtr;
     c_tableRecordPool.getPtr(basePtr, tablePtr.p->primaryTableId);
 
-    LocalDLFifoList<TableRecord> list(c_tableRecordPool, basePtr.p->m_indexes);
+    LocalTableRecord_list list(c_tableRecordPool, basePtr.p->m_indexes);
     list.remove(tablePtr);
   }
   dropTabPtr.p->m_block = 0;
@@ -7509,7 +7496,7 @@ Dbdict::dropTable_commit_nextStep(Signal
                PrepDropTabRef::SignalLength, JBB);
     return;
   }
- 
+
 
   PrepDropTabReq* prep = (PrepDropTabReq*)signal->getDataPtrSend();
   prep->senderRef = reference();
@@ -7815,11 +7802,11 @@ Dbdict::alterTable_release(SchemaOpPtr o
   AlterTableRecPtr alterTabPtr;
   getOpRec(op_ptr, alterTabPtr);
   {
-    Rope r(c_rope_pool, alterTabPtr.p->m_oldTableName);
+    LocalRope r(c_rope_pool, alterTabPtr.p->m_oldTableName);
     r.erase();
   }
   {
-    Rope r(c_rope_pool, alterTabPtr.p->m_oldFrmData);
+    LocalRope r(c_rope_pool, alterTabPtr.p->m_oldFrmData);
     r.erase();
   }
   LocalArenaPoolImpl op_sec_pool(op_ptr.p->m_trans_ptr.p->m_arena, c_opSectionBufferPool);
@@ -8132,7 +8119,7 @@ Dbdict::alterTable_parse(Signal* signal,
       return;
     }
 
-    LocalDLFifoList<AttributeRecord>
+    LocalAttributeRecord_list
       list(c_attributeRecordPool, newTablePtr.p->m_attributes);
     AttributeRecordPtr attrPtr;
     list.first(attrPtr);
@@ -8341,10 +8328,10 @@ Dbdict::check_supported_reorg(Uint32 org
     return 0;
   }
 
-  HashMapPtr orgmap_ptr;
+  HashMapRecordPtr orgmap_ptr;
   ndbrequire(c_hash_map_hash.find(orgmap_ptr, org_map_id));
 
-  HashMapPtr newmap_ptr;
+  HashMapRecordPtr newmap_ptr;
   ndbrequire(c_hash_map_hash.find(newmap_ptr, new_map_id));
 
   Ptr<Hash2FragmentMap> orgptr;
@@ -8443,7 +8430,7 @@ Dbdict::alterTable_subOps(Signal* signal
       TableRecordPtr tabPtr;
       TableRecordPtr indexPtr;
       c_tableRecordPool.getPtr(tabPtr, impl_req->tableId);
-      LocalDLFifoList<TableRecord> list(c_tableRecordPool, tabPtr.p->m_indexes);
+      LocalTableRecord_list list(c_tableRecordPool, tabPtr.p->m_indexes);
       Uint32 ptrI = alterTabPtr.p->m_sub_add_frag_index_ptr;
 
       if (ptrI == RNIL)
@@ -8905,7 +8892,7 @@ Dbdict::alterTable_prepare(Signal* signa
      * Get DIH connectPtr for future commit
      */
     {
-      Ptr<SchemaOp> tmp = op_ptr;
+      SchemaOpPtr tmp = op_ptr;
       LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
       for (list.prev(tmp); !tmp.isNull(); list.prev(tmp))
       {
@@ -9084,7 +9071,7 @@ Dbdict::alterTable_toLocal(Signal* signa
     if (AlterTableReq::getReorgFragFlag(req->changeMask))
     {
       jam();
-      HashMapPtr hm_ptr;
+      HashMapRecordPtr hm_ptr;
       ndbrequire(c_hash_map_hash.find(hm_ptr,
                                       alterTabPtr.p->m_newTablePtr.p->hashMapObjectId));
       req->new_map_ptr_i = hm_ptr.p->m_map_ptr_i;
@@ -9186,7 +9173,7 @@ Dbdict::alterTable_commit(Signal* signal
         << " old=" << copyRope<sz>(tablePtr.p->tableName)
         << " new=" << copyRope<sz>(newTablePtr.p->tableName));
 
-      Ptr<DictObject> obj_ptr;
+      DictObjectPtr obj_ptr;
       c_obj_pool.getPtr(obj_ptr, tablePtr.p->m_obj_ptr_i);
 
       // remove old name from hash
@@ -9219,9 +9206,9 @@ Dbdict::alterTable_commit(Signal* signal
       jam();
 
       /* Move the column definitions to the real table definitions. */
-      LocalDLFifoList<AttributeRecord>
+      LocalAttributeRecord_list
         list(c_attributeRecordPool, tablePtr.p->m_attributes);
-      LocalDLFifoList<AttributeRecord>
+      LocalAttributeRecord_list
         newlist(c_attributeRecordPool, newTablePtr.p->m_attributes);
 
       const Uint32 noOfNewAttr = impl_req->noOfNewAttr;
@@ -9438,7 +9425,7 @@ Dbdict::alterTable_fromCommitComplete(Si
     sendSignal(SUMA_REF, GSN_ALTER_TAB_REQ, signal,
                AlterTabReq::SignalLength, JBB, &handle);
   }
-  
+
   // older way to notify  wl3600_todo disable to find SUMA problems
   {
     ApiBroadcastRep* api= (ApiBroadcastRep*)signal->getDataPtrSend();
@@ -9752,10 +9739,10 @@ Dbdict::execALTER_TABLE_REF(Signal* sign
 /* ---------------------------------------------------------------- */
 /* **************************************************************** */
 
-void Dbdict::execGET_TABLEDID_REQ(Signal * signal) 
+void Dbdict::execGET_TABLEDID_REQ(Signal * signal)
 {
   jamEntry();
-  ndbrequire(signal->getNoOfSections() == 1);  
+  ndbrequire(signal->getNoOfSections() == 1);
   GetTableIdReq const * req = (GetTableIdReq *)signal->getDataPtr();
   Uint32 senderData = req->senderData;
   Uint32 senderRef = req->senderRef;
@@ -9764,8 +9751,8 @@ void Dbdict::execGET_TABLEDID_REQ(Signal
   if(len>PATH_MAX)
   {
     jam();
-    sendGET_TABLEID_REF((Signal*)signal, 
-			(GetTableIdReq *)req, 
+    sendGET_TABLEID_REF((Signal*)signal,
+			(GetTableIdReq *)req,
 			GetTableIdRef::TableNameTooLong);
     return;
   }
@@ -9776,31 +9763,31 @@ void Dbdict::execGET_TABLEDID_REQ(Signal
   handle.getSection(ssPtr,GetTableIdReq::TABLE_NAME);
   copy((Uint32*)tableName, ssPtr);
   releaseSections(handle);
-    
+
   DictObject * obj_ptr_p = get_object(tableName, len);
   if(obj_ptr_p == 0 || !DictTabInfo::isTable(obj_ptr_p->m_type)){
     jam();
-    sendGET_TABLEID_REF(signal, 
-			(GetTableIdReq *)req, 
+    sendGET_TABLEID_REF(signal,
+			(GetTableIdReq *)req,
 			GetTableIdRef::TableNotDefined);
     return;
   }
 
   TableRecordPtr tablePtr;
-  c_tableRecordPool.getPtr(tablePtr, obj_ptr_p->m_id); 
-  
+  c_tableRecordPool.getPtr(tablePtr, obj_ptr_p->m_id);
+
   GetTableIdConf * conf = (GetTableIdConf *)req;
   conf->tableId = tablePtr.p->tableId;
   conf->schemaVersion = tablePtr.p->tableVersion;
   conf->senderData = senderData;
   sendSignal(senderRef, GSN_GET_TABLEID_CONF, signal,
-	     GetTableIdConf::SignalLength, JBB);  
+	     GetTableIdConf::SignalLength, JBB);
 }
 
 
-void Dbdict::sendGET_TABLEID_REF(Signal* signal, 
+void Dbdict::sendGET_TABLEID_REF(Signal* signal,
 				 GetTableIdReq * req,
-				 GetTableIdRef::ErrorCode errorCode) 
+				 GetTableIdRef::ErrorCode errorCode)
 {
   GetTableIdRef * const ref = (GetTableIdRef *)req;
   /**
@@ -9808,20 +9795,20 @@ void Dbdict::sendGET_TABLEID_REF(Signal*
    */
   BlockReference retRef = req->senderRef;
   ref->err = errorCode;
-  sendSignal(retRef, GSN_GET_TABLEID_REF, signal, 
+  sendSignal(retRef, GSN_GET_TABLEID_REF, signal,
 	     GetTableIdRef::SignalLength, JBB);
 }
 
 /* ---------------------------------------------------------------- */
 // Get a full table description.
 /* ---------------------------------------------------------------- */
-void Dbdict::execGET_TABINFOREQ(Signal* signal) 
+void Dbdict::execGET_TABINFOREQ(Signal* signal)
 {
   jamEntry();
-  if(!assembleFragments(signal)) 
-  { 
+  if(!assembleFragments(signal))
+  {
     return;
-  }  
+  }
 
   GetTabInfoReq * const req = (GetTabInfoReq *)&signal->theData[0];
   SectionHandle handle(this, signal);
@@ -9831,26 +9818,26 @@ void Dbdict::execGET_TABINFOREQ(Signal*
   * it is one from the time queue
    */
   bool fromTimeQueue = (signal->senderBlockRef() == reference());
-  
+
   if (c_retrieveRecord.busyState && fromTimeQueue == true) {
     jam();
-    
-    sendSignalWithDelay(reference(), GSN_GET_TABINFOREQ, signal, 30, 
+
+    sendSignalWithDelay(reference(), GSN_GET_TABINFOREQ, signal, 30,
 			signal->length(),
 			&handle);
     return;
   }//if
 
   const Uint32 MAX_WAITERS = 5;
-  
+
   if(c_retrieveRecord.busyState && fromTimeQueue == false)
   {
     jam();
     if(c_retrieveRecord.noOfWaiters < MAX_WAITERS){
       jam();
       c_retrieveRecord.noOfWaiters++;
-      
-      sendSignalWithDelay(reference(), GSN_GET_TABINFOREQ, signal, 30, 
+
+      sendSignalWithDelay(reference(), GSN_GET_TABINFOREQ, signal, 30,
 			  signal->length(),
 			  &handle);
       return;
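
execGET_TABINFOREQ above throttles dictionary reads: while c_retrieveRecord.busyState is set, a request that already came back from the time queue is simply re-delayed, a fresh request is parked (up to MAX_WAITERS = 5) by re-sending it to this block with a 30 ms delay, and anything beyond that limit is answered with GetTabInfoRef::Busy; when a parked request finally gets served, noOfWaiters is decremented. A small stand-alone model of that throttle; the Throttle class and its string results are illustrative stand-ins, not the signal machinery:

    #include <cstdio>

    struct Throttle {
      bool busy;
      unsigned waiters;
      static const unsigned MAX_WAITERS = 5;

      Throttle() : busy(false), waiters(0) {}

      // Mirrors the three outcomes above: serve now, re-queue with a delay, or refuse.
      const char* request(bool fromRetryQueue) {
        if (busy && fromRetryQueue)        return "retry later";   // keep it in the time queue
        if (busy && waiters < MAX_WAITERS) { waiters++; return "retry later"; }
        if (busy)                          return "busy";          // GetTabInfoRef::Busy
        if (fromRetryQueue && waiters > 0) waiters--;
        busy = true;                       // start serving this request
        return "served";
      }
      void done() { busy = false; }
    };

    int main() {
      Throttle t;
      std::printf("%s\n", t.request(false)); // served
      std::printf("%s\n", t.request(false)); // retry later (parked as a waiter)
      t.done();
      std::printf("%s\n", t.request(true));  // served when it comes back from the queue
      return 0;
    }
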
@@ -9859,11 +9846,11 @@ void Dbdict::execGET_TABINFOREQ(Signal*
     sendGET_TABINFOREF(signal, req, GetTabInfoRef::Busy, __LINE__);
     return;
   }
-  
+
   if(fromTimeQueue){
     jam();
     c_retrieveRecord.noOfWaiters--;
-  } 
+  }
 
   const bool useLongSig = (req->requestType & GetTabInfoReq::LongSignalConf);
   const bool byName = (req->requestType & GetTabInfoReq::RequestByName);
@@ -9874,7 +9861,7 @@ void Dbdict::execGET_TABINFOREQ(Signal*
     jam();
     ndbrequire(handle.m_cnt == 1);
     const Uint32 len = req->tableNameLen;
-    
+
     if(len > PATH_MAX){
       jam();
       releaseSections(handle);
@@ -9886,7 +9873,7 @@ void Dbdict::execGET_TABINFOREQ(Signal*
     SegmentedSectionPtr ssPtr;
     handle.getSection(ssPtr,GetTabInfoReq::TABLE_NAME);
     copy(tableName, ssPtr);
-    
+
     DictObject * old_ptr_p = get_object((char*)tableName, len);
     if(old_ptr_p)
       obj_id = old_ptr_p->m_id;
@@ -9900,7 +9887,7 @@ void Dbdict::execGET_TABINFOREQ(Signal*
   if(obj_id != RNIL)
   {
     XSchemaFile * xsf = &c_schemaFile[SchemaRecord::NEW_SCHEMA_FILE];
-    objEntry = getTableEntry(xsf, obj_id);      
+    objEntry = getTableEntry(xsf, obj_id);
   }
 
  // The table searched for was not found
@@ -10012,7 +9999,7 @@ void Dbdict::execGET_TABINFOREQ(Signal*
   sendSignal(reference(), GSN_CONTINUEB, signal, len, JBB);
 }//execGET_TABINFOREQ()
 
-void Dbdict::sendGetTabResponse(Signal* signal) 
+void Dbdict::sendGetTabResponse(Signal* signal)
 {
   PageRecordPtr pagePtr;
   DictTabInfo * const conf = (DictTabInfo *)&signal->theData[0];
@@ -10023,7 +10010,7 @@ void Dbdict::sendGetTabResponse(Signal*
 
   c_pageRecordArray.getPtr(pagePtr, c_retrieveRecord.retrievePage);
   Uint32* pagePointer = (Uint32*)&pagePtr.p->word[0] + ZPAGE_HEADER_SIZE;
-  
+
   if(c_retrieveRecord.m_useLongSig){
     jam();
     GetTabInfoConf* conf = (GetTabInfoConf*)signal->getDataPtr();
@@ -10039,7 +10026,7 @@ void Dbdict::sendGetTabResponse(Signal*
     ptr[0].sz = c_retrieveRecord.retrievedNoOfWords;
     sendFragmentedSignal(c_retrieveRecord.blockRef,
 			 GSN_GET_TABINFO_CONF,
-			 signal, 
+			 signal,
 			 GetTabInfoConf::SignalLength,
 			 JBB,
 			 ptr,
@@ -10051,7 +10038,7 @@ void Dbdict::sendGetTabResponse(Signal*
   ndbrequire(false);
 }//sendGetTabResponse()
 
-void Dbdict::sendGET_TABINFOREF(Signal* signal, 
+void Dbdict::sendGET_TABINFOREF(Signal* signal,
 				GetTabInfoReq * req,
 				GetTabInfoRef::ErrorCode errorCode,
                                 Uint32 errorLine)
@@ -10067,7 +10054,7 @@ void Dbdict::sendGET_TABINFOREF(Signal*
   ref->schemaTransId = req_copy.schemaTransId;
   ref->errorCode = (Uint32)errorCode;
   ref->errorLine = errorLine;
-  
+
   BlockReference retRef = req_copy.senderRef;
   sendSignal(retRef, GSN_GET_TABINFOREF, signal,
              GetTabInfoRef::SignalLength, JBB);
@@ -10104,7 +10091,7 @@ void Dbdict::sendOLD_LIST_TABLES_CONF(Si
   conf->counter = 0;
   Uint32 pos = 0;
 
-  DLHashTable<DictObject>::Iterator iter;
+  DictObject_hash::Iterator iter;
   bool ok = c_obj_hash.first(iter);
   for(; ok; ok = c_obj_hash.next(iter)){
     Uint32 type = iter.curr.p->m_type;
@@ -10120,7 +10107,7 @@ void Dbdict::sendOLD_LIST_TABLES_CONF(Si
 
       if(reqListIndexes && (reqTableId != tablePtr.p->primaryTableId))
 	continue;
-      
+
       conf->tableData[pos] = 0;
       conf->setTableId(pos, tablePtr.i); // id
       conf->setTableType(pos, type); // type
@@ -10132,7 +10119,7 @@ void Dbdict::sendOLD_LIST_TABLES_CONF(Si
         switch(te->m_tableState){
         case SchemaFile::SF_CREATE:
           jam();
-          conf->setTableState(pos, DictTabInfo::StateBuilding);          
+          conf->setTableState(pos, DictTabInfo::StateBuilding);
           break;
         case SchemaFile::SF_ALTER:
           jam();
@@ -10234,7 +10221,7 @@ void Dbdict::sendOLD_LIST_TABLES_CONF(Si
       conf->setTableState(pos, DictTabInfo::StateOnline); // XXX todo
       pos++;
     }
-    
+
     if (pos >= OldListTablesConf::DataLength) {
       sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
 		 OldListTablesConf::SignalLength, JBB);
@@ -10244,8 +10231,8 @@ void Dbdict::sendOLD_LIST_TABLES_CONF(Si
 
     if (! reqListNames)
       continue;
-    
-    Rope name(c_rope_pool, iter.curr.p->m_name);
+
+    LocalRope name(c_rope_pool, iter.curr.p->m_name);
     const Uint32 size = name.size();
     conf->tableData[pos] = size;
     pos++;
@@ -10292,7 +10279,7 @@ void Dbdict::sendLIST_TABLES_CONF(Signal
   XSchemaFile * xsf = &c_schemaFile[SchemaRecord::NEW_SCHEMA_FILE];
   NodeReceiverGroup rg(senderRef);
 
-  DLHashTable<DictObject>::Iterator iter;
+  DictObject_hash::Iterator iter;
   bool done = !c_obj_hash.first(iter);
 
   if (done)
@@ -10465,7 +10452,7 @@ void Dbdict::sendLIST_TABLES_CONF(Signal
     if (reqListNames)
     {
       jam();
-      Rope name(c_rope_pool, iter.curr.p->m_name);
+      LocalRope name(c_rope_pool, iter.curr.p->m_name);
       const Uint32 size = name.size(); // String length including \0
       const Uint32 wsize = (size + 3) / 4;
       tableNamesWriter.putWord(size);
@@ -10844,7 +10831,7 @@ Dbdict::createIndex_parse(Signal* signal
 
       // find the attribute
       {
-        LocalDLFifoList<AttributeRecord>
+        LocalAttributeRecord_list
           list(c_attributeRecordPool, tablePtr.p->m_attributes);
         list.first(attrPtr);
         while (!attrPtr.isNull()) {
@@ -11002,7 +10989,7 @@ Dbdict::createIndex_toCreateTable(Signal
   }
   w.add(DictTabInfo::FragmentTypeVal, createIndexPtr.p->m_fragmentType);
   w.add(DictTabInfo::TableTypeVal, createIndexPtr.p->m_request.indexType);
-  { Rope name(c_rope_pool, tablePtr.p->tableName);
+  { LocalRope name(c_rope_pool, tablePtr.p->tableName);
     char tableName[MAX_TAB_NAME_SIZE];
     name.copy(tableName);
     w.add(DictTabInfo::PrimaryTable, tableName);
@@ -11027,7 +11014,7 @@ Dbdict::createIndex_toCreateTable(Signal
     AttributeRecordPtr attrPtr;
     c_attributeRecordPool.getPtr(attrPtr, attrMap[k].attr_ptr_i);
 
-    { Rope attrName(c_rope_pool, attrPtr.p->attributeName);
+    { LocalRope attrName(c_rope_pool, attrPtr.p->attributeName);
       char attributeName[MAX_ATTR_NAME_SIZE];
       attrName.copy(attributeName);
       w.add(DictTabInfo::AttributeName, attributeName);
@@ -11061,7 +11048,7 @@ Dbdict::createIndex_toCreateTable(Signal
     jam();
     Uint32 key_type = NDB_ARRAYTYPE_FIXED;
     AttributeRecordPtr attrPtr;
-    LocalDLFifoList<AttributeRecord> list(c_attributeRecordPool,
+    LocalAttributeRecord_list list(c_attributeRecordPool,
                                           tablePtr.p->m_attributes);
     // XXX move to parse
     for (list.first(attrPtr); !attrPtr.isNull(); list.next(attrPtr))
@@ -11075,7 +11062,7 @@ Dbdict::createIndex_toCreateTable(Signal
         {
           jam();
           /**
-           * We can only set this new array type "globally" if 
+           * We can only set this new array type "globally" if
            *   version >= X, this to allow down-grade(s) within minor versions
            *   if unique index has been added in newer version
            *
@@ -12046,7 +12033,7 @@ Dbdict::alterIndex_parse(Signal* signal,
        *       or if prev op is AlterIndex using baseop.p->m_base_op_ptr_i
        *   (i.e recursivly, assuming that no operation can come inbetween)
        */
-      Ptr<SchemaOp> baseop = op_ptr;
+      SchemaOpPtr baseop = op_ptr;
       LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
       ndbrequire(list.prev(baseop));
       Uint32 sz = sizeof(baseop.p->m_oprec_ptr.p->m_opType);
@@ -12783,9 +12770,9 @@ Dbdict::alterIndex_fromAddPartitions(Sig
 
     const AlterTabConf* conf =
       (const AlterTabConf*)signal->getDataPtr();
-    
+
     alterIndexPtr.p->m_dihAddFragPtr = conf->connectPtr;
-    
+
     sendTransConf(signal, op_ptr);
   } else {
     jam();
@@ -13732,7 +13719,7 @@ Dbdict::buildIndex_complete(Signal* sign
   jam();
   sendTransConf(signal, op_ptr);
 }
-  
+
 // BuildIndex: ABORT
 
 void
@@ -14707,7 +14694,7 @@ Dbdict::copyData_prepare(Signal* signal,
   TableRecordPtr tabPtr;
   c_tableRecordPool.getPtr(tabPtr, impl_req->srcTableId);
   {
-    LocalDLFifoList<AttributeRecord> alist(c_attributeRecordPool,
+    LocalAttributeRecord_list alist(c_attributeRecordPool,
                                            tabPtr.p->m_attributes);
     AttributeRecordPtr attrPtr;
     for (alist.first(attrPtr); !attrPtr.isNull(); alist.next(attrPtr))
@@ -14726,14 +14713,14 @@ Dbdict::copyData_prepare(Signal* signal,
       }
     }
   }
-  
+
   /* Request Tup-ordered copy when we have disk columns for efficiency */
   if (tabHasDiskCols)
   {
     jam();
     req->requestInfo |= CopyDataReq::TupOrder;
   }
-  
+
   LinearSectionPtr ls_ptr[3];
   ls_ptr[0].sz = cnt;
   ls_ptr[0].p = tmp;
@@ -14755,7 +14742,7 @@ Dbdict::copyData_fromLocal(Signal* signa
     CLEAR_ERROR_INSERT_VALUE;
     ret = 1;
   }
-  
+
   if (ret == 0) {
     jam();
     sendTransConf(signal, op_ptr);
@@ -14808,7 +14795,7 @@ Dbdict::copyData_complete(Signal* signal
   TableRecordPtr tabPtr;
   c_tableRecordPool.getPtr(tabPtr, impl_req->srcTableId);
   {
-    LocalDLFifoList<AttributeRecord> alist(c_attributeRecordPool,
+    LocalAttributeRecord_list alist(c_attributeRecordPool,
                                            tabPtr.p->m_attributes);
     AttributeRecordPtr attrPtr;
     for (alist.first(attrPtr); !attrPtr.isNull(); alist.next(attrPtr))
@@ -14903,10 +14890,10 @@ Dbdict::execCOPY_DATA_IMPL_REF(Signal* s
 
 int
 Dbdict::sendSignalUtilReq(Callback *pcallback,
-			  BlockReference ref, 
-			  GlobalSignalNumber gsn, 
-			  Signal* signal, 
-			  Uint32 length, 
+			  BlockReference ref,
+			  GlobalSignalNumber gsn,
+			  Signal* signal,
+			  Uint32 length,
 			  JobBufferLevel jbuf,
 			  LinearSectionPtr ptr[3],
 			  Uint32 noOfSections)
@@ -14970,7 +14957,7 @@ void
 Dbdict::execUTIL_PREPARE_REF(Signal *signal)
 {
   jamEntry();
-  const UtilPrepareRef * ref = CAST_CONSTPTR(UtilPrepareRef, 
+  const UtilPrepareRef * ref = CAST_CONSTPTR(UtilPrepareRef,
                                              signal->getDataPtr());
   Uint32 code = ref->errorCode;
   if (code == UtilPrepareRef::DICT_TAB_INFO_ERROR)
@@ -14999,7 +14986,7 @@ void Dbdict::execUTIL_EXECUTE_REF(Signal
   ndbout_c("errorCode %u",ref->getErrorCode());
   ndbout_c("TCErrorCode %u",ref->getTCErrorCode());
 #endif
-  
+
   ndbrequire(recvSignalUtilReq(signal, 1) == 0);
 }
 void Dbdict::execUTIL_RELEASE_CONF(Signal *signal)
@@ -15021,7 +15008,7 @@ void Dbdict::execUTIL_RELEASE_REF(Signal
  * MODULE: Create event
  *
  * Create event in DICT.
- * 
+ *
  *
  * Request type in CREATE_EVNT signals:
  *
@@ -15079,7 +15066,7 @@ Dbdict::prepareTransactionEventSysTable
   TableRecordPtr tablePtr;
   c_tableRecordPool.getPtr(tablePtr, opj_ptr_p->m_id);
   ndbrequire(tablePtr.i != RNIL); // system table must exist
-  
+
   Uint32 tableId = tablePtr.p->tableId; /* System table */
   Uint32 noAttr = tablePtr.p->noOfAttributes;
   if (noAttr > EVENT_SYSTEM_TABLE_LENGTH)
@@ -15118,9 +15105,9 @@ Dbdict::prepareUtilTransaction(Callback
   jam();
   EVENT_TRACE;
 
-  UtilPrepareReq * utilPrepareReq = 
+  UtilPrepareReq * utilPrepareReq =
     (UtilPrepareReq *)signal->getDataPtrSend();
-  
+
   utilPrepareReq->setSenderRef(reference());
   utilPrepareReq->setSenderData(senderData);
 
@@ -15162,7 +15149,7 @@ Dbdict::prepareUtilTransaction(Callback
   sectionsPtr[UtilPrepareReq::PROPERTIES_SECTION].sz = w.getWordsUsed();
 
   sendSignalUtilReq(pcallback, DBUTIL_REF, GSN_UTIL_PREPARE_REQ, signal,
-		    UtilPrepareReq::SignalLength, JBB, 
+		    UtilPrepareReq::SignalLength, JBB,
 		    sectionsPtr, UtilPrepareReq::NoOfSections);
 }
 
@@ -15278,7 +15265,7 @@ Dbdict::execCREATE_EVNT_REQ(Signal* sign
   {
     jam();
     releaseSections(handle);
-    
+
     CreateEvntRef * ref = (CreateEvntRef *)signal->getDataPtrSend();
     ref->setUserRef(reference());
     ref->setErrorCode(CreateEvntRef::NotMaster);
@@ -15310,7 +15297,7 @@ Dbdict::execCREATE_EVNT_REQ(Signal* sign
 #ifdef EVENT_DEBUG
   ndbout_c("DBDICT::execCREATE_EVNT_REQ from %u evntRecId = (%d)", refToNode(signal->getSendersBlockRef()), evntRecPtr.i);
 #endif
-  
+
   ndbrequire(req->getUserRef() == signal->getSendersBlockRef());
 
   evntRecPtr.p->init(req,this);
@@ -15349,11 +15336,11 @@ Dbdict::execCREATE_EVNT_REQ(Signal* sign
 #endif
   jam();
   releaseSections(handle);
-    
+
   evntRecPtr.p->m_errorCode = 1;
   evntRecPtr.p->m_errorLine = __LINE__;
   evntRecPtr.p->m_errorNode = reference();
-  
+
   createEvent_sendReply(signal, evntRecPtr);
 }
 
@@ -15375,7 +15362,7 @@ Dbdict::createEvent_RT_USER_CREATE(Signa
 #ifdef EVENT_DEBUG
   ndbout << "Dbdict.cpp: Dbdict::execCREATE_EVNT_REQ RT_USER" << endl;
   char buf[128] = {0};
-  AttributeMask mask = evntRecPtr.p->m_request.getAttrListBitmask(); 
+  AttributeMask mask = evntRecPtr.p->m_request.getAttrListBitmask();
   mask.getText(buf);
   ndbout_c("mask = %s", buf);
 #endif
@@ -15421,12 +15408,12 @@ Dbdict::createEvent_RT_USER_CREATE(Signa
       (r0.getValueType() != SimpleProperties::StringValue) ||
       (r0.getValueLen() <= 0)) {
     jam();
-    
+
     evntRecPtr.p->m_errorCode = 1;
 sendref:
     evntRecPtr.p->m_errorLine = __LINE__;
     evntRecPtr.p->m_errorNode = reference();
-    
+
     releaseSections(handle);
 
     createEvent_sendReply(signal, evntRecPtr);
@@ -15473,19 +15460,19 @@ sendref:
   }
 
   releaseSections(handle);
-  
+
   // Send request to SUMA
 
   CreateSubscriptionIdReq * sumaIdReq =
     (CreateSubscriptionIdReq *)signal->getDataPtrSend();
-  
+
   // make sure we save the original sender for later
   sumaIdReq->senderRef  = reference();
   sumaIdReq->senderData = evntRecPtr.i;
 #ifdef EVENT_DEBUG
   ndbout << "sumaIdReq->senderData = " << sumaIdReq->senderData << endl;
 #endif
-  sendSignal(SUMA_REF, GSN_CREATE_SUBID_REQ, signal, 
+  sendSignal(SUMA_REF, GSN_CREATE_SUBID_REQ, signal,
 	     CreateSubscriptionIdReq::SignalLength, JBB);
   // we should now return in either execCREATE_SUBID_CONF
   // or execCREATE_SUBID_REF
@@ -15601,7 +15588,7 @@ void interpretUtilPrepareErrorCode(UtilP
   DBUG_VOID_RETURN;
 }
 
-void 
+void
 Dbdict::createEventUTIL_PREPARE(Signal* signal,
 				Uint32 callbackData,
 				Uint32 returnCode)
@@ -15614,9 +15601,9 @@ Dbdict::createEventUTIL_PREPARE(Signal*
     jam();
     evntRecPtr.i = req->getSenderData();
     const Uint32 prepareId = req->getPrepareId();
-    
+
     ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
-    
+
     Callback c = { safe_cast(&Dbdict::createEventUTIL_EXECUTE), 0 };
 
     switch (evntRecPtr.p->m_requestType) {
@@ -15640,7 +15627,7 @@ Dbdict::createEventUTIL_PREPARE(Signal*
 		    evntRecPtr.p->m_eventRec.TABLE_NAME,
 		    evntRecPtr.p->m_eventRec.TABLEID,
 		    evntRecPtr.p->m_eventRec.TABLEVERSION));
-  
+
       }
       jam();
       executeTransEventSysTable(&c, signal,
@@ -15784,13 +15771,13 @@ void Dbdict::executeTransEventSysTable(C
   default:
     ndbrequire(false);
   }
-    
+
   LinearSectionPtr headerPtr;
   LinearSectionPtr lsdataPtr;
-    
+
   headerPtr.p = attrHdr;
   headerPtr.sz = id;
-    
+
   lsdataPtr.p = attrHdr + noAttr;
   lsdataPtr.sz = total_len/4;
 
@@ -15799,14 +15786,14 @@ void Dbdict::executeTransEventSysTable(C
     for(int i = 0; i < (int)headerPtr.sz; i++)
       printf("H'%.8x ", attrHdr[i]);
     printf("\n");
-    
+
     printf("Data size %u\n", lsdataPtr.sz);
     for(int i = 0; i < (int)lsdataPtr.sz; i++)
       printf("H'%.8x ", dataPage[i]);
     printf("\n");
 #endif
 
-  executeTransaction(pcallback, signal, 
+  executeTransaction(pcallback, signal,
 		     ptrI,
 		     prepareId,
 		     id,
@@ -15815,7 +15802,7 @@ void Dbdict::executeTransEventSysTable(C
 }
 
 void Dbdict::executeTransaction(Callback *pcallback,
-				Signal* signal, 
+				Signal* signal,
 				Uint32 senderData,
 				Uint32 prepareId,
 				Uint32 noAttr,
@@ -15825,7 +15812,7 @@ void Dbdict::executeTransaction(Callback
   jam();
   EVENT_TRACE;
 
-  UtilExecuteReq * utilExecuteReq = 
+  UtilExecuteReq * utilExecuteReq =
     (UtilExecuteReq *)signal->getDataPtrSend();
 
   utilExecuteReq->setSenderRef(reference());
@@ -15838,7 +15825,7 @@ void Dbdict::executeTransaction(Callback
   for(int i = 0; i < (int)headerPtr.sz; i++)
     printf("H'%.8x ", headerBuffer[i]);
   printf("\n");
-  
+
   printf("Data size %u\n", dataPtr.sz);
   for(int i = 0; i < (int)dataPtr.sz; i++)
     printf("H'%.8x ", dataBuffer[i]);
@@ -15864,10 +15851,10 @@ void Dbdict::parseReadEventSys(Signal* s
 
   handle.getSection(headerPtr, UtilExecuteReq::HEADER_SECTION);
   SectionReader headerReader(headerPtr, getSectionSegmentPool());
-      
+
   handle.getSection(dataPtr, UtilExecuteReq::DATA_SECTION);
   SectionReader dataReader(dataPtr, getSectionSegmentPool());
-  
+
   char *base = (char*)&m_eventRec;
 
   DictObject * opj_ptr_p = get_object(EVENT_SYSTEM_TABLE_NAME,
@@ -15916,7 +15903,7 @@ void Dbdict::parseReadEventSys(Signal* s
   }
 }
 
-void Dbdict::createEventUTIL_EXECUTE(Signal *signal, 
+void Dbdict::createEventUTIL_EXECUTE(Signal *signal,
 				     Uint32 callbackData,
 				     Uint32 returnCode)
 {
@@ -15928,10 +15915,10 @@ void Dbdict::createEventUTIL_EXECUTE(Sig
     jam();
     OpCreateEventPtr evntRecPtr;
     evntRecPtr.i = conf->getSenderData();
-    
+
     ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
     OpCreateEvent *evntRec = evntRecPtr.p;
-    
+
     switch (evntRec->m_requestType) {
     case CreateEvntReq::RT_USER_GET: {
       parseReadEventSys(signal, evntRecPtr.p->m_eventRec);
@@ -15951,7 +15938,7 @@ void Dbdict::createEventUTIL_EXECUTE(Sig
 		  evntRecPtr.p->m_eventRec.TABLE_NAME,
 		  evntRecPtr.p->m_eventRec.TABLEID,
 		  evntRecPtr.p->m_eventRec.TABLEVERSION));
-      
+
       // find table id for event table
       DictObject* obj_ptr_p = get_object(evntRecPtr.p->m_eventRec.TABLE_NAME);
       if(!obj_ptr_p){
@@ -15963,12 +15950,12 @@ void Dbdict::createEventUTIL_EXECUTE(Sig
 	createEvent_sendReply(signal, evntRecPtr);
 	return;
       }
-      
+
       TableRecordPtr tablePtr;
       c_tableRecordPool.getPtr(tablePtr, obj_ptr_p->m_id);
       evntRec->m_request.setTableId(tablePtr.p->tableId);
       evntRec->m_request.setTableVersion(tablePtr.p->tableVersion);
-      
+
       createEventComplete_RT_USER_GET(signal, evntRecPtr);
       return;
     }
@@ -16015,7 +16002,7 @@ void Dbdict::createEventUTIL_EXECUTE(Sig
       evntRecPtr.p->m_errorCode = ref->getErrorCode();
       break;
     }
-    
+
     createEvent_sendReply(signal, evntRecPtr);
   }
 }
@@ -16061,14 +16048,14 @@ Dbdict::createEvent_RT_USER_GET(Signal*
   r0.getString(evntRecPtr.p->m_eventRec.NAME);
   int len = (int)strlen(evntRecPtr.p->m_eventRec.NAME);
   memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len);
-  
+
   releaseSections(handle);
-  
+
   Callback c = { safe_cast(&Dbdict::createEventUTIL_PREPARE), 0 };
-  
+
   prepareTransactionEventSysTable(&c, signal, evntRecPtr.i,
 				  UtilPrepareReq::Read);
-  /* 
+  /*
    * Will read systable and fill an OpCreateEventPtr
    * and return below
    */
@@ -16081,20 +16068,20 @@ Dbdict::createEventComplete_RT_USER_GET(
 
   // Send to oneself and the other DICT's
   CreateEvntReq * req = (CreateEvntReq *)signal->getDataPtrSend();
-      
+
   *req = evntRecPtr.p->m_request;
   req->senderRef = reference();
   req->senderData = evntRecPtr.i;
-      
+
   req->addRequestFlag(CreateEvntReq::RT_DICT_AFTER_GET);
-      
+
 #ifdef EVENT_PH2_DEBUG
   ndbout_c("DBDICT(Coordinator) sending GSN_CREATE_EVNT_REQ::RT_DICT_AFTER_GET to DBDICT participants evntRecPtr.i = (%d)", evntRecPtr.i);
 #endif
 
   NodeReceiverGroup rg(DBDICT, c_aliveNodes);
   RequestTracker & p = evntRecPtr.p->m_reqTracker;
-  if (!p.init<CreateEvntRef>(c_counterMgr, rg, GSN_CREATE_EVNT_REF, 
+  if (!p.init<CreateEvntRef>(c_counterMgr, rg, GSN_CREATE_EVNT_REF,
 			     evntRecPtr.i))
   {
     jam();
@@ -16115,9 +16102,9 @@ Dbdict::createEvent_nodeFailCallback(Sig
   createEvent_sendReply(signal, evntRecPtr);
 }
 
-void Dbdict::execCREATE_EVNT_REF(Signal* signal) 
+void Dbdict::execCREATE_EVNT_REF(Signal* signal)
 {
-  jamEntry();      
+  jamEntry();
   EVENT_TRACE;
   CreateEvntRef * const ref = (CreateEvntRef *)signal->getDataPtr();
   OpCreateEventPtr evntRecPtr;
@@ -16136,7 +16123,7 @@ void Dbdict::execCREATE_EVNT_REF(Signal*
   if (ref->errorCode == CreateEvntRef::NF_FakeErrorREF)
   {
     jam();
-    evntRecPtr.p->m_reqTracker.ignoreRef(c_counterMgr, 
+    evntRecPtr.p->m_reqTracker.ignoreRef(c_counterMgr,
                                          refToNode(ref->senderRef));
 
     /**
@@ -16160,11 +16147,11 @@ void Dbdict::execCREATE_EVNT_REF(Signal*
       noLSP = 2;
     }
   }
-  else 
+  else
   {
     jam();
     evntRecPtr.p->m_errorCode = ref->errorCode;
-    evntRecPtr.p->m_reqTracker.reportRef(c_counterMgr, 
+    evntRecPtr.p->m_reqTracker.reportRef(c_counterMgr,
                                          refToNode(ref->senderRef));
   }
 
@@ -16215,7 +16202,7 @@ Dbdict::createEvent_RT_DICT_AFTER_GET(Si
   DBUG_ENTER("Dbdict::createEvent_RT_DICT_AFTER_GET");
   jam();
   evntRecPtr.p->m_request.setUserRef(signal->senderBlockRef());
-  
+
 #ifdef EVENT_PH2_DEBUG
   ndbout_c("DBDICT(Participant) got CREATE_EVNT_REQ::RT_DICT_AFTER_GET evntRecPtr.i = (%d)", evntRecPtr.i);
 #endif
@@ -16229,7 +16216,7 @@ Dbdict::createEvent_RT_DICT_AFTER_GET(Si
   CRASH_INSERTION2(6009, getOwnNodeId() != c_masterNodeId);
 
   SubCreateReq * sumaReq = (SubCreateReq *)signal->getDataPtrSend();
-  
+
   sumaReq->senderRef        = reference(); // reference to DICT
   sumaReq->senderData       = evntRecPtr.i;
   sumaReq->subscriptionId   = evntRecPtr.p->m_request.getEventId();
@@ -16245,7 +16232,7 @@ Dbdict::createEvent_RT_DICT_AFTER_GET(Si
   }
   sumaReq->tableId          = evntRecPtr.p->m_request.getTableId();
   sumaReq->schemaTransId    = 0;
-    
+
 #ifdef EVENT_PH2_DEBUG
   ndbout_c("sending GSN_SUB_CREATE_REQ");
 #endif
@@ -16336,7 +16323,7 @@ void Dbdict::createEvent_sendReply(Signa
       evntRecPtr.p->m_errorLine = __LINE__;
       evntRecPtr.p->m_errorNode = reference();
       jam();
-    } else 
+    } else
       jam();
   }
 
@@ -16350,7 +16337,7 @@ void Dbdict::createEvent_sendReply(Signa
     jam();
     EVENT_TRACE;
     CreateEvntRef * ret = (CreateEvntRef *)signal->getDataPtrSend();
-    
+
     ret->setEventId(evntRecPtr.p->m_request.getEventId());
     ret->setEventKey(evntRecPtr.p->m_request.getEventKey());
     ret->setUserData(evntRecPtr.p->m_request.getUserData());
@@ -16376,7 +16363,7 @@ void Dbdict::createEvent_sendReply(Signa
     jam();
     EVENT_TRACE;
     CreateEvntConf * evntConf = (CreateEvntConf *)signal->getDataPtrSend();
-    
+
     evntConf->setEventId(evntRecPtr.p->m_request.getEventId());
     evntConf->setEventKey(evntRecPtr.p->m_request.getEventKey());
     evntConf->setUserData(evntRecPtr.p->m_request.getUserData());
@@ -16471,7 +16458,7 @@ busy:
     subbPtr.p->m_subscriberData = req->subscriberData;
     bzero(subbPtr.p->m_buckets_per_ng, sizeof(subbPtr.p->m_buckets_per_ng));
   }
-  
+
   if (refToBlock(origSenderRef) != DBDICT) {
     /*
      * Coordinator
@@ -16493,7 +16480,7 @@ busy:
       errCode = SubStartRef::BusyWithNR;
       goto busy;
     }
-    
+
     subbPtr.p->m_senderRef = origSenderRef; // not sure if API sets correctly
     NodeReceiverGroup rg(DBDICT, c_aliveNodes);
 
@@ -16504,12 +16491,12 @@ busy:
       errCode = SubStartRef::Busy;
       goto busy;
     }
-    
+
     SubStartReq* req = (SubStartReq*) signal->getDataPtrSend();
-    
+
     req->senderRef  = reference();
     req->senderData = subbPtr.i;
-    
+
 #ifdef EVENT_PH3_DEBUG
     ndbout_c("DBDICT(Coordinator) sending GSN_SUB_START_REQ to DBDICT participants subbPtr.i = (%d)", subbPtr.i);
 #endif
@@ -16541,13 +16528,13 @@ busy:
   ndbrequire(refToBlock(origSenderRef) == DBDICT);
 
   CRASH_INSERTION(6007);
-  
+
   {
     SubStartReq* req = (SubStartReq*) signal->getDataPtrSend();
-    
+
     req->senderRef = reference();
     req->senderData = subbPtr.i;
-    
+
 #ifdef EVENT_PH3_DEBUG
     ndbout_c("DBDICT(Participant) sending GSN_SUB_START_REQ to SUMA subbPtr.i = (%d)", subbPtr.i);
 #endif
@@ -16745,7 +16732,7 @@ void Dbdict::completeSubStartReq(Signal*
 #ifdef EVENT_DEBUG
   ndbout_c("SUB_START_CONF");
 #endif
-  
+
   ndbrequire(c_outstanding_sub_startstop);
   c_outstanding_sub_startstop--;
   SubStartConf* conf = (SubStartConf*)signal->getDataPtrSend();
@@ -16810,7 +16797,7 @@ busy:
       req->requestInfo = 0;
     }
   }
-  
+
   if (refToBlock(origSenderRef) != DBDICT) {
     /*
      * Coordinator
@@ -16846,9 +16833,9 @@ busy:
       errCode = SubStopRef::Busy;
       goto busy;
     }
-    
+
     SubStopReq* req = (SubStopReq*) signal->getDataPtrSend();
-    
+
     req->senderRef  = reference();
     req->senderData = subbPtr.i;
 
@@ -16868,10 +16855,10 @@ busy:
 
   {
     SubStopReq* req = (SubStopReq*) signal->getDataPtrSend();
-    
+
     req->senderRef = reference();
     req->senderData = subbPtr.i;
-    
+
     sendSignal(SUMA_REF, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB);
   }
 }
@@ -17044,7 +17031,7 @@ void Dbdict::completeSubStopReq(Signal*
  * MODULE: Drop event.
  *
  * Drop event.
- * 
+ *
  * TODO
  */
 
@@ -17081,7 +17068,7 @@ Dbdict::execDROP_EVNT_REQ(Signal* signal
     // Failed to allocate event record
     jam();
     releaseSections(handle);
- 
+
     DropEvntRef * ret = (DropEvntRef *)signal->getDataPtrSend();
     ret->setErrorCode(747);
     ret->setErrorLine(__LINE__);
@@ -17132,7 +17119,7 @@ Dbdict::execDROP_EVNT_REQ(Signal* signal
     printf("\n");
 #endif
   }
-  
+
   releaseSections(handle);
 
   Callback c = { safe_cast(&Dbdict::dropEventUTIL_PREPARE_READ), 0 };
@@ -17142,7 +17129,7 @@ Dbdict::execDROP_EVNT_REQ(Signal* signal
   DBUG_VOID_RETURN;
 }
 
-void 
+void
 Dbdict::dropEventUTIL_PREPARE_READ(Signal* signal,
 				   Uint32 callbackData,
 				   Uint32 returnCode)
@@ -17169,7 +17156,7 @@ Dbdict::dropEventUTIL_PREPARE_READ(Signa
 			    prepareId, UtilPrepareReq::Read);
 }
 
-void 
+void
 Dbdict::dropEventUTIL_EXECUTE_READ(Signal* signal,
 				   Uint32 callbackData,
 				   Uint32 returnCode)
@@ -17199,7 +17186,7 @@ Dbdict::dropEventUTIL_EXECUTE_READ(Signa
     dropEvent_sendReply(signal, evntRecPtr);
     return;
   }
-  
+
   SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtrSend();
 
   req->senderRef       = reference();
@@ -17247,7 +17234,7 @@ Dbdict::execSUB_REMOVE_REQ(Signal* signa
   }
 
   CRASH_INSERTION2(6010, getOwnNodeId() != c_masterNodeId);
-  
+
   SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtrSend();
   req->senderRef = reference();
   req->senderData = subbPtr.i;
@@ -17396,7 +17383,7 @@ Dbdict::completeSubRemoveReq(Signal* sig
 				  UtilPrepareReq::Delete);
 }
 
-void 
+void
 Dbdict::dropEventUTIL_PREPARE_DELETE(Signal* signal,
 				     Uint32 callbackData,
 				     Uint32 returnCode)
@@ -17414,11 +17401,11 @@ Dbdict::dropEventUTIL_PREPARE_DELETE(Sig
   jam();
   evntRecPtr.i = req->getSenderData();
   const Uint32 prepareId = req->getPrepareId();
-  
+
   ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
 #ifdef EVENT_DEBUG
   printf("DropEvntUTIL_PREPARE; evntRecPtr.i len %u\n",evntRecPtr.i);
-#endif    
+#endif
 
   Callback c = { safe_cast(&Dbdict::dropEventUTIL_EXECUTE_DELETE), 0 };
 
@@ -17427,7 +17414,7 @@ Dbdict::dropEventUTIL_PREPARE_DELETE(Sig
 			    prepareId, UtilPrepareReq::Delete);
 }
 
-void 
+void
 Dbdict::dropEventUTIL_EXECUTE_DELETE(Signal* signal,
 				     Uint32 callbackData,
 				     Uint32 returnCode)
@@ -17481,7 +17468,7 @@ Dbdict::dropEventUtilExecuteRef(Signal*
   jam();
   evntRecPtr.i = ref->getSenderData();
   ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
-    
+
   evntRecPtr.p->m_errorNode = reference();
   evntRecPtr.p->m_errorLine = __LINE__;
 
@@ -17516,7 +17503,7 @@ void Dbdict::dropEvent_sendReply(Signal*
   if (evntRecPtr.p->hasError()) {
     jam();
     DropEvntRef * ret = (DropEvntRef *)signal->getDataPtrSend();
-    
+
     ret->setUserData(evntRecPtr.p->m_request.getUserData());
     ret->setUserRef(evntRecPtr.p->m_request.getUserRef());
 
@@ -17529,7 +17516,7 @@ void Dbdict::dropEvent_sendReply(Signal*
   } else {
     jam();
     DropEvntConf * evntConf = (DropEvntConf *)signal->getDataPtrSend();
-    
+
     evntConf->setUserData(evntRecPtr.p->m_request.getUserData());
     evntConf->setUserRef(evntRecPtr.p->m_request.getUserRef());
 
@@ -17822,7 +17809,7 @@ Dbdict::createTrigger_parse(Signal* sign
   }
 
   {
-    Rope name(c_rope_pool, triggerPtr.p->triggerName);
+    LocalRope name(c_rope_pool, triggerPtr.p->triggerName);
     if (!name.assign(createTriggerPtr.p->m_triggerName)) {
       jam();
       setError(error, CreateTrigRef::OutOfStringBuffer, __LINE__);
@@ -17832,7 +17819,7 @@ Dbdict::createTrigger_parse(Signal* sign
 
   // connect to new DictObject
   {
-    Ptr<DictObject> obj_ptr;
+    DictObjectPtr obj_ptr;
     seizeDictObject(op_ptr, obj_ptr, triggerPtr.p->triggerName);
 
     obj_ptr.p->m_id = impl_req->triggerId; // wl3600_todo id
@@ -17914,7 +17901,7 @@ Dbdict::createTrigger_parse_endpoint(Sig
     return;
   }
 
-  Ptr<TriggerRecord> triggerPtr;
+  TriggerRecordPtr triggerPtr;
   c_triggerRecordPool.getPtr(triggerPtr, impl_req->triggerId);
   switch(TriggerInfo::getTriggerType(triggerPtr.p->triggerInfo)){
   case TriggerType::REORG_TRIGGER:
@@ -18648,7 +18635,7 @@ Dbdict::dropTrigger_parse(Signal* signal
     c_triggerRecordPool.getPtr(triggerPtr, impl_req->triggerId);
     // wl3600_todo state check
   }
-  
+
   D("trigger " << copyRope<MAX_TAB_NAME_SIZE>(triggerPtr.p->triggerName));
   impl_req->triggerInfo = triggerPtr.p->triggerInfo;
   Uint32 requestType = impl_req->requestType;
@@ -19114,13 +19101,13 @@ Dbdict::execDROP_TRIG_IMPL_REF(Signal* s
 */
 
 void
-Dbdict::getTableKeyList(TableRecordPtr tablePtr, 
+Dbdict::getTableKeyList(TableRecordPtr tablePtr,
 			Id_array<MAX_ATTRIBUTES_IN_INDEX+1>& list)
 {
   jam();
   list.sz = 0;
   list.id[list.sz++] = AttributeHeader::FRAGMENT;
-  LocalDLFifoList<AttributeRecord> alist(c_attributeRecordPool,
+  LocalAttributeRecord_list alist(c_attributeRecordPool,
                                          tablePtr.p->m_attributes);
   AttributeRecordPtr attrPtr;
   for (alist.first(attrPtr); !attrPtr.isNull(); alist.next(attrPtr)) {
@@ -19150,7 +19137,7 @@ Dbdict::getIndexAttr(TableRecordPtr inde
     tmp.copy(name);
     len = tmp.size();
   }
-  LocalDLFifoList<AttributeRecord> alist(c_attributeRecordPool, 
+  LocalAttributeRecord_list alist(c_attributeRecordPool,
 					 tablePtr.p->m_attributes);
   for (alist.first(attrPtr); !attrPtr.isNull(); alist.next(attrPtr)){
     ConstRope tmp(c_rope_pool, attrPtr.p->attributeName);
@@ -19170,7 +19157,7 @@ Dbdict::getIndexAttrList(TableRecordPtr
   memset(list.id, 0, sizeof(list.id));
   ndbrequire(indexPtr.p->noOfAttributes >= 2);
 
-  LocalDLFifoList<AttributeRecord> alist(c_attributeRecordPool,
+  LocalAttributeRecord_list alist(c_attributeRecordPool,
                                          indexPtr.p->m_attributes);
   AttributeRecordPtr attrPtr;
   for (alist.first(attrPtr); !attrPtr.isNull(); alist.next(attrPtr)) {
@@ -19194,12 +19181,12 @@ Dbdict::getIndexAttrMask(TableRecordPtr
   jam();
   mask.clear();
   ndbrequire(indexPtr.p->noOfAttributes >= 2);
-  
+
   AttributeRecordPtr attrPtr, currPtr;
-  LocalDLFifoList<AttributeRecord> alist(c_attributeRecordPool, 
+  LocalAttributeRecord_list alist(c_attributeRecordPool,
 					 indexPtr.p->m_attributes);
-  
-  
+
+
   for (alist.first(attrPtr); currPtr = attrPtr, alist.next(attrPtr); ){
     Uint32 id;
     getIndexAttr(indexPtr, currPtr.i, &id);
@@ -19231,7 +19218,7 @@ void
 Dbdict::sendDictLockInfoEvent(Signal*, const UtilLockReq* req, const char* text)
 {
   const Dbdict::DictLockType* lt = getDictLockType(req->extra);
-  
+
   infoEvent("DICT: %s %u for %s",
             text,
             (unsigned)refToNode(req->senderRef), lt->text);
@@ -19257,7 +19244,7 @@ Dbdict::execDICT_LOCK_REQ(Signal* signal
       req.lockType == DictLockReq::SumaHandOver)
   {
     jam();
-    
+
     if (c_outstanding_sub_startstop)
     {
       jam();
@@ -19265,7 +19252,7 @@ Dbdict::execDICT_LOCK_REQ(Signal* signal
       err = DictLockRef::TooManyRequests;
       goto ref;
     }
-    
+
     if (req.lockType == DictLockReq::SumaHandOver &&
         !c_sub_startstop_lock.isclear())
     {
@@ -19275,7 +19262,7 @@ Dbdict::execDICT_LOCK_REQ(Signal* signal
     }
 
     c_sub_startstop_lock.set(refToNode(req.userRef));
-    
+
     g_eventLogger->info("granting dict lock to %u", refToNode(req.userRef));
     DictLockConf* conf = (DictLockConf*)signal->getDataPtrSend();
     conf->userPtr = req.userPtr;
@@ -19294,14 +19281,14 @@ Dbdict::execDICT_LOCK_REQ(Signal* signal
 
   // make sure bad request crashes slave, not master (us)
   Uint32 res;
-  if (getOwnNodeId() != c_masterNodeId) 
+  if (getOwnNodeId() != c_masterNodeId)
   {
     jam();
     err = DictLockRef::NotMaster;
     goto ref;
   }
-  
-  if (lt == NULL) 
+
+  if (lt == NULL)
   {
     jam();
     err = DictLockRef::InvalidLockType;
@@ -19309,20 +19296,20 @@ Dbdict::execDICT_LOCK_REQ(Signal* signal
   }
 
   if (req.userRef != signal->getSendersBlockRef() ||
-      getNodeInfo(refToNode(req.userRef)).m_type != NodeInfo::DB) 
+      getNodeInfo(refToNode(req.userRef)).m_type != NodeInfo::DB)
   {
     jam();
     err = DictLockRef::BadUserRef;
     goto ref;
   }
 
-  if (c_aliveNodes.get(refToNode(req.userRef))) 
+  if (c_aliveNodes.get(refToNode(req.userRef)))
   {
     jam();
     err = DictLockRef::TooLate;
     goto ref;
   }
-  
+
   res = m_dict_lock.lock(this, m_dict_lock_pool, &lockReq, 0);
   switch(res){
   case 0:
@@ -19337,31 +19324,31 @@ Dbdict::execDICT_LOCK_REQ(Signal* signal
     break;
   default:
     jam();
-    sendDictLockInfoEvent(signal, &lockReq, "lock request by node");    
+    sendDictLockInfoEvent(signal, &lockReq, "lock request by node");
     break;
   }
   return;
-  
+
 ref:
   {
     DictLockRef* ref = (DictLockRef*)signal->getDataPtrSend();
     ref->userPtr = lockReq.senderData;
     ref->lockType = lockReq.extra;
     ref->errorCode = err;
-    
+
     sendSignal(lockReq.senderRef, GSN_DICT_LOCK_REF, signal,
                DictLockRef::SignalLength, JBB);
   }
   return;
-  
+
 conf:
   {
     DictLockConf* conf = (DictLockConf*)signal->getDataPtrSend();
-    
+
     conf->userPtr = lockReq.senderData;
     conf->lockType = lockReq.extra;
     conf->lockPtr = lockReq.senderData;
-    
+
     sendSignal(lockReq.senderRef, GSN_DICT_LOCK_CONF, signal,
                DictLockConf::SignalLength, JBB);
   }
@@ -19373,7 +19360,7 @@ Dbdict::execDICT_UNLOCK_ORD(Signal* sign
 {
   jamEntry();
   const DictUnlockOrd* ord = (const DictUnlockOrd*)&signal->theData[0];
-  
+
   DictLockReq req;
   req.userPtr = ord->senderData;
   req.userRef = ord->senderRef;
@@ -19393,7 +19380,7 @@ Dbdict::execDICT_UNLOCK_ORD(Signal* sign
     c_sub_startstop_lock.clear(refToNode(ord->senderRef));
     return;
   }
-  
+
   UtilLockReq lockReq;
   lockReq.senderData = req.userPtr;
   lockReq.senderRef = req.userRef;
@@ -19464,7 +19451,7 @@ Dbdict::execDICT_TAKEOVER_REQ(Signal* si
 #ifdef VM_TRACE
       ndbout_c("Dbdict::execDICT_TAKEOVER_REQ: trans %u(0x%8x), state %u, op_list %s", trans_ptr.i, (uint)trans_ptr.p->trans_key, trans_ptr.p->m_state, (trans_ptr.p->m_op_list.in_use)?"yes":"no");
 #endif
-     
+
      SchemaOpPtr op_ptr;
      LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
      bool pending_op = list.first(op_ptr);
@@ -19496,7 +19483,7 @@ Dbdict::execDICT_TAKEOVER_REQ(Signal* si
 #ifdef VM_TRACE
        ndbout_c("Dbdict::execDICT_TAKEOVER_REQ: op %u state %u", op_ptr.p->op_key, op_ptr.p->m_state);
 #endif
-       
+
        /*
          Check if operation is busy
        */
@@ -19606,7 +19593,7 @@ Dbdict::execDICT_TAKEOVER_REQ(Signal* si
      ndbrequire(!(pending_trans = c_schemaTransList.next(trans_ptr)));
    }
  }
- 
+
 void
 Dbdict::execDICT_TAKEOVER_REF(Signal* signal)
 {
@@ -19655,7 +19642,7 @@ Dbdict::execDICT_TAKEOVER_CONF(Signal* s
   //Uint32 rollback_op = conf->rollback_op;
   //Uint32 rollback_op_state = conf->rollback_op_state;
   NodeRecordPtr masterNodePtr;
-  
+
   /*
     Accumulate all responses
   */
@@ -19806,7 +19793,7 @@ void Dbdict::check_takeover_replies(Sign
           trans_ptr.p->m_lowest_trans_state = SchemaTrans::TS_ENDING;
           trans_ptr.p->m_highest_trans_state = SchemaTrans::TS_INITIAL;
         }
-  
+
         trans_ptr.p->m_isMaster = true;
         trans_ptr.p->m_masterRef = reference();
         trans_ptr.p->m_clientRef = clientRef;
@@ -19843,7 +19830,7 @@ void Dbdict::check_takeover_replies(Sign
 	  trans_ptr.p->m_rollback_op = rollback_op;
 	  trans_ptr.p->m_rollback_op_state = rollback_op_state;
 	}
-        
+
         if (SchemaTrans::weight(trans_state) <
             SchemaTrans::weight(trans_ptr.p->m_lowest_trans_state))
         {
@@ -19893,7 +19880,7 @@ void Dbdict::check_takeover_replies(Sign
       trans_ptr.p->m_master_recovery_state = SchemaTrans::TRS_ROLLFORWARD;
       break;
     }
-    
+
     if (trans_ptr.p->m_master_recovery_state == SchemaTrans::TRS_ROLLFORWARD)
     {
       /*
@@ -19905,8 +19892,8 @@ void Dbdict::check_takeover_replies(Sign
       trans_ptr.p->check_partial_rollforward = true;
       trans_ptr.p->m_state = trans_ptr.p->m_lowest_trans_state;
 #ifdef VM_TRACE
-      ndbout_c("Setting transaction state to %u for rollforward", trans_ptr.p->m_state); 
-#endif     
+      ndbout_c("Setting transaction state to %u for rollforward", trans_ptr.p->m_state);
+#endif
     }
     else
     {
@@ -19918,8 +19905,8 @@ void Dbdict::check_takeover_replies(Sign
       infoEvent("Pending schema transaction %u will be rolled back", trans_ptr.p->trans_key);
       trans_ptr.p->m_state = trans_ptr.p->m_highest_trans_state;
 #ifdef VM_TRACE
-      ndbout_c("Setting transaction state to %u for rollback", trans_ptr.p->m_state); 
-#endif     
+      ndbout_c("Setting transaction state to %u for rollback", trans_ptr.p->m_state);
+#endif
     }
 #ifdef VM_TRACE
     ndbout_c("Setting start state for transaction %u to %u", trans_ptr.p->trans_key, trans_ptr.p->m_state);
@@ -19927,8 +19914,8 @@ void Dbdict::check_takeover_replies(Sign
     pending_trans = c_schemaTransList.next(trans_ptr);
   }
 
-  /* 
-     Initialize all node recovery states 
+  /*
+     Initialize all node recovery states
   */
   for (unsigned i = 1; i < MAX_NDB_NODES; i++) {
     jam();
@@ -19954,7 +19941,7 @@ void Dbdict::check_takeover_replies(Sign
         c_nodes.getPtr(nodePtr, i);
 #ifdef VM_TRACE
         ndbout_c("Node %u had %u operations, master has %u",i , nodePtr.p->takeOverConf.op_count, masterNodePtr.p->takeOverConf.op_count);
-#endif        
+#endif
         if (nodePtr.p->takeOverConf.op_count == 0)
         {
           if (SchemaTrans::weight(trans_ptr.p->m_state)
@@ -19987,7 +19974,7 @@ void Dbdict::check_takeover_replies(Sign
           /*
               Operation is missing on slave
           */
-          if (SchemaTrans::weight(trans_ptr.p->m_state) < 
+          if (SchemaTrans::weight(trans_ptr.p->m_state) <
               SchemaTrans::weight(SchemaTrans::TS_PREPARING))
           {
             /*
@@ -20064,7 +20051,7 @@ void Dbdict::check_takeover_replies(Sign
               }
               trans_ptr.p->m_nodes.set(c_masterNodeId);
 #ifdef VM_TRACE
-              ndbout_c("Adding master node %u to transaction %u", c_masterNodeId, trans_ptr.p->trans_key);          
+              ndbout_c("Adding master node %u to transaction %u", c_masterNodeId, trans_ptr.p->trans_key);
 #endif
             }
           }
@@ -20100,7 +20087,7 @@ void Dbdict::check_takeover_replies(Sign
               nodePtr.p->recoveryState = NodeRecord::RS_PARTIAL_ROLLFORWARD;
               nodePtr.p->start_op = op_key;
               nodePtr.p->start_op_state = op_state;
-              
+
             }
             else
             {
@@ -20145,11 +20132,11 @@ void Dbdict::check_takeover_replies(Sign
           else if (SchemaOp::weight(nodePtr.p->takeOverConf.rollforward_op_state) >
                    SchemaOp::weight(trans_ptr.p->m_rollforward_op_state) ||
                    nodePtr.p->takeOverConf.rollforward_op >
-                   trans_ptr.p->m_rollforward_op)              
+                   trans_ptr.p->m_rollforward_op)
           {
             /*
               Slave has started committing, but other slaves have non-committed
-              operations. Node needs to be partially rollforward. 
+              operations. Node needs to be partially rollforward.
             */
             jam();
             nodePtr.p->recoveryState = NodeRecord::RS_PARTIAL_ROLLFORWARD;
@@ -20209,7 +20196,7 @@ void Dbdict::check_takeover_replies(Sign
 #ifdef VM_TRACE
             ndbout_c("Node %u will be partially rolled back from operation %u, state %u", nodePtr.i, nodePtr.p->start_op, nodePtr.p->start_op_state);
 #endif
-            if (i == c_masterNodeId && 
+            if (i == c_masterNodeId &&
                 (SchemaTrans::weight(trans_ptr.p->m_state) <=
                  SchemaTrans::weight(SchemaTrans::TS_PREPARING)))
             {
@@ -20240,7 +20227,7 @@ void Dbdict::check_takeover_replies(Sign
       jam();
       SchemaOpPtr rollforward_op_ptr;
       ndbrequire(findSchemaOp(rollforward_op_ptr, trans_ptr.p->m_rollforward_op));
-      trans_ptr.p->m_curr_op_ptr_i = rollforward_op_ptr.i;    
+      trans_ptr.p->m_curr_op_ptr_i = rollforward_op_ptr.i;
 #ifdef VM_TRACE
       ndbout_c("execDICT_TAKEOVER_CONF: Transaction %u rolled forward starting at %u(%u)", trans_ptr.p->trans_key,  trans_ptr.p->m_rollforward_op, trans_ptr.p->m_curr_op_ptr_i);
 #endif
@@ -20256,13 +20243,13 @@ void Dbdict::check_takeover_replies(Sign
         jam();
         SchemaOpPtr rollback_op_ptr;
         ndbrequire(findSchemaOp(rollback_op_ptr, trans_ptr.p->m_rollback_op));
-        trans_ptr.p->m_curr_op_ptr_i = rollback_op_ptr.i;    
+        trans_ptr.p->m_curr_op_ptr_i = rollback_op_ptr.i;
 #ifdef VM_TRACE
         ndbout_c("execDICT_TAKEOVER_CONF: Transaction %u rolled back starting at %u(%u)", trans_ptr.p->trans_key,  trans_ptr.p->m_rollback_op, trans_ptr.p->m_curr_op_ptr_i);
 #endif
       }
     }
-    
+
     trans_recover(signal, trans_ptr);
     pending_trans = c_schemaTransList.next(trans_ptr);
   }
@@ -20281,21 +20268,21 @@ Dbdict::removeStaleDictLocks(Signal* sig
     infoEvent("Iterating lock queue");
 #endif
     do {
-      if (NodeBitmask::get(theFailedNodes, 
+      if (NodeBitmask::get(theFailedNodes,
                            refToNode(iter.m_curr.p->m_req.senderRef)))
       {
         if (iter.m_curr.p->m_req.requestInfo & UtilLockReq::Granted)
         {
           jam();
           infoEvent("Removed lock for node %u", refToNode(iter.m_curr.p->m_req.senderRef));
-          sendDictLockInfoEvent(signal, &iter.m_curr.p->m_req, 
+          sendDictLockInfoEvent(signal, &iter.m_curr.p->m_req,
                                 "remove lock by failed node");
-        } 
-        else 
+        }
+        else
         {
           jam();
           infoEvent("Removed lock request for node %u", refToNode(iter.m_curr.p->m_req.senderRef));
-          sendDictLockInfoEvent(signal, &iter.m_curr.p->m_req, 
+          sendDictLockInfoEvent(signal, &iter.m_curr.p->m_req,
                                 "remove lock request by failed node");
         }
         DictUnlockOrd* ord = (DictUnlockOrd*)signal->getDataPtrSend();
@@ -20356,7 +20343,7 @@ Dbdict::dict_lock_unlock(Signal* signal,
   UtilUnlockReq req;
   req.senderData = _req->userPtr;
   req.senderRef = _req->userRef;
-  
+
   Uint32 res = m_dict_lock.unlock(this, m_dict_lock_pool, &req);
   switch(res){
   case UtilUnlockRef::OK:
@@ -20387,8 +20374,8 @@ Dbdict::dict_lock_unlock(Signal* signal,
         conf->lockType = lockReq.extra;
         sendSignal(lockReq.senderRef, GSN_DICT_LOCK_CONF, signal,
                    DictLockConf::SignalLength, JBB);
-      }        
-      
+      }
+
       if (!m_dict_lock.next(iter))
         break;
     }
@@ -20494,15 +20481,15 @@ Dbdict::resizeSchemaFile(XSchemaFile * x
 }
 
 void
-Dbdict::computeChecksum(XSchemaFile * xsf, Uint32 pageNo){ 
+Dbdict::computeChecksum(XSchemaFile * xsf, Uint32 pageNo){
   SchemaFile * sf = &xsf->schemaPage[pageNo];
   sf->CheckSum = 0;
   sf->CheckSum = computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS);
 }
 
-bool 
+bool
 Dbdict::validateChecksum(const XSchemaFile * xsf){
-  
+
   for (Uint32 n = 0; n < xsf->noOfPages; n++) {
     SchemaFile * sf = &xsf->schemaPage[n];
     Uint32 c = computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS);
@@ -20520,7 +20507,7 @@ Dbdict::computeChecksum(const Uint32 * s
   return ret;
 }
 
-SchemaFile::TableEntry * 
+SchemaFile::TableEntry *
 Dbdict::getTableEntry(Uint32 tableId)
 {
   return getTableEntry(&c_schemaFile[SchemaRecord::NEW_SCHEMA_FILE], tableId);
@@ -20608,7 +20595,7 @@ Dbdict::execCREATE_FILE_REQ(Signal* sign
       jam();
       impl_req->requestInfo = CreateFileImplReq::CreateForce;
     }
-    
+
     handleClientReq(signal, op_ptr, handle);
     return;
   } while (0);
@@ -20663,7 +20650,7 @@ Dbdict::createFile_parse(Signal* signal,
   }
   SimplePropertiesSectionReader it(objInfoPtr, getSectionSegmentPool());
 
-  Ptr<DictObject> obj_ptr; obj_ptr.setNull();
+  DictObjectPtr obj_ptr; obj_ptr.setNull();
   FilePtr filePtr; filePtr.setNull();
 
   DictFilegroupInfo::File f; f.init();
@@ -20692,7 +20679,7 @@ Dbdict::createFile_parse(Signal* signal,
   if(fg_ptr.p->m_version != f.FilegroupVersion)
   {
     jam();
-    setError(error, CreateFileRef::InvalidFilegroupVersion, __LINE__, 
+    setError(error, CreateFileRef::InvalidFilegroupVersion, __LINE__,
              f.FileName);
     return;
   }
@@ -20725,7 +20712,7 @@ Dbdict::createFile_parse(Signal* signal,
   }
 
   Uint32 len = Uint32(strlen(f.FileName) + 1);
-  Uint32 hash = Rope::hash(f.FileName, len);
+  Uint32 hash = LocalRope::hash(f.FileName, len);
   if(get_object(f.FileName, len, hash) != 0)
   {
     jam();
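
The hunk above hashes the file name with the renamed LocalRope::hash (the length includes the terminating NUL) and then probes for an existing object of the same name before creating a new one. A standalone sketch of that flow, assuming a plain FNV-1a hash as a stand-in for LocalRope::hash and an ordinary map in place of get_object():

#include <cstdint>
#include <cstring>
#include <iostream>
#include <string>
#include <unordered_map>

typedef std::uint32_t Uint32;

// Stand-in name hash (FNV-1a) over len bytes, where len is
// strlen(name) + 1, i.e. including the terminating NUL as above.
static Uint32 name_hash(const char* s, Uint32 len) {
  Uint32 h = 2166136261u;
  for (Uint32 i = 0; i < len; i++) {
    h ^= static_cast<unsigned char>(s[i]);
    h *= 16777619u;
  }
  return h;
}

int main() {
  // Ordinary map standing in for the dictionary's object lookup.
  std::unordered_map<std::string, Uint32> objects;

  const char* fileName = "datafile-01.dat";
  Uint32 len  = Uint32(std::strlen(fileName) + 1);
  Uint32 hash = name_hash(fileName, len);

  if (objects.count(fileName) != 0) {
    std::cout << "error: file name already exists\n";  // reject duplicates
    return 1;
  }
  objects.emplace(fileName, hash);  // register the new name with its hash
  std::cout << "created object, name hash = " << hash << "\n";
  return 0;
}
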
@@ -20740,7 +20727,7 @@ Dbdict::createFile_parse(Signal* signal,
     if(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &dl) && dl)
     {
       jam();
-      setError(error, CreateFileRef::NotSupportedWhenDiskless, __LINE__, 
+      setError(error, CreateFileRef::NotSupportedWhenDiskless, __LINE__,
                f.FileName);
       return;
     }
@@ -20773,7 +20760,7 @@ Dbdict::createFile_parse(Signal* signal,
   new (filePtr.p) File();
 
   {
-    Rope name(c_rope_pool, obj_ptr.p->m_name);
+    LocalRope name(c_rope_pool, obj_ptr.p->m_name);
     if(!name.assign(f.FileName, len, hash))
     {
       jam();
@@ -20790,7 +20777,7 @@ Dbdict::createFile_parse(Signal* signal,
     if (objId == RNIL)
     {
       jam();
-      setError(error, CreateFilegroupRef::NoMoreObjectRecords, __LINE__, 
+      setError(error, CreateFilegroupRef::NoMoreObjectRecords, __LINE__,
                f.FileName);
       goto error;
     }
@@ -20924,7 +20911,7 @@ Dbdict::createFile_parse(Signal* signal,
                         filePtr.p->m_file_size,
                         createFilePtr.p->m_warningFlags);
   }
-  
+
   send_event(signal, trans_ptr,
              NDB_LE_CreateSchemaObject,
              impl_req->file_id,
@@ -21014,7 +21001,7 @@ Dbdict::createFile_reply(Signal* signal,
     ref->senderData = op_ptr.p->m_clientData;
     ref->transId = trans_ptr.p->m_transId;
     getError(error, ref);
-    
+
     Uint32 clientRef = op_ptr.p->m_clientRef;
     sendSignal(clientRef, GSN_CREATE_FILE_REF, signal,
                CreateFileRef::SignalLength, JBB);
@@ -21162,7 +21149,7 @@ Dbdict::createFile_abortPrepare(Signal*
   default:
     ndbrequire(false);
   }
-  
+
   sendSignal(ref, GSN_CREATE_FILE_IMPL_REQ, signal,
              CreateFileImplReq::AbortLength, JBB);
 
@@ -21383,7 +21370,7 @@ Dbdict::createFilegroup_parse(Signal* si
   }
   SimplePropertiesSectionReader it(objInfoPtr, getSectionSegmentPool());
 
-  Ptr<DictObject> obj_ptr; obj_ptr.setNull();
+  DictObjectPtr obj_ptr; obj_ptr.setNull();
   FilegroupPtr fg_ptr; fg_ptr.setNull();
 
   DictFilegroupInfo::Filegroup fg; fg.init();
@@ -21423,7 +21410,7 @@ Dbdict::createFilegroup_parse(Signal* si
   }
 
   Uint32 len = Uint32(strlen(fg.FilegroupName) + 1);
-  Uint32 hash = Rope::hash(fg.FilegroupName, len);
+  Uint32 hash = LocalRope::hash(fg.FilegroupName, len);
   if(get_object(fg.FilegroupName, len, hash) != 0)
   {
     jam();
@@ -21450,7 +21437,7 @@ Dbdict::createFilegroup_parse(Signal* si
   new (fg_ptr.p) Filegroup();
 
   {
-    Rope name(c_rope_pool, obj_ptr.p->m_name);
+    LocalRope name(c_rope_pool, obj_ptr.p->m_name);
     if(!name.assign(fg.FilegroupName, len, hash))
     {
       jam();
@@ -21479,7 +21466,7 @@ Dbdict::createFilegroup_parse(Signal* si
 #endif
     fg_ptr.p->m_tablespace.m_default_logfile_group_id = fg.TS_LogfileGroupId;
 
-    Ptr<Filegroup> lg_ptr;
+    FilegroupPtr lg_ptr;
     if (!c_filegroup_hash.find(lg_ptr, fg.TS_LogfileGroupId))
     {
       jam();
@@ -22012,9 +21999,9 @@ Dbdict::dropFile_parse(Signal* signal, b
 #if defined VM_TRACE || defined ERROR_INSERT
   {
     char buf[1024];
-    Rope name(c_rope_pool, f_ptr.p->m_path);
+    LocalRope name(c_rope_pool, f_ptr.p->m_path);
     name.copy(buf);
-    ndbout_c("Dbdict: drop name=%s,id=%u,obj_id=%u", buf, 
+    ndbout_c("Dbdict: drop name=%s,id=%u,obj_id=%u", buf,
              impl_req->file_id,
              f_ptr.p->m_obj_ptr_i);
   }
@@ -22201,15 +22188,15 @@ Dbdict::send_drop_file(Signal* signal, U
   jam();
   ndbrequire(c_file_hash.find(f_ptr, fileId));
   ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id));
-  
+
   req->senderData = op_key;
   req->senderRef = reference();
   req->requestInfo = type;
-  
+
   req->file_id = f_ptr.p->key;
   req->filegroup_id = f_ptr.p->m_filegroup_id;
   req->filegroup_version = fg_ptr.p->m_version;
-  
+
   Uint32 ref= 0;
   switch(f_ptr.p->m_type){
   case DictTabInfo::Datafile:
@@ -22371,9 +22358,9 @@ Dbdict::dropFilegroup_parse(Signal* sign
 #if defined VM_TRACE || defined ERROR_INSERT
   {
     char buf[1024];
-    Rope name(c_rope_pool, fg_ptr.p->m_name);
+    LocalRope name(c_rope_pool, fg_ptr.p->m_name);
     name.copy(buf);
-    ndbout_c("Dbdict: drop name=%s,id=%u,obj_id=%u", buf, 
+    ndbout_c("Dbdict: drop name=%s,id=%u,obj_id=%u", buf,
              impl_req->filegroup_id,
              fg_ptr.p->m_obj_ptr_i);
   }
@@ -22455,7 +22442,7 @@ Dbdict::dropFilegroup_prepare(Signal* si
   if (fg_ptr.p->m_type == DictTabInfo::LogfileGroup)
   {
     XSchemaFile * xsf = &c_schemaFile[SchemaRecord::NEW_SCHEMA_FILE];
-    Ptr<File> filePtr;
+    FilePtr filePtr;
     Local_file_list list(c_file_pool, fg_ptr.p->m_logfilegroup.m_files);
     for(list.first(filePtr); !filePtr.isNull(); list.next(filePtr))
     {
@@ -22494,7 +22481,7 @@ Dbdict::dropFilegroup_abortPrepare(Signa
   {
     jam();
     XSchemaFile * xsf = &c_schemaFile[SchemaRecord::NEW_SCHEMA_FILE];
-    Ptr<File> filePtr;
+    FilePtr filePtr;
     Local_file_list list(c_file_pool, fg_ptr.p->m_logfilegroup.m_files);
     for(list.first(filePtr); !filePtr.isNull(); list.next(filePtr))
     {
@@ -22539,7 +22526,7 @@ Dbdict::dropFilegroup_commit(Signal* sig
      */
     XSchemaFile * xsf = &c_schemaFile[SchemaRecord::NEW_SCHEMA_FILE];
 
-    Ptr<File> filePtr;
+    FilePtr filePtr;
     Local_file_list list(c_file_pool, fg_ptr.p->m_logfilegroup.m_files);
     for(list.first(filePtr); !filePtr.isNull(); list.next(filePtr))
     {
@@ -22563,7 +22550,7 @@ Dbdict::dropFilegroup_commit(Signal* sig
     ndbrequire(c_filegroup_hash.
 	       find(lg_ptr,
 		    fg_ptr.p->m_tablespace.m_default_logfile_group_id));
-    
+
     decrease_ref_count(lg_ptr.p->m_obj_ptr_i);
   }
 }
@@ -22632,17 +22619,17 @@ Dbdict::send_drop_fg(Signal* signal, Uin
 		     DropFilegroupImplReq::RequestInfo type)
 {
   DropFilegroupImplReq* req = (DropFilegroupImplReq*)signal->getDataPtrSend();
-  
+
   FilegroupPtr fg_ptr;
   ndbrequire(c_filegroup_hash.find(fg_ptr, filegroupId));
-  
+
   req->senderData = op_key;
   req->senderRef = reference();
   req->requestInfo = type;
-  
+
   req->filegroup_id = fg_ptr.p->key;
   req->filegroup_version = fg_ptr.p->m_version;
-  
+
   Uint32 ref= 0;
   switch(fg_ptr.p->m_type){
   case DictTabInfo::Tablespace:
@@ -22654,7 +22641,7 @@ Dbdict::send_drop_fg(Signal* signal, Uin
   default:
     ndbrequire(false);
   }
-  
+
   sendSignal(ref, GSN_DROP_FILEGROUP_IMPL_REQ, signal,
 	     DropFilegroupImplReq::SignalLength, JBB);
 }
@@ -22712,7 +22699,7 @@ Dbdict::execCREATE_NODEGROUP_REQ(Signal*
     }
 
     impl_req->nodegroupId = req->nodegroupId;
-    for (Uint32 i = 0; i<NDB_ARRAY_SIZE(req->nodes) && 
+    for (Uint32 i = 0; i<NDB_ARRAY_SIZE(req->nodes) &&
            i<NDB_ARRAY_SIZE(impl_req->nodes); i++)
     {
       impl_req->nodes[i] = req->nodes[i];
@@ -22785,7 +22772,7 @@ Dbdict::createNodegroup_parse(Signal* si
    * createNodegroup blocks gcp
    *   so trans_ptr can *not* do this (endless loop)
    */
-  trans_ptr.p->m_wait_gcp_on_commit = false; 
+  trans_ptr.p->m_wait_gcp_on_commit = false;
 }
 
 void
@@ -23360,7 +23347,7 @@ Dbdict::dropNodegroup_parse(Signal* sign
    * dropNodegroup blocks gcp
    *   so trans_ptr can *not* do this (endless loop)
    */
-  trans_ptr.p->m_wait_gcp_on_commit = false; 
+  trans_ptr.p->m_wait_gcp_on_commit = false;
 }
 
 void
@@ -23787,14 +23774,14 @@ Dbdict::setError(ErrorInfo& e,
     e.errorNodeId = nodeId ? nodeId : getOwnNodeId();
     e.errorStatus = status;
     e.errorKey = key;
-    BaseString::snprintf(e.errorObjectName, sizeof(e.errorObjectName), "%s", 
+    BaseString::snprintf(e.errorObjectName, sizeof(e.errorObjectName), "%s",
                          name ? name : "");
   }
   e.errorCount++;
 }
 
 void
-Dbdict::setError(ErrorInfo& e, 
+Dbdict::setError(ErrorInfo& e,
                  Uint32 code,
                  Uint32 line,
                  const char * name)
@@ -24097,20 +24084,20 @@ Dbdict::getOpInfo(SchemaOpPtr op_ptr)
 bool
 Dbdict::seizeSchemaOp(SchemaTransPtr trans_ptr, SchemaOpPtr& op_ptr, Uint32 op_key, const OpInfo& info, bool linked)
 {
-  if ((ERROR_INSERTED(6111) && 
+  if ((ERROR_INSERTED(6111) &&
        (info.m_impl_req_gsn == GSN_CREATE_TAB_REQ ||
         info.m_impl_req_gsn == GSN_DROP_TAB_REQ ||
         info.m_impl_req_gsn == GSN_ALTER_TAB_REQ)) ||
-      (ERROR_INSERTED(6112) && 
+      (ERROR_INSERTED(6112) &&
        (info.m_impl_req_gsn == GSN_CREATE_INDX_IMPL_REQ ||
         info.m_impl_req_gsn == GSN_DROP_INDX_IMPL_REQ)) ||
-      (ERROR_INSERTED(6113) && 
+      (ERROR_INSERTED(6113) &&
        (info.m_impl_req_gsn == GSN_ALTER_INDX_IMPL_REQ)) ||
-      (ERROR_INSERTED(6114) && 
+      (ERROR_INSERTED(6114) &&
        (info.m_impl_req_gsn == GSN_CREATE_TRIG_IMPL_REQ ||
         info.m_impl_req_gsn == GSN_DROP_TRIG_IMPL_REQ)) ||
-      (ERROR_INSERTED(6116) && 
-       (info.m_impl_req_gsn == GSN_BUILD_INDX_IMPL_REQ))) 
+      (ERROR_INSERTED(6116) &&
+       (info.m_impl_req_gsn == GSN_BUILD_INDX_IMPL_REQ)))
   {
     jam();
     CLEAR_ERROR_INSERT_VALUE;
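
The block above is a test-only fault-injection gate: when one of the ERROR_INSERTED codes is armed and the incoming request type matches, the seize is refused once and the value is cleared. A simplified sketch with fake macros (the real ERROR_INSERTED / CLEAR_ERROR_INSERT_VALUE belong to the kernel test infrastructure, and the early "return false" is an assumption about the part of the function not shown in the hunk):

#include <iostream>

// Fake stand-ins for the kernel's error-insert machinery.
static int g_error_insert_value = 6111;   // pretend a test armed code 6111
#define ERROR_INSERTED(code) (g_error_insert_value == (code))
#define CLEAR_ERROR_INSERT_VALUE (g_error_insert_value = 0)

// Refuse a "seize" once while the matching fault code is armed, then clear
// the code so the retry succeeds.
static bool seizeSchemaOpSketch(bool request_matches_armed_code) {
  if (ERROR_INSERTED(6111) && request_matches_armed_code) {
    CLEAR_ERROR_INSERT_VALUE;
    return false;
  }
  return true;
}

int main() {
  std::cout << seizeSchemaOpSketch(true) << "\n";  // 0: injected failure
  std::cout << seizeSchemaOpSketch(true) << "\n";  // 1: succeeds after clear
  return 0;
}
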
@@ -24918,7 +24905,7 @@ Dbdict::handleClientReq(Signal* signal,
     Uint32 nodeId = rand() % MAX_NDB_NODES;
     while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
       nodeId = rand() % MAX_NDB_NODES;
-    
+
     infoEvent("Simulating node %u missing RT_PARSE", nodeId);
     rg.m_nodes.clear(nodeId);
     signal->theData[0] = 9999;
@@ -25316,7 +25303,7 @@ Dbdict::trans_prepare_start(Signal* sign
     Uint32 nodeId = rand() % MAX_NDB_NODES;
     while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
       nodeId = rand() % MAX_NDB_NODES;
-    
+
     infoEvent("Simulating node %u missing RT_FLUSH_PREPARE", nodeId);
     rg.m_nodes.clear(nodeId);
     signal->theData[0] = 9999;
@@ -25400,7 +25387,7 @@ Dbdict::trans_prepare_next(Signal* signa
       Uint32 nodeId = rand() % MAX_NDB_NODES;
       while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
         nodeId = rand() % MAX_NDB_NODES;
-      
+
       infoEvent("Simulating node %u missing RT_PREPARE", nodeId);
       rg.m_nodes.clear(nodeId);
       signal->theData[0] = 9999;
@@ -25527,7 +25514,7 @@ Dbdict::trans_abort_parse_recv_reply(Sig
   trans_abort_parse_done(signal, trans_ptr);
 }
 
-void 
+void
 Dbdict::check_partial_trans_abort_parse_next(SchemaTransPtr trans_ptr,
                                              NdbNodeBitmask &nodes,
                                              SchemaOpPtr op_ptr)
@@ -25577,7 +25564,7 @@ Dbdict::trans_abort_parse_next(Signal* s
   jam();
   ndbrequire(trans_ptr.p->m_state == SchemaTrans::TS_ABORTING_PARSE);
 #ifdef MARTIN
-  ndbout_c("Dbdict::trans_abort_parse_next: op %u state %u", op_ptr.i,op_ptr.p->m_state); 
+  ndbout_c("Dbdict::trans_abort_parse_next: op %u state %u", op_ptr.i,op_ptr.p->m_state);
 #endif
   trans_ptr.p->m_curr_op_ptr_i = op_ptr.i;
   op_ptr.p->m_state = SchemaOp::OS_ABORTING_PARSE;
@@ -25605,7 +25592,7 @@ Dbdict::trans_abort_parse_next(Signal* s
       Uint32 nodeId = rand() % MAX_NDB_NODES;
       while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
         nodeId = rand() % MAX_NDB_NODES;
-      
+
       infoEvent("Simulating node %u missing RT_ABORT_PARSE", nodeId);
       rg.m_nodes.clear(nodeId);
       signal->theData[0] = 9999;
@@ -25737,8 +25724,8 @@ Dbdict::trans_abort_prepare_next(Signal*
   jam();
   ndbrequire(trans_ptr.p->m_state == SchemaTrans::TS_ABORTING_PREPARE);
 #ifdef MARTIN
-  ndbout_c("Dbdict::trans_abort_prepare_next: op %u state %u", op_ptr.p->op_key, op_ptr.p->m_state); 
-#endif 
+  ndbout_c("Dbdict::trans_abort_prepare_next: op %u state %u", op_ptr.p->op_key, op_ptr.p->m_state);
+#endif
   trans_ptr.p->m_curr_op_ptr_i = op_ptr.i;
 
   switch(op_ptr.p->m_state){
@@ -25794,7 +25781,7 @@ Dbdict::trans_abort_prepare_next(Signal*
       Uint32 nodeId = rand() % MAX_NDB_NODES;
       while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
         nodeId = rand() % MAX_NDB_NODES;
-      
+
       infoEvent("Simulating node %u missing RT_ABORT_PREPARE", nodeId);
       rg.m_nodes.clear(nodeId);
       signal->theData[0] = 9999;
@@ -25928,7 +25915,7 @@ Dbdict::trans_rollback_sp_next(Signal* s
       Uint32 nodeId = rand() % MAX_NDB_NODES;
       while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
         nodeId = rand() % MAX_NDB_NODES;
-      
+
       infoEvent("Simulating node %u missing RT_ABORT_PARSE", nodeId);
       rg.m_nodes.clear(nodeId);
       signal->theData[0] = 9999;
@@ -26048,7 +26035,7 @@ Dbdict::trans_commit_start(Signal* signa
     Uint32 nodeId = rand() % MAX_NDB_NODES;
     while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
       nodeId = rand() % MAX_NDB_NODES;
-    
+
     infoEvent("Simulating node %u missing RT_FLUSH_COMMIT", nodeId);
     rg.m_nodes.clear(nodeId);
     signal->theData[0] = 9999;
@@ -26107,11 +26094,11 @@ Dbdict::trans_commit_first(Signal* signa
     signal->theData[1] = 0; // Execute direct
     signal->theData[2] = 1; // Current
     EXECUTE_DIRECT(DBDIH, GSN_GETGCIREQ, signal, 3);
-    
+
     jamEntry();
     Uint32 gci_hi = signal->theData[1];
     Uint32 gci_lo = signal->theData[2];
-    
+
     signal->theData[0] = ZCOMMIT_WAIT_GCI;
     signal->theData[1] = trans_ptr.i;
     signal->theData[2] = gci_hi;
@@ -26126,7 +26113,7 @@ Dbdict::trans_commit_first(Signal* signa
     jam();
     Mutex mutex(signal, c_mutexMgr, trans_ptr.p->m_commit_mutex);
     Callback c = { safe_cast(&Dbdict::trans_commit_mutex_locked), trans_ptr.i };
-    
+
     // Todo should alloc mutex on SCHEMA_BEGIN
     bool ok = mutex.lock(c);
     ndbrequire(ok);
@@ -26141,7 +26128,7 @@ Dbdict::trans_commit_wait_gci(Signal* si
   c_schemaTransPool.getPtr(trans_ptr, signal->theData[1]);
 
   ndbrequire(trans_ptr.p->m_state == SchemaTrans::TS_COMMITTING);
-  
+
   Uint32 gci_hi = signal->theData[2];
   Uint32 gci_lo = signal->theData[3];
 
@@ -26174,7 +26161,7 @@ Dbdict::trans_commit_wait_gci(Signal* si
 
   Mutex mutex(signal, c_mutexMgr, trans_ptr.p->m_commit_mutex);
   Callback c = { safe_cast(&Dbdict::trans_commit_mutex_locked), trans_ptr.i };
-  
+
   // Todo should alloc mutex on SCHEMA_BEGIN
   bool ok = mutex.lock(c);
   ndbrequire(ok);
@@ -26262,7 +26249,7 @@ void Dbdict::check_partial_trans_commit_
     }
     trans_ptr.p->check_partial_rollforward = false;
   }
-  
+
 }
 void
 Dbdict::trans_commit_next(Signal* signal,
@@ -26271,7 +26258,7 @@ Dbdict::trans_commit_next(Signal* signal
 {
   jam();
 #ifdef MARTIN
-  ndbout_c("Dbdict::trans_commit_next: op %u state %u", op_ptr.i,op_ptr.p->m_state); 
+  ndbout_c("Dbdict::trans_commit_next: op %u state %u", op_ptr.i,op_ptr.p->m_state);
 #endif
   op_ptr.p->m_state = SchemaOp::OS_COMMITTING;
   trans_ptr.p->m_curr_op_ptr_i = op_ptr.i;
@@ -26299,7 +26286,7 @@ Dbdict::trans_commit_next(Signal* signal
       Uint32 nodeId = rand() % MAX_NDB_NODES;
       while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
         nodeId = rand() % MAX_NDB_NODES;
-      
+
       infoEvent("Simulating node %u missing RT_COMMIT", nodeId);
       rg.m_nodes.clear(nodeId);
       signal->theData[0] = 9999;
@@ -26436,18 +26423,18 @@ Dbdict::check_partial_trans_complete_sta
     */
     for (unsigned i = 1; i < MAX_NDB_NODES; i++) {
       NodeRecordPtr nodePtr;
-#ifdef VM_TRACE      
+#ifdef VM_TRACE
       ndbout_c("Node %u", i);
 #endif
       if (trans_ptr.p->m_nodes.get(i))
       {
         c_nodes.getPtr(nodePtr, i);
-#ifdef VM_TRACE      
+#ifdef VM_TRACE
         ndbout_c("Checking node %u(%u,%u)", nodePtr.i, nodePtr.p->recoveryState, nodePtr.p->takeOverConf.trans_state);
 #endif
         if (nodePtr.p->takeOverConf.trans_state >= SchemaTrans::TS_FLUSH_COMPLETE)
         {
-#ifdef VM_TRACE      
+#ifdef VM_TRACE
           ndbout_c("Skipping TS_FLUSH_COMPLETE of node %u", i);
 #endif
           nodes.clear(i);
@@ -26496,7 +26483,7 @@ Dbdict::trans_complete_start(Signal* sig
     Uint32 nodeId = rand() % MAX_NDB_NODES;
     while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
       nodeId = rand() % MAX_NDB_NODES;
-    
+
     infoEvent("Simulating node %u missing RT_FLUSH_COMPLETE", nodeId);
     rg.m_nodes.clear(nodeId);
     signal->theData[0] = 9999;
@@ -26552,12 +26539,12 @@ Dbdict::trans_complete_first(Signal * si
 }
 
 void
-Dbdict::trans_complete_next(Signal* signal, 
+Dbdict::trans_complete_next(Signal* signal,
                             SchemaTransPtr trans_ptr, SchemaOpPtr op_ptr)
 {
   op_ptr.p->m_state = SchemaOp::OS_COMPLETING;
   trans_ptr.p->m_curr_op_ptr_i = op_ptr.i;
-  
+
   trans_ptr.p->m_nodes.bitAND(c_aliveNodes);
   NdbNodeBitmask nodes = trans_ptr.p->m_nodes;
   NodeReceiverGroup rg(DBDICT, nodes);
@@ -26577,7 +26564,7 @@ Dbdict::trans_complete_next(Signal* sign
     Uint32 nodeId = rand() % MAX_NDB_NODES;
     while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
       nodeId = rand() % MAX_NDB_NODES;
-    
+
     infoEvent("Simulating node %u missing RT_COMPLETE", nodeId);
     rg.m_nodes.clear(nodeId);
     signal->theData[0] = 9999;
@@ -26594,7 +26581,7 @@ Dbdict::trans_complete_next(Signal* sign
   req->requestInfo = SchemaTransImplReq::RT_COMPLETE;
   req->transId = trans_ptr.p->m_transId;
   sendSignal(rg, GSN_SCHEMA_TRANS_IMPL_REQ, signal,
-             SchemaTransImplReq::SignalLength, JBB);  
+             SchemaTransImplReq::SignalLength, JBB);
 }
 
 void
@@ -26616,7 +26603,7 @@ Dbdict::trans_complete_recv_reply(Signal
     LocalSchemaOp_list list(c_schemaOpPool, trans_ptr.p->m_op_list);
     next = list.next(op_ptr);
   }
-  
+
   if (next)
   {
     jam();
@@ -26652,7 +26639,7 @@ Dbdict::trans_end_start(Signal* signal,
     bool ok = sc.init<SchemaTransImplRef>(rg, trans_ptr.p->trans_key);
     ndbrequire(ok);
   }
-  
+
   if (ERROR_INSERTED(6150))
   {
     jam();
@@ -26663,7 +26650,7 @@ Dbdict::trans_end_start(Signal* signal,
     Uint32 nodeId = rand() % MAX_NDB_NODES;
     while(nodeId == c_masterNodeId || (!rg.m_nodes.get(nodeId)))
       nodeId = rand() % MAX_NDB_NODES;
-    
+
     infoEvent("Simulating node %u missing RT_END", nodeId);
     rg.m_nodes.clear(nodeId);
     signal->theData[0] = 9999;
@@ -26680,7 +26667,7 @@ Dbdict::trans_end_start(Signal* signal,
   req->requestInfo = SchemaTransImplReq::RT_END;
   req->transId = trans_ptr.p->m_transId;
   sendSignal(rg, GSN_SCHEMA_TRANS_IMPL_REQ, signal,
-             SchemaTransImplReq::SignalLength, JBB);  
+             SchemaTransImplReq::SignalLength, JBB);
 }
 
 void
@@ -26728,7 +26715,7 @@ void Dbdict::trans_recover(Signal* signa
 
   jam();
 #ifdef VM_TRACE
-  ndbout_c("Dbdict::trans_recover trans %u, state %u", trans_ptr.p->trans_key, trans_ptr.p->m_state); 
+  ndbout_c("Dbdict::trans_recover trans %u, state %u", trans_ptr.p->trans_key, trans_ptr.p->m_state);
 #endif
 
   switch(trans_ptr.p->m_state) {
@@ -26793,7 +26780,7 @@ void Dbdict::trans_recover(Signal* signa
   }
   case SchemaTrans::TS_FLUSH_COMMIT:
     flush_commit:
-    /* 
+    /*
        Flush commit any unflushed slaves
     */
     jam();
@@ -27004,7 +26991,7 @@ Dbdict::execSCHEMA_TRANS_IMPL_REQ(Signal
       ndbrequire(false); // handled above
     case SchemaTransImplReq::RT_PREPARE:
       jam();
-      op_ptr.p->m_state = SchemaOp::OS_PREPARING;      
+      op_ptr.p->m_state = SchemaOp::OS_PREPARING;
       (this->*(info.m_prepare))(signal, op_ptr);
       return;
     case SchemaTransImplReq::RT_ABORT_PARSE:
@@ -27342,7 +27329,7 @@ Dbdict::slave_commit_mutex_locked(Signal
   c_schemaTransPool.getPtr(trans_ptr, transPtrI);
 
   ndbrequire(trans_ptr.p->m_state == SchemaTrans::TS_COMMITTING);
-  sendTransConfRelease(signal, trans_ptr);  
+  sendTransConfRelease(signal, trans_ptr);
 }
 
 void
@@ -27360,7 +27347,7 @@ Dbdict::slave_commit_mutex_unlocked(Sign
   trans_ptr.p->m_commit_mutex.release(c_mutexMgr);
 
   ndbrequire(trans_ptr.p->m_state == SchemaTrans::TS_COMPLETING);
-  sendTransConfRelease(signal, trans_ptr);  
+  sendTransConfRelease(signal, trans_ptr);
 }
 
 void Dbdict::sendTransConfRelease(Signal*signal, SchemaTransPtr trans_ptr)
@@ -27409,7 +27396,7 @@ Dbdict::update_op_state(SchemaOpPtr op_p
     op_ptr.p->m_state = SchemaOp::OS_COMPLETED;
     break;
   case SchemaOp::OS_COMPLETED:
-    ndbrequire(false);    
+    ndbrequire(false);
   }
 }
 
@@ -27861,7 +27848,7 @@ Dbdict::endSchemaTrans(Signal* signal, T
 void
 Dbdict::execSCHEMA_TRANS_BEGIN_CONF(Signal* signal)
 {
-  jamEntry(); 
+  jamEntry();
   const SchemaTransBeginConf* conf =
     (const SchemaTransBeginConf*)signal->getDataPtr();
 
@@ -28259,10 +28246,10 @@ Dbdict::execCREATE_HASH_MAP_REQ(Signal*
   ErrorInfo error;
   do {
     SchemaOpPtr op_ptr;
-    CreateHashMapRecPtr createHashMapPtr;
+    CreateHashMapRecPtr createHashMapRecordPtr;
     CreateHashMapImplReq* impl_req;
 
-    startClientReq(op_ptr, createHashMapPtr, req, impl_req, error);
+    startClientReq(op_ptr, createHashMapRecordPtr, req, impl_req, error);
     if (hasError(error)) {
       jam();
       break;
@@ -28313,9 +28300,9 @@ Dbdict::createHashMap_parse(Signal* sign
 {
 
   SchemaTransPtr trans_ptr = op_ptr.p->m_trans_ptr;
-  CreateHashMapRecPtr createHashMapPtr;
-  getOpRec(op_ptr, createHashMapPtr);
-  CreateHashMapImplReq* impl_req = &createHashMapPtr.p->m_request;
+  CreateHashMapRecPtr createHashMapRecordPtr;
+  getOpRec(op_ptr, createHashMapRecordPtr);
+  CreateHashMapImplReq* impl_req = &createHashMapRecordPtr.p->m_request;
 
   jam();
 
@@ -28413,7 +28400,7 @@ Dbdict::createHashMap_parse(Signal* sign
   }
 
   Uint32 len = Uint32(strlen(hm.HashMapName) + 1);
-  Uint32 hash = Rope::hash(hm.HashMapName, len);
+  Uint32 hash = LocalRope::hash(hm.HashMapName, len);
 
   if (ERROR_INSERTED(6205))
   {
@@ -28454,7 +28441,7 @@ Dbdict::createHashMap_parse(Signal* sign
       return;
     }
 
-    HashMapPtr hm_ptr;
+    HashMapRecordPtr hm_ptr;
     ndbrequire(c_hash_map_hash.find(hm_ptr, objptr->m_id));
 
     impl_req->objectId = objptr->m_id;
@@ -28480,7 +28467,7 @@ Dbdict::createHashMap_parse(Signal* sign
 
   RopeHandle name;
   {
-    Rope tmp(c_rope_pool, name);
+    LocalRope tmp(c_rope_pool, name);
     if(!tmp.assign(hm.HashMapName, len, hash))
     {
       jam();
@@ -28494,7 +28481,7 @@ Dbdict::createHashMap_parse(Signal* sign
   Uint32 errCode = 0;
   Uint32 errLine = 0;
   DictObjectPtr obj_ptr; obj_ptr.setNull();
-  HashMapPtr hm_ptr; hm_ptr.setNull();
+  HashMapRecordPtr hm_ptr; hm_ptr.setNull();
   Ptr<Hash2FragmentMap> map_ptr; map_ptr.setNull();
 
   if (master)
@@ -28669,7 +28656,7 @@ error:
   else
   {
     jam();
-    Rope tmp(c_rope_pool, name);
+    LocalRope tmp(c_rope_pool, name);
     tmp.erase();
   }
 }
@@ -28679,9 +28666,9 @@ Dbdict::createHashMap_abortParse(Signal*
 {
   D("createHashMap_abortParse" << *op_ptr.p);
 
-  CreateHashMapRecPtr createHashMapPtr;
-  getOpRec(op_ptr, createHashMapPtr);
-  CreateHashMapImplReq* impl_req = &createHashMapPtr.p->m_request;
+  CreateHashMapRecPtr createHashMapRecordPtr;
+  getOpRec(op_ptr, createHashMapRecordPtr);
+  CreateHashMapImplReq* impl_req = &createHashMapRecordPtr.p->m_request;
 
   if (impl_req->requestType & CreateHashMapReq::CreateIfNotExists)
   {
@@ -28693,7 +28680,7 @@ Dbdict::createHashMap_abortParse(Signal*
   {
     jam();
 
-    Ptr<HashMapRecord> hm_ptr;
+    HashMapRecordPtr hm_ptr;
     ndbrequire(c_hash_map_hash.find(hm_ptr, impl_req->objectId));
 
     release_object(hm_ptr.p->m_obj_ptr_i);
@@ -28719,9 +28706,9 @@ Dbdict::createHashMap_reply(Signal* sign
   D("createHashMap_reply");
 
   SchemaTransPtr& trans_ptr = op_ptr.p->m_trans_ptr;
-  CreateHashMapRecPtr createHashMapPtr;
-  getOpRec(op_ptr, createHashMapPtr);
-  const CreateHashMapImplReq* impl_req = &createHashMapPtr.p->m_request;
+  CreateHashMapRecPtr createHashMapRecordPtr;
+  getOpRec(op_ptr, createHashMapRecordPtr);
+  const CreateHashMapImplReq* impl_req = &createHashMapRecordPtr.p->m_request;
 
   if (!hasError(error)) {
     CreateHashMapConf* conf = (CreateHashMapConf*)signal->getDataPtrSend();
@@ -28758,9 +28745,9 @@ Dbdict::createHashMap_prepare(Signal* si
   jam();
   D("createHashMap_prepare");
 
-  CreateHashMapRecPtr createHashMapPtr;
-  getOpRec(op_ptr, createHashMapPtr);
-  CreateHashMapImplReq* impl_req = &createHashMapPtr.p->m_request;
+  CreateHashMapRecPtr createHashMapRecordPtr;
+  getOpRec(op_ptr, createHashMapRecordPtr);
+  CreateHashMapImplReq* impl_req = &createHashMapRecordPtr.p->m_request;
 
   if (impl_req->requestType & CreateHashMapReq::CreateIfNotExists)
   {
@@ -28781,8 +28768,8 @@ void
 Dbdict::createHashMap_writeObjConf(Signal* signal, Uint32 op_key, Uint32 ret)
 {
   SchemaOpPtr op_ptr;
-  CreateHashMapRecPtr createHashMapPtr;
-  findSchemaOp(op_ptr, createHashMapPtr, op_key);
+  CreateHashMapRecPtr createHashMapRecordPtr;
+  findSchemaOp(op_ptr, createHashMapRecordPtr, op_key);
 
   ndbrequire(!op_ptr.isNull());
 
@@ -28797,8 +28784,8 @@ Dbdict::createHashMap_commit(Signal* sig
   jam();
   D("createHashMap_commit");
 
-  CreateHashMapRecPtr createHashMapPtr;
-  getOpRec(op_ptr, createHashMapPtr);
+  CreateHashMapRecPtr createHashMapRecordPtr;
+  getOpRec(op_ptr, createHashMapRecordPtr);
 
   sendTransConf(signal, op_ptr);
 }
@@ -28824,7 +28811,7 @@ Dbdict::createHashMap_abortPrepare(Signa
 
 void
 Dbdict::packHashMapIntoPages(SimpleProperties::Writer & w,
-                             Ptr<HashMapRecord> hm_ptr)
+                             HashMapRecordPtr hm_ptr)
 {
   DictHashMapInfo::HashMap hm; hm.init();
 
@@ -29132,7 +29119,7 @@ Dbdict::check_consistency_index(TableRec
     break;
   }
 
-  Ptr<TriggerRecord> triggerPtr;
+  TriggerRecordPtr triggerPtr;
   triggerPtr.i = indexPtr.p->triggerId;
   ndbrequire(triggerPtr.i != RNIL);
   c_triggerRecordPool.getPtr(triggerPtr);

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp	2011-10-07 13:15:08 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp	2011-11-03 08:40:19 +0000
@@ -217,9 +217,9 @@ public:
     Uint32 prevList;
     Uint32 nextHash;
     Uint32 prevHash;
- 
+
     Uint32 hashValue() const { return attributeName.hashValue();}
-    bool equal(const AttributeRecord& obj) const { 
+    bool equal(const AttributeRecord& obj) const {
       if(obj.hashValue() == hashValue()){
 	ConstRope r(* m_key.m_pool, obj.attributeName);
 	return r.compare(m_key.m_name_ptr, m_key.m_name_len) == 0;
@@ -228,14 +228,25 @@ public:
     }
   };
   typedef Ptr<AttributeRecord> AttributeRecordPtr;
-  ArrayPool<AttributeRecord> c_attributeRecordPool;
-  DLHashTable<AttributeRecord> c_attributeRecordHash;
+  typedef ArrayPool<AttributeRecord> AttributeRecord_pool;
+  typedef DLHashTable<AttributeRecord,AttributeRecord,AttributeRecord_pool> AttributeRecord_hash;
+  typedef DLFifoList<AttributeRecord,AttributeRecord,AttributeRecord_pool> AttributeRecord_list;
+  typedef LocalDLFifoList<AttributeRecord,AttributeRecord,AttributeRecord_pool> LocalAttributeRecord_list;
+
+  AttributeRecord_pool c_attributeRecordPool;
+  AttributeRecord_hash c_attributeRecordHash;
   RSS_AP_SNAPSHOT(c_attributeRecordPool);
 
   /**
    * Shared table / index record.  Most of this is permanent data stored
    * on disk.  Index trigger ids are volatile.
    */
+  struct TableRecord;
+  typedef Ptr<TableRecord> TableRecordPtr;
+  typedef ArrayPool<TableRecord> TableRecord_pool;
+  typedef DLFifoList<TableRecord,TableRecord,TableRecord_pool> TableRecord_list;
+  typedef LocalDLFifoList<TableRecord,TableRecord,TableRecord_pool> LocalTableRecord_list;
+
   struct TableRecord {
     TableRecord(){ m_upgrade_trigger_handling.m_upgrade = false;}
     Uint32 maxRowsLow;
@@ -344,7 +355,7 @@ public:
     bool isNonUniqueIndex() const;
     bool isHashIndex() const;
     bool isOrderedIndex() const;
-    
+
     /****************************************************
      *    Support variables for table handling
      ****************************************************/
@@ -353,7 +364,7 @@ public:
     Uint32 filePtr[2];
 
     /**    Pointer to first attribute in table */
-    DLFifoList<AttributeRecord>::Head m_attributes;
+    AttributeRecord_list::Head m_attributes;
 
     Uint32 nextPool;
 
@@ -390,9 +401,9 @@ public:
       Uint32 updateTriggerId;
       Uint32 deleteTriggerId;
     } m_upgrade_trigger_handling;
-    
+
     Uint32 noOfNullBits;
-    
+
     /**  frm data for this table */
     RopeHandle frmData;
     RopeHandle ngData;
@@ -402,7 +413,7 @@ public:
     Uint32 m_tablespace_id;
 
     /** List of indexes attached to table */
-    DLFifoList<TableRecord>::Head m_indexes;
+    TableRecord_list::Head m_indexes;
     Uint32 nextList, prevList;
 
     /*
@@ -423,8 +434,7 @@ public:
     Uint32 indexStatBgRequest;
   };
 
-  typedef Ptr<TableRecord> TableRecordPtr;
-  ArrayPool<TableRecord> c_tableRecordPool;
+  TableRecord_pool c_tableRecordPool;
   RSS_AP_SNAPSHOT(c_tableRecordPool);
 
   /**  Node Group and Tablespace id+version + range or list data.
@@ -447,7 +457,7 @@ public:
     TriggerRecord() {}
 
     /** Trigger state */
-    enum TriggerState { 
+    enum TriggerState {
       TS_NOT_DEFINED = 0,
       TS_DEFINING = 1,
       TS_OFFLINE  = 2,   // created globally in DICT
@@ -458,7 +468,7 @@ public:
     };
     TriggerState triggerState;
 
-    /** Trigger name, used by DICT to identify the trigger */ 
+    /** Trigger name, used by DICT to identify the trigger */
     RopeHandle triggerName;
 
     /** Trigger id, used by TRIX, TC, LQH, and TUP to identify the trigger */
@@ -486,10 +496,12 @@ public:
     /** Pointer to the next attribute used by ArrayPool */
     Uint32 nextPool;
   };
-  
-  Uint32 c_maxNoOfTriggers;
+
   typedef Ptr<TriggerRecord> TriggerRecordPtr;
-  ArrayPool<TriggerRecord> c_triggerRecordPool;
+  typedef ArrayPool<TriggerRecord> TriggerRecord_pool;
+
+  Uint32 c_maxNoOfTriggers;
+  TriggerRecord_pool c_triggerRecordPool;
   RSS_AP_SNAPSHOT(c_triggerRecordPool);
 
   /**
@@ -527,9 +539,10 @@ public:
     /** Used by Array Pool for free list handling */
     Uint32 nextPool;
   };
-  
+
   typedef Ptr<FsConnectRecord> FsConnectRecordPtr;
-  ArrayPool<FsConnectRecord> c_fsConnectRecordPool;
+  typedef ArrayPool<FsConnectRecord> FsConnectRecord_pool;
+  FsConnectRecord_pool c_fsConnectRecordPool;
 
   /**
    * This record stores all the information about a node and all its attributes
@@ -567,11 +580,11 @@ public:
   typedef Ptr<NodeRecord> NodeRecordPtr;
   CArray<NodeRecord> c_nodes;
   NdbNodeBitmask c_aliveNodes;
-  
+
   struct PageRecord {
     Uint32 word[8192];
   };
-  
+
   typedef Ptr<PageRecord> PageRecordPtr;
   CArray<PageRecord> c_pageRecordArray;
 
@@ -591,7 +604,7 @@ public:
 
   struct File {
     File() {}
-    
+
     Uint32 key;
     Uint32 m_magic;
     Uint32 m_version;
@@ -601,7 +614,7 @@ public:
     Uint64 m_file_size;
     Uint64 m_file_free;
     RopeHandle m_path;
-    
+
     Uint32 nextList;
     union {
       Uint32 prevList;
@@ -617,14 +630,14 @@ public:
   typedef DLListImpl<File_pool, File> File_list;
   typedef LocalDLListImpl<File_pool, File> Local_file_list;
   typedef KeyTableImpl<File_pool, File> File_hash;
-  
+
   struct Filegroup {
     Filegroup(){}
 
     Uint32 key;
     Uint32 m_obj_ptr_i;
     Uint32 m_magic;
-    
+
     Uint32 m_type;
     Uint32 m_version;
     RopeHandle m_name;
@@ -634,13 +647,13 @@ public:
 	Uint32 m_extent_size;
 	Uint32 m_default_logfile_group_id;
       } m_tablespace;
-      
+
       struct {
 	Uint32 m_undo_buffer_size;
 	File_list::HeadPOD m_files;
       } m_logfilegroup;
     };
-    
+
     union {
       Uint32 nextPool;
       Uint32 nextList;
@@ -654,12 +667,12 @@ public:
   typedef Ptr<Filegroup> FilegroupPtr;
   typedef RecordPool<Filegroup, RWPool> Filegroup_pool;
   typedef KeyTableImpl<Filegroup_pool, Filegroup> Filegroup_hash;
-  
+
   File_pool c_file_pool;
   Filegroup_pool c_filegroup_pool;
   File_hash c_file_hash;
   Filegroup_hash c_filegroup_hash;
-  
+
   RopePool c_rope_pool;
   RSS_AP_SNAPSHOT(c_rope_pool);
 
@@ -671,7 +684,7 @@ public:
     Uint32 m_id;
     Uint32 m_type;
     Uint32 m_ref_count;
-    RopeHandle m_name;  
+    RopeHandle m_name;
     union {
       struct {
 	Uint32 m_name_len;
@@ -683,9 +696,9 @@ public:
     };
     Uint32 nextHash;
     Uint32 prevHash;
-    
+
     Uint32 hashValue() const { return m_name.hashValue();}
-    bool equal(const DictObject& obj) const { 
+    bool equal(const DictObject& obj) const {
       if(obj.hashValue() == hashValue()){
 	ConstRope r(* m_key.m_pool, obj.m_name);
 	return r.compare(m_key.m_name_ptr, m_key.m_name_len) == 0;
@@ -702,20 +715,23 @@ public:
   };
 
   typedef Ptr<DictObject> DictObjectPtr;
-  
-  DLHashTable<DictObject> c_obj_hash; // Name
-  ArrayPool<DictObject> c_obj_pool;
+  typedef ArrayPool<DictObject> DictObject_pool;
+  typedef DLHashTable<DictObject,DictObject,DictObject_pool> DictObject_hash;
+  typedef SLList<DictObject> DictObject_list;
+
+  DictObject_hash c_obj_hash; // Name
+  DictObject_pool c_obj_pool;
   RSS_AP_SNAPSHOT(c_obj_pool);
-  
+
   // 1
   DictObject * get_object(const char * name){
     return get_object(name, Uint32(strlen(name) + 1));
   }
-  
+
   DictObject * get_object(const char * name, Uint32 len){
-    return get_object(name, len, Rope::hash(name, len));
+    return get_object(name, len, LocalRope::hash(name, len));
   }
-  
+
   DictObject * get_object(const char * name, Uint32 len, Uint32 hash);
 
   //2
@@ -724,7 +740,7 @@ public:
   }
 
   bool get_object(DictObjectPtr& obj_ptr, const char * name, Uint32 len){
-    return get_object(obj_ptr, name, len, Rope::hash(name, len));
+    return get_object(obj_ptr, name, len, LocalRope::hash(name, len));
   }
 
   bool get_object(DictObjectPtr&, const char* name, Uint32 len, Uint32 hash);
@@ -732,7 +748,7 @@ public:
   void release_object(Uint32 obj_ptr_i){
     release_object(obj_ptr_i, c_obj_pool.getPtr(obj_ptr_i));
   }
-  
+
   void release_object(Uint32 obj_ptr_i, DictObject* obj_ptr_p);
 
   void increase_ref_count(Uint32 obj_ptr_i);
@@ -747,7 +763,7 @@ private:
 
   // Signal receivers
   void execDICTSTARTREQ(Signal* signal);
-  
+
   void execGET_TABINFOREQ(Signal* signal);
   void execGET_TABLEDID_REQ(Signal* signal);
   void execGET_TABINFO_REF(Signal* signal);
@@ -869,12 +885,12 @@ private:
   void execDROP_TRIG_IMPL_REF(Signal* signal);
 
   void execDROP_TABLE_REQ(Signal* signal);
-  
+
   void execPREP_DROP_TAB_REQ(Signal* signal);
-  void execPREP_DROP_TAB_REF(Signal* signal);  
+  void execPREP_DROP_TAB_REF(Signal* signal);
   void execPREP_DROP_TAB_CONF(Signal* signal);
 
-  void execDROP_TAB_REF(Signal* signal);  
+  void execDROP_TAB_REF(Signal* signal);
   void execDROP_TAB_CONF(Signal* signal);
 
   void execCREATE_TABLE_REQ(Signal* signal);
@@ -894,7 +910,7 @@ private:
   void execLQHADDATTREF(Signal* signal);
   void execLQHADDATTCONF(Signal* signal);
   void execCREATE_TAB_REF(Signal* signal);
-  void execCREATE_TAB_CONF(Signal* signal);  
+  void execCREATE_TAB_CONF(Signal* signal);
   void execALTER_TAB_REF(Signal* signal);
   void execALTER_TAB_CONF(Signal* signal);
   void execALTER_TABLE_REF(Signal* signal);
@@ -949,7 +965,7 @@ private:
    */
 
   /**
-   * This record stores all the state needed 
+   * This record stores all the state needed
    * when the schema page is being sent to other nodes
    ***************************************************************************/
   struct SendSchemaRecord {
@@ -960,7 +976,7 @@ private:
 
     Uint32 nodeId;
     SignalCounter m_SCHEMAINFO_Counter;
-    
+
     Uint32 noOfWordsCurrentlySent;
     Uint32 noOfSignalsSentSinceDelay;
 
@@ -969,7 +985,7 @@ private:
   SendSchemaRecord c_sendSchemaRecord;
 
   /**
-   * This record stores all the state needed 
+   * This record stores all the state needed
    * when a table file is being read from disk
    ****************************************************************************/
   struct ReadTableRecord {
@@ -979,14 +995,14 @@ private:
     Uint32 pageId;
     /** Table Id of read table */
     Uint32 tableId;
-    
+
     bool inUse;
     Callback m_callback;
   };
   ReadTableRecord c_readTableRecord;
 
   /**
-   * This record stores all the state needed 
+   * This record stores all the state needed
    * when a table file is being written to disk
    ****************************************************************************/
   struct WriteTableRecord {
@@ -1013,7 +1029,7 @@ private:
   WriteTableRecord c_writeTableRecord;
 
   /**
-   * This record stores all the state needed 
+   * This record stores all the state needed
    * when a schema file is being read from disk
    ****************************************************************************/
   struct ReadSchemaRecord {
@@ -1034,7 +1050,7 @@ private:
   ReadSchemaRecord c_readSchemaRecord;
 
   /**
-   * This record stores all the state needed 
+   * This record stores all the state needed
    * when a schema file is being written to disk
    ****************************************************************************/
   struct WriteSchemaRecord {
@@ -1055,7 +1071,7 @@ private:
   WriteSchemaRecord c_writeSchemaRecord;
 
   /**
-   * This record stores all the information needed 
+   * This record stores all the information needed
    * when a file is being read from disk
    ****************************************************************************/
   struct RestartRecord {
@@ -1081,20 +1097,20 @@ private:
   RestartRecord c_restartRecord;
 
   /**
-   * This record stores all the information needed 
+   * This record stores all the information needed
    * when a file is being read from disk
    ****************************************************************************/
   struct RetrieveRecord {
     RetrieveRecord(){ noOfWaiters = 0;}
-    
+
     /**    Only one retrieve table definition at a time       */
     bool busyState;
-    
+
     /**
      * No of waiting in time queue
      */
     Uint32 noOfWaiters;
-    
+
     /**    Block Reference of retriever       */
     BlockReference blockRef;
 
@@ -1129,9 +1145,9 @@ private:
   RetrieveRecord c_retrieveRecord;
 
   /**
-   * This record stores all the information needed 
+   * This record stores all the information needed
    * when a file is being read from disk
-   * 
+   *
    * This is the info stored in one entry of the schema
    * page. Each table has 4 words of info.
    * Word 1: Schema version (upper 16 bits)
@@ -1152,7 +1168,7 @@ private:
 
     /**    Old Schema file first page (used at node restart)    */
     Uint32 oldSchemaPage;
-    
+
     Callback m_callback;
   };
   SchemaRecord c_schemaRecord;
@@ -1198,7 +1214,7 @@ private:
   /* ----------------------------------------------------------------------- */
 
   struct PackTable {
-    
+
     enum PackTableState {
       PTS_IDLE = 0,
       PTS_GET_TAB = 3
@@ -1222,7 +1238,7 @@ private:
     DictTabInfo::RequestType requestType;
     Uint32 errorCode;
     Uint32 errorLine;
-    
+
     SimpleProperties::UnpackStatus status;
     Uint32 errorKey;
     TableRecordPtr tablePtr;
@@ -1235,7 +1251,7 @@ private:
   copyRope(RopeHandle& rh_dst, const RopeHandle& rh_src)
   {
     char buf[sz];
-    Rope r_dst(c_rope_pool, rh_dst);
+    LocalRope r_dst(c_rope_pool, rh_dst);
     ConstRope r_src(c_rope_pool, rh_src);
     ndbrequire(r_src.size() <= sz);
     r_src.copy(buf);
@@ -1256,7 +1272,7 @@ private:
     return str;
   }
 #endif
- 
+
   // Operation records
 
   /**
@@ -1318,7 +1334,7 @@ private:
                 Uint32 key = 0,
                 const char * name = 0);
 
-  void setError(ErrorInfo&, 
+  void setError(ErrorInfo&,
                 Uint32 code,
                 Uint32 line,
                 const char * name);
@@ -1504,7 +1520,7 @@ private:
       case OS_ABORTING_PARSE:
         return 4;
       //case OS_ABORTED_PARSE    = 9,  // Not used, op released
-        //return 3: 
+        //return 3:
       case OS_COMMITTING:
         return 10;
       case OS_COMMITTED:
@@ -1596,9 +1612,9 @@ private:
   };
 
   typedef RecordPool<SchemaOp,ArenaPool> SchemaOp_pool;
-  typedef LocalDLFifoList<SchemaOp,SchemaOp,SchemaOp_pool> LocalSchemaOp_list;
   typedef DLHashTable<SchemaOp,SchemaOp,SchemaOp_pool> SchemaOp_hash;
   typedef DLFifoList<SchemaOp,SchemaOp,SchemaOp_pool>::Head  SchemaOp_head;
+  typedef LocalDLFifoList<SchemaOp,SchemaOp,SchemaOp_pool> LocalSchemaOp_list;
 
   SchemaOp_pool c_schemaOpPool;
   SchemaOp_hash c_schemaOpHash;
@@ -1681,7 +1697,7 @@ private:
     /*
       Store node id in high 8 bits to make op_key globally unique
      */
-    Uint32 op_key = 
+    Uint32 op_key =
       (getOwnNodeId() << 24) +
       ((c_opRecordSequence + 1) & 0x00FFFFFF);
     if (seizeSchemaOp<T>(trans_ptr, op_ptr, op_key, linked)) {
@@ -1959,9 +1975,12 @@ private:
   Uint32 check_write_obj(Uint32, Uint32, SchemaFile::EntryState, ErrorInfo&);
 
   typedef RecordPool<SchemaTrans,ArenaPool> SchemaTrans_pool;
+  typedef DLHashTable<SchemaTrans,SchemaTrans,SchemaTrans_pool> SchemaTrans_hash;
+  typedef DLFifoList<SchemaTrans,SchemaTrans,SchemaTrans_pool> SchemaTrans_list;
+
   SchemaTrans_pool c_schemaTransPool;
-  DLHashTable<SchemaTrans,SchemaTrans,SchemaTrans_pool> c_schemaTransHash;
-  DLFifoList<SchemaTrans,SchemaTrans,SchemaTrans_pool> c_schemaTransList;
+  SchemaTrans_hash c_schemaTransHash;
+  SchemaTrans_list c_schemaTransList;
   Uint32 c_schemaTransCount;
 
   bool seizeSchemaTrans(SchemaTransPtr&, Uint32 trans_key);
@@ -2226,8 +2245,11 @@ private:
 #endif
   };
 
-  ArrayPool<TxHandle> c_txHandlePool;
-  DLHashTable<TxHandle> c_txHandleHash;
+  typedef ArrayPool<TxHandle> TxHandle_pool;
+  typedef DLHashTable<TxHandle,TxHandle,TxHandle_pool> TxHandle_hash;
+
+  TxHandle_pool c_txHandlePool;
+  TxHandle_hash c_txHandleHash;
 
   bool seizeTxHandle(TxHandlePtr&);
   bool findTxHandle(TxHandlePtr&, Uint32 tx_key);
@@ -2932,12 +2954,12 @@ private:
     bool equal(const HashMapRecord& obj) const { return key == obj.key;}
 
   };
-  typedef Ptr<HashMapRecord> HashMapPtr;
-  typedef ArrayPool<HashMapRecord> HashMap_pool;
-  typedef KeyTableImpl<HashMap_pool, HashMapRecord> HashMap_hash;
+  typedef Ptr<HashMapRecord> HashMapRecordPtr;
+  typedef ArrayPool<HashMapRecord> HashMapRecord_pool;
+  typedef KeyTableImpl<HashMapRecord_pool, HashMapRecord> HashMapRecord_hash;
 
-  HashMap_pool c_hash_map_pool;
-  HashMap_hash c_hash_map_hash;
+  HashMapRecord_pool c_hash_map_pool;
+  HashMapRecord_hash c_hash_map_hash;
   RSS_AP_SNAPSHOT(c_hash_map_pool);
   RSS_AP_SNAPSHOT(g_hash_map);
 
@@ -3612,18 +3634,23 @@ private:
     Uint32 u_opSignalUtil   [PTR_ALIGN(opSignalUtilSize)];
     Uint32 nextPool;
   };
-  ArrayPool<OpRecordUnion> c_opRecordPool;
-  
+  typedef ArrayPool<OpRecordUnion> OpRecordUnion_pool;
+  OpRecordUnion_pool c_opRecordPool;
+
   // Operation records
-  KeyTable2C<OpCreateEvent, OpRecordUnion> c_opCreateEvent;
-  KeyTable2C<OpSubEvent, OpRecordUnion> c_opSubEvent;
-  KeyTable2C<OpDropEvent, OpRecordUnion> c_opDropEvent;
-  KeyTable2C<OpSignalUtil, OpRecordUnion> c_opSignalUtil;
+  typedef KeyTable2C<OpCreateEvent, OpRecordUnion> OpCreateEvent_pool;
+  typedef KeyTable2C<OpSubEvent, OpRecordUnion> OpSubEvent_pool;
+  typedef KeyTable2C<OpDropEvent, OpRecordUnion> OpDropEvent_pool;
+  typedef KeyTable2C<OpSignalUtil, OpRecordUnion> OpSignalUtil_pool;
+  OpCreateEvent_pool c_opCreateEvent;
+  OpSubEvent_pool c_opSubEvent;
+  OpDropEvent_pool c_opDropEvent;
+  OpSignalUtil_pool c_opSignalUtil;
 
   // Unique key for operation  XXX move to some system table
   Uint32 c_opRecordSequence;
 
-  void handleNdbdFailureCallback(Signal* signal, 
+  void handleNdbdFailureCallback(Signal* signal,
                                  Uint32 failedNodeId,
                                  Uint32 ignoredRc);
   void handleApiFailureCallback(Signal* signal,
@@ -3637,17 +3664,17 @@ private:
   void sendSTTORRY(Signal* signal);
   void sendNDB_STTORRY(Signal* signal);
   void initSchemaFile(Signal* signal);
-  
+
   /* ------------------------------------------------------------ */
   // Drop Table Handling
   /* ------------------------------------------------------------ */
   void releaseTableObject(Uint32 tableId, bool removeFromHash = true);
-  
+
   /* ------------------------------------------------------------ */
   // General Stuff
   /* ------------------------------------------------------------ */
   Uint32 getFreeObjId(Uint32 minId, bool both = false);
-  Uint32 getFreeTableRecord(Uint32 primaryTableId);
+  Uint32 getFreeTableRecord();
   Uint32 getFreeTriggerRecord();
   bool getNewAttributeRecord(TableRecordPtr tablePtr,
 			     AttributeRecordPtr & attrPtr);
@@ -3658,13 +3685,13 @@ private:
 			      const Uint32 undo_free_hi,
 			      const Uint32 undo_free_lo);
   void packFileIntoPages(SimpleProperties::Writer &, FilePtr, const Uint32);
-  
+
   void sendGET_TABINFOREQ(Signal* signal,
                           Uint32 tableId);
   void sendTC_SCHVERREQ(Signal* signal,
                         Uint32 tableId,
                         BlockReference tcRef);
-  
+
   /* ------------------------------------------------------------ */
   // System Restart Handling
   /* ------------------------------------------------------------ */
@@ -3672,7 +3699,7 @@ private:
   void sendSchemaData(Signal* signal);
   Uint32 sendSCHEMA_INFO(Signal* signal, Uint32 nodeId, Uint32* pagePointer);
   void sendDIHSTARTTAB_REQ(Signal* signal);
-  
+
   /* ------------------------------------------------------------ */
   // Receive Table Handling
   /* ------------------------------------------------------------ */
@@ -3682,21 +3709,21 @@ private:
 			 bool checkExist = true);
   void handleTabInfo(SimpleProperties::Reader & it, ParseDictTabInfoRecord *,
 		     DictTabInfo::Table & tableDesc);
-  
+
   void handleAddTableFailure(Signal* signal,
                              Uint32 failureLine,
                              Uint32 tableId);
   bool verifyTableCorrect(Signal* signal, Uint32 tableId);
-  
+
   /* ------------------------------------------------------------ */
   // Add Fragment Handling
   /* ------------------------------------------------------------ */
   void sendLQHADDATTRREQ(Signal*, SchemaOpPtr, Uint32 attributePtrI);
-  
+
   /* ------------------------------------------------------------ */
   // Read/Write Schema and Table files
   /* ------------------------------------------------------------ */
-  void updateSchemaState(Signal* signal, Uint32 tableId, 
+  void updateSchemaState(Signal* signal, Uint32 tableId,
 			 SchemaFile::TableEntry*, Callback*,
                          bool savetodisk = 1, bool dicttrans = 0);
   void startWriteSchemaFile(Signal* signal);
@@ -3712,13 +3739,13 @@ private:
   void closeWriteSchemaConf(Signal* signal,
                                FsConnectRecordPtr fsPtr);
   void initSchemaFile_conf(Signal* signal, Uint32 i, Uint32 returnCode);
-  
-  void writeTableFile(Signal* signal, Uint32 tableId, 
+
+  void writeTableFile(Signal* signal, Uint32 tableId,
 		      SegmentedSectionPtr tabInfo, Callback*);
   void writeTableFile(Signal* signal, SchemaOpPtr op_ptr, Uint32 tableId,
 		      OpSection opSection, Callback*);
   void startWriteTableFile(Signal* signal, Uint32 tableId);
-  void openTableFile(Signal* signal, 
+  void openTableFile(Signal* signal,
                      Uint32 fileNo,
                      Uint32 fsPtr,
                      Uint32 tableId,
@@ -3754,12 +3781,12 @@ private:
   /* ------------------------------------------------------------ */
   // Get table definitions
   /* ------------------------------------------------------------ */
-  void sendGET_TABINFOREF(Signal* signal, 
+  void sendGET_TABINFOREF(Signal* signal,
 			  GetTabInfoReq*,
 			  GetTabInfoRef::ErrorCode errorCode,
                           Uint32 errorLine);
 
-  void sendGET_TABLEID_REF(Signal* signal, 
+  void sendGET_TABLEID_REF(Signal* signal,
 			   GetTableIdReq * req,
 			   GetTableIdRef::ErrorCode errorCode);
 
@@ -3780,19 +3807,19 @@ private:
   void rebuildIndex_fromEndTrans(Signal*, Uint32 tx_key, Uint32 ret);
 
   // Events
-  void 
+  void
   createEventUTIL_PREPARE(Signal* signal,
 			  Uint32 callbackData,
 			  Uint32 returnCode);
-  void 
-  createEventUTIL_EXECUTE(Signal *signal, 
+  void
+  createEventUTIL_EXECUTE(Signal *signal,
 			  Uint32 callbackData,
 			  Uint32 returnCode);
-  void 
+  void
   dropEventUTIL_PREPARE_READ(Signal* signal,
 			     Uint32 callbackData,
 			     Uint32 returnCode);
-  void 
+  void
   dropEventUTIL_EXECUTE_READ(Signal* signal,
 			     Uint32 callbackData,
 			     Uint32 returnCode);
@@ -3800,8 +3827,8 @@ private:
   dropEventUTIL_PREPARE_DELETE(Signal* signal,
 			       Uint32 callbackData,
 			       Uint32 returnCode);
-  void 
-  dropEventUTIL_EXECUTE_DELETE(Signal *signal, 
+  void
+  dropEventUTIL_EXECUTE_DELETE(Signal *signal,
 			       Uint32 callbackData,
 			       Uint32 returnCode);
   void
@@ -3814,10 +3841,10 @@ private:
 			  Uint32 returnCode);
   int
   sendSignalUtilReq(Callback *c,
-		    BlockReference ref, 
-		    GlobalSignalNumber gsn, 
-		    Signal* signal, 
-		    Uint32 length, 
+		    BlockReference ref,
+		    GlobalSignalNumber gsn,
+		    Signal* signal,
+		    Uint32 length,
 		    JobBufferLevel jbuf,
 		    LinearSectionPtr ptr[3],
 		    Uint32 noOfSections);
@@ -3827,7 +3854,7 @@ private:
   void completeSubStartReq(Signal* signal, Uint32 ptrI,	Uint32 returnCode);
   void completeSubStopReq(Signal* signal, Uint32 ptrI, Uint32 returnCode);
   void completeSubRemoveReq(Signal* signal, Uint32 ptrI, Uint32 returnCode);
-  
+
   void dropEvent_sendReply(Signal* signal,
 			   OpDropEventPtr evntRecPtr);
 
@@ -3867,7 +3894,7 @@ private:
 				 const Uint32 prepareId,
 				 UtilPrepareReq::OperationTypeValue prepReq);
   void executeTransaction(Callback *c,
-			  Signal* signal, 
+			  Signal* signal,
 			  Uint32 senderData,
 			  Uint32 prepareId,
 			  Uint32 noAttr,
@@ -3878,7 +3905,7 @@ private:
   bool upgrade_suma_NotStarted(Uint32 err, Uint32 ref) const;
 
   // support
-  void getTableKeyList(TableRecordPtr, 
+  void getTableKeyList(TableRecordPtr,
 		       Id_array<MAX_ATTRIBUTES_IN_INDEX+1>& list);
   void getIndexAttr(TableRecordPtr indexPtr, Uint32 itAttr, Uint32* id);
   void getIndexAttrList(TableRecordPtr indexPtr, IndexAttributeList& list);
@@ -3938,7 +3965,7 @@ private:
 public:
   void send_drop_file(Signal*, Uint32, Uint32, DropFileImplReq::RequestInfo);
   void send_drop_fg(Signal*, Uint32, Uint32, DropFilegroupImplReq::RequestInfo);
-  
+
   int checkSingleUserMode(Uint32 senderRef);
 
   friend NdbOut& operator<<(NdbOut& out, const ErrorInfo&);
@@ -3969,7 +3996,7 @@ public:
    */
   struct DictLockType;
   friend struct DictLockType;
-  
+
   struct DictLockType {
     DictLockReq::LockType lockType;
     const char* text;
@@ -3981,7 +4008,7 @@ public:
 
   Uint32 dict_lock_trylock(const DictLockReq* req);
   Uint32 dict_lock_unlock(Signal* signal, const DictLockReq* req);
-  
+
   LockQueue::Pool m_dict_lock_pool;
   LockQueue m_dict_lock;
 

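Note on the Dbdict.hpp hunks above: they introduce per-record typedefs (AttributeRecord_pool, TableRecord_pool, TxHandle_pool, ...) so member declarations no longer name ArrayPool/DLHashTable directly. A minimal, hedged sketch of that aliasing pattern follows; ArrayPool, OtherPool and Record here are illustrative stand-ins, not the real ndb kernel templates.

  template <typename T> struct ArrayPool { /* current fixed-size pool */ };
  template <typename T> struct OtherPool { /* some future pool type   */ };

  struct Record {
    unsigned key;
    unsigned nextPool;
  };

  struct Block {
    // Name the container type once, next to the record it holds ...
    typedef ArrayPool<Record> Record_pool;
    // ... so switching pools later is a one-line typedef change:
    // typedef OtherPool<Record> Record_pool;

    Record_pool c_recordPool;   // members and code only see the alias
  };

  int main() { Block b; (void)b; return 0; }
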
=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-10-23 08:34:49 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-10-31 09:49:29 +0000
@@ -2705,6 +2705,23 @@ Dbspj::lookup_build(Build_context& ctx,
   const QN_LookupParameters * param = (const QN_LookupParameters*)qp;
   do
   {
+    err = DbspjErr::InvalidTreeNodeSpecification;
+    if (unlikely(node->len < QN_LookupNode::NodeSize))
+    {
+      jam();
+      DEBUG_CRASH();
+      break;
+    }
+
+    err = DbspjErr::InvalidTreeParametersSpecification;
+    DEBUG("param len: " << param->len);
+    if (unlikely(param->len < QN_LookupParameters::NodeSize))
+    {
+      jam();
+      DEBUG_CRASH();
+      break;
+    }
+
     err = createNode(ctx, requestPtr, treeNodePtr);
     if (unlikely(err != 0))
     {
@@ -2756,13 +2773,6 @@ Dbspj::lookup_build(Build_context& ctx,
       dst->requestInfo = requestInfo;
     }
 
-    err = DbspjErr::InvalidTreeNodeSpecification;
-    if (unlikely(node->len < QN_LookupNode::NodeSize))
-    {
-      DEBUG_CRASH();
-      break;
-    }
-
     if (treeBits & QN_LookupNode::L_UNIQUE_INDEX)
     {
       jam();
@@ -2775,14 +2785,6 @@ Dbspj::lookup_build(Build_context& ctx,
     Uint32 tableSchemaVersion = tableId + ((schemaVersion << 16) & 0xFFFF0000);
     dst->tableSchemaVersion = tableSchemaVersion;
 
-    err = DbspjErr::InvalidTreeParametersSpecification;
-    DEBUG("param len: " << param->len);
-    if (unlikely(param->len < QN_LookupParameters::NodeSize))
-    {
-      DEBUG_CRASH();
-      break;
-    }
-
     ctx.m_resultData = param->resultData;
     treeNodePtr.p->m_lookup_data.m_api_resultRef = ctx.m_resultRef;
     treeNodePtr.p->m_lookup_data.m_api_resultData = param->resultData;
@@ -3765,6 +3767,24 @@ Dbspj::scanFrag_build(Build_context& ctx
 
   do
   {
+    err = DbspjErr::InvalidTreeNodeSpecification;
+    DEBUG("scanFrag_build: len=" << node->len);
+    if (unlikely(node->len < QN_ScanFragNode::NodeSize))
+    {
+      jam();
+      DEBUG_CRASH();
+      break;
+    }
+
+    err = DbspjErr::InvalidTreeParametersSpecification;
+    DEBUG("param len: " << param->len);
+    if (unlikely(param->len < QN_ScanFragParameters::NodeSize))
+    {
+      jam();
+      DEBUG_CRASH();
+      break;
+    }
+
     err = createNode(ctx, requestPtr, treeNodePtr);
     if (unlikely(err != 0))
       break;
@@ -3810,24 +3830,9 @@ Dbspj::scanFrag_build(Build_context& ctx
                                (treeBits & DABits::NI_LINKED_DISK) == 0 &&
                                (paramBits & DABits::PI_DISK_ATTR) == 0);
     dst->requestInfo = requestInfo;
-
-    err = DbspjErr::InvalidTreeNodeSpecification;
-    DEBUG("scanFrag_build: len=" << node->len);
-    if (unlikely(node->len < QN_ScanFragNode::NodeSize))
-      break;
-
     dst->tableId = node->tableId;
     dst->schemaVersion = node->tableVersion;
 
-    err = DbspjErr::InvalidTreeParametersSpecification;
-    DEBUG("param len: " << param->len);
-    if (unlikely(param->len < QN_ScanFragParameters::NodeSize))
-    {
-      jam();
-      DEBUG_CRASH();
-      break;
-    }
-
     ctx.m_resultData = param->resultData;
 
     /**
@@ -4315,6 +4320,24 @@ Dbspj::scanIndex_build(Build_context& ct
 
   do
   {
+    err = DbspjErr::InvalidTreeNodeSpecification;
+    DEBUG("scanIndex_build: len=" << node->len);
+    if (unlikely(node->len < QN_ScanIndexNode::NodeSize))
+    {
+      jam();
+      DEBUG_CRASH();
+      break;
+    }
+
+    err = DbspjErr::InvalidTreeParametersSpecification;
+    DEBUG("param len: " << param->len);
+    if (unlikely(param->len < QN_ScanIndexParameters::NodeSize))
+    {
+      jam();
+      DEBUG_CRASH();
+      break;
+    }
+
     err = createNode(ctx, requestPtr, treeNodePtr);
     if (unlikely(err != 0))
       break;
@@ -4355,24 +4378,9 @@ Dbspj::scanIndex_build(Build_context& ct
                                (paramBits & DABits::PI_DISK_ATTR) == 0);
     ScanFragReq::setCorrFactorFlag(requestInfo, 1);
     dst->requestInfo = requestInfo;
-
-    err = DbspjErr::InvalidTreeNodeSpecification;
-    DEBUG("scanIndex_build: len=" << node->len);
-    if (unlikely(node->len < QN_ScanIndexNode::NodeSize))
-      break;
-
     dst->tableId = node->tableId;
     dst->schemaVersion = node->tableVersion;
 
-    err = DbspjErr::InvalidTreeParametersSpecification;
-    DEBUG("param len: " << param->len);
-    if (unlikely(param->len < QN_ScanIndexParameters::NodeSize))
-    {
-      jam();
-      DEBUG_CRASH();
-      break;
-    }
-
     ctx.m_resultData = param->resultData;
 
     /**
@@ -5735,7 +5743,7 @@ Dbspj::scanIndex_execSCAN_NEXTREQ(Signal
 
       DEBUG("scanIndex_execSCAN_NEXTREQ to: " << hex
             << treeNodePtr.p->m_send.m_ref
-              << ", m_node_no=" << treeNodePtr.p->m_node_no
+            << ", m_node_no=" << treeNodePtr.p->m_node_no
             << ", senderData: " << req->senderData);
 
 #ifdef DEBUG_SCAN_FRAGREQ

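Note on the DbspjMain.cpp hunks above: they move the node->len and param->len checks to the top of lookup_build/scanFrag_build/scanIndex_build, so the lengths are verified before any field of the tree node or parameter block is read. A hedged, self-contained sketch of that validate-before-use pattern; Node, kMinNodeWords and build() are illustrative names, not the real SPJ types.

  #include <cstdio>

  struct Node {
    unsigned len;           // number of valid words in this node
    unsigned tableId;
    unsigned tableVersion;
  };

  static const unsigned kMinNodeWords = 3;

  static int build(const Node* node)
  {
    // Reject a too-short node before consuming any of its fields.
    if (node->len < kMinNodeWords)
      return -1;            // would map to an "invalid specification" error

    // Only reached when the declared length is large enough.
    std::printf("table %u, version %u\n", node->tableId, node->tableVersion);
    return 0;
  }

  int main()
  {
    Node good = { 3, 17, 1 };
    Node bad  = { 1, 0, 0 };
    std::printf("good -> %d, bad -> %d\n", build(&good), build(&bad));
    return 0;
  }
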
=== modified file 'storage/ndb/src/kernel/vm/Rope.cpp'
--- a/storage/ndb/src/kernel/vm/Rope.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/vm/Rope.cpp	2011-11-02 14:06:43 +0000
@@ -58,7 +58,7 @@ ConstRope::compare(const char * str, Uin
     int res = memcmp(str, (const char*)curr.p->data, 4 * getSegmentSize());
     if(res != 0){
       if(DEBUG_ROPE)
-	ndbout_c("ConstRope::compare(%s, %d, %s) -> %d", str, left, 
+	ndbout_c("ConstRope::compare(%s, %d, %s) -> %d", str, left,
 		 (const char*)curr.p->data, res);
       return res;
     }
@@ -83,10 +83,10 @@ ConstRope::compare(const char * str, Uin
 }
 
 void
-Rope::copy(char* buf) const {
+LocalRope::copy(char* buf) const {
   char * ptr = buf;
   if(DEBUG_ROPE)
-    ndbout_c("Rope::copy() head = [ %d 0x%x 0x%x ]",
+    ndbout_c("LocalRope::copy() head = [ %d 0x%x 0x%x ]",
 	     head.used, head.firstItem, head.lastItem);
   Uint32 left = head.used;
   Ptr<Segment> curr;
@@ -103,13 +103,13 @@ Rope::copy(char* buf) const {
     memcpy(buf, curr.p->data, left);
   }
   if(DEBUG_ROPE)
-    ndbout_c("Rope::copy()-> %s", ptr);
+    ndbout_c("LocalRope::copy()-> %s", ptr);
 }
 
 int
-Rope::compare(const char * str, Uint32 len) const {
+LocalRope::compare(const char * str, Uint32 len) const {
   if(DEBUG_ROPE)
-    ndbout_c("Rope::compare(%s, %d)", str, (int) len);
+    ndbout_c("LocalRope::compare(%s, %d)", str, (int) len);
   Uint32 left = head.used > len ? len : head.used;
   Ptr<Segment> curr;
   curr.i = head.firstItem;
@@ -118,7 +118,7 @@ Rope::compare(const char * str, Uint32 l
     int res = memcmp(str, (const char*)curr.p->data, 4 * getSegmentSize());
     if(res != 0){
       if(DEBUG_ROPE)
-	ndbout_c("Rope::compare(%s, %d, %s) -> %d", str, (int) len, 
+	ndbout_c("LocalRope::compare(%s, %d, %s) -> %d", str, (int) len,
 		 (const char*)curr.p->data, res);
       return res;
     }
@@ -133,19 +133,19 @@ Rope::compare(const char * str, Uint32 l
     int res = memcmp(str, (const char*)curr.p->data, left);
     if(res){
       if(DEBUG_ROPE)
-	ndbout_c("Rope::compare(%s, %d) -> %d", str, (int) len, res);
+	ndbout_c("LocalRope::compare(%s, %d) -> %d", str, (int) len, res);
       return res;
     }
   }
   if(DEBUG_ROPE)
-    ndbout_c("Rope::compare(%s, %d) -> %d", str, (int) len, head.used > len);
+    ndbout_c("LocalRope::compare(%s, %d) -> %d", str, (int) len, head.used > len);
   return head.used > len;
 }
 
 bool
-Rope::assign(const char * s, Uint32 len, Uint32 hash){
+LocalRope::assign(const char * s, Uint32 len, Uint32 hash){
   if(DEBUG_ROPE)
-    ndbout_c("Rope::assign(%s, %d, 0x%x)", s, (int) len, hash);
+    ndbout_c("LocalRope::assign(%s, %d, 0x%x)", s, (int) len, hash);
   m_hash = hash;
   head.used = (head.used + 3) / 4;
   release();
@@ -164,7 +164,7 @@ Rope::assign(const char * s, Uint32 len,
     }
     head.used = len;
     if(DEBUG_ROPE)
-      ndbout_c("Rope::assign(...) head = [ %d 0x%x 0x%x ]",
+      ndbout_c("LocalRope::assign(...) head = [ %d 0x%x 0x%x ]",
 	       head.used, head.firstItem, head.lastItem);
     return true;
   }
@@ -172,20 +172,20 @@ Rope::assign(const char * s, Uint32 len,
 }
 
 void
-Rope::erase(){
+LocalRope::erase(){
   head.used = (head.used + 3) / 4;
   release();
 }
 
 Uint32
-Rope::hash(const char * p, Uint32 len){
+LocalRope::hash(const char * p, Uint32 len){
   if(DEBUG_ROPE)
-    ndbout_c("Rope::hash(%s, %d)", p, len);
+    ndbout_c("LocalRope::hash(%s, %d)", p, len);
   Uint32 h = 0;
   for (; len > 0; len--)
     h = (h << 5) + h + (* p++);
   if(DEBUG_ROPE)
-    ndbout_c("Rope::hash(...) -> 0x%x", h);
+    ndbout_c("LocalRope::hash(...) -> 0x%x", h);
   return h;
 }
 

=== modified file 'storage/ndb/src/kernel/vm/Rope.hpp'
--- a/storage/ndb/src/kernel/vm/Rope.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/vm/Rope.hpp	2011-11-02 14:06:43 +0000
@@ -56,16 +56,16 @@ private:
   const RopeHandle & src;
 };
 
-class Rope : private RopeBase {
+class LocalRope : private RopeBase {
 public:
-  Rope(RopePool& thePool, RopeHandle& handle)  
+  LocalRope(RopePool& thePool, RopeHandle& handle)
     : RopeBase(thePool), src(handle)
   {
     this->head = src.m_head;
     m_hash = src.m_hash;
   }
   
-  ~Rope(){
+  ~LocalRope(){
     src.m_head = this->head;
     src.m_hash = m_hash;
   }
@@ -94,13 +94,13 @@ private:
 
 inline
 Uint32
-Rope::size() const {
+LocalRope::size() const {
   return head.used;
 }
 
 inline
 bool
-Rope::empty() const {
+LocalRope::empty() const {
   return head.used == 0;
 }
 

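Note on the Rope.hpp hunk above: it shows the point of the LocalRope name, since the constructor copies head and hash out of the RopeHandle and the destructor writes them back, so the object is meant to live only as a local variable. A compact, hedged sketch of that copy-in/copy-back idiom under generic names (Handle and LocalView are illustrative, not the ndb classes).

  #include <cstdio>

  struct Handle {
    unsigned used;          // state owned by the long-lived handle
    Handle() : used(0) {}
  };

  class LocalView {
  public:
    explicit LocalView(Handle& h) : src(h), used(h.used) {}  // copy in
    ~LocalView() { src.used = used; }                        // copy back on scope exit

    void append(unsigned n) { used += n; }                   // mutate only the local copy

  private:
    Handle& src;
    unsigned used;
    // The handle must outlive the LocalView, since the destructor writes to it.
  };

  int main()
  {
    Handle h;
    {
      LocalView v(h);       // local working copy of h's state
      v.append(4);
    }                       // change is published here
    std::printf("h.used = %u\n", h.used);   // prints 4
    return 0;
  }
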
No bundle (reason: useless for push emails).