List: Commits
From: Ole John Aske
Date: February 23 2012 12:39pm
Subject: bzr push into mysql-trunk-cluster branch (ole.john.aske:3430 to 3431)
 3431 Ole John Aske	2012-02-23 [merge]
      Merge mysql-5.5-cluster -> mysql-trunk-cluster

    added:
      mysql-test/suite/ndb_big/bug13637411-master.opt
      mysql-test/suite/ndb_big/bug13637411.cnf
      mysql-test/suite/ndb_big/bug13637411.test
      sql/ndb_conflict.cc
      sql/ndb_conflict.h
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordDeleteOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordInsertOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordKeyOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordResultDataImpl.java
    modified:
      cmd-line-utils/libedit/chartype.h
      cmd-line-utils/libedit/eln.c
      cmd-line-utils/libedit/readline.c
      include/mysql.h.pp
      mysql-test/r/key_cache.result
      mysql-test/suite/innodb/r/innodb.result
      mysql-test/suite/innodb/t/innodb.test
      mysql-test/suite/ndb/r/ndb_basic.result
      mysql-test/suite/ndb/r/ndb_index_stat_partitions.result
      mysql-test/suite/ndb/r/ndb_statistics0.result
      mysql-test/suite/ndb/r/ndb_statistics1.result
      mysql-test/suite/ndb/t/ndb_basic.test
      mysql-test/suite/ndb/t/ndb_index_stat.test
      mysql-test/suite/ndb/t/ndb_index_stat_partitions.test
      mysql-test/suite/ndb/t/ndb_index_stat_restart.test
      mysql-test/suite/ndb/t/ndb_statistics.inc
      mysql-test/suite/ndb/t/ndb_statistics0.test
      mysql-test/suite/ndb/t/ndb_statistics1.test
      mysql-test/suite/ndb_rpl/r/ndb_rpl_conflict_basic.result
      mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_basic.test
      mysql-test/suite/rpl/t/rpl_known_bugs_detection.test
      mysql-test/t/key_cache.test
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster.h
      sql/ha_ndbcluster_binlog.cc
      sql/ndb_share.cc
      sql/ndb_share.h
      sql/ndb_thd.cc
      storage/ndb/CMakeLists.txt
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionFactoryImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/ClusterConnection.java
      storage/ndb/clusterj/clusterj-openjpa/src/main/java/com/mysql/clusterj/openjpa/NdbOpenJPAStoreManager.java
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/BinaryPKTest.java
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/StressTest.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ClusterConnectionImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ClusterTransactionImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DbImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DictionaryImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/Utility.java
      storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/StressTest.java
      storage/ndb/cmake/os/WindowsCache.cmake
      storage/ndb/include/kernel/kernel_types.h
      storage/ndb/include/kernel/ndb_limits.h
      storage/ndb/include/mgmapi/mgmapi_config_parameters.h
      storage/ndb/include/ndb_config.h.in
      storage/ndb/include/ndb_version.h.in
      storage/ndb/include/transporter/TransporterRegistry.hpp
      storage/ndb/include/util/BaseString.hpp
      storage/ndb/include/util/Bitmask.hpp
      storage/ndb/memcache/src/QueryPlan.cc
      storage/ndb/memcache/src/ndb_engine.c
      storage/ndb/ndb_configure.cmake
      storage/ndb/src/common/transporter/TransporterRegistry.cpp
      storage/ndb/src/common/util/BaseString.cpp
      storage/ndb/src/common/util/Bitmask.cpp
      storage/ndb/src/kernel/blocks/backup/BackupInit.cpp
      storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
      storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhCommon.cpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhCommon.hpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxProxy.cpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxProxy.hpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp
      storage/ndb/src/kernel/ndbd.cpp
      storage/ndb/src/kernel/vm/Configuration.cpp
      storage/ndb/src/kernel/vm/DynArr256.cpp
      storage/ndb/src/kernel/vm/DynArr256.hpp
      storage/ndb/src/kernel/vm/Emulator.hpp
      storage/ndb/src/kernel/vm/SimulatedBlock.hpp
      storage/ndb/src/kernel/vm/dummy_nonmt.cpp
      storage/ndb/src/kernel/vm/mt.cpp
      storage/ndb/src/kernel/vm/mt_thr_config.cpp
      storage/ndb/src/mgmsrv/ConfigInfo.cpp
      storage/ndb/src/ndbapi/TransporterFacade.cpp
      storage/ndb/src/ndbapi/ndberror.c
      storage/ndb/src/ndbjtie/NdbApiWrapper.hpp
      storage/ndb/src/ndbjtie/com/mysql/ndbjtie/ndbapi/NdbScanOperation.java
      storage/ndb/src/ndbjtie/ndbapi_jtie.hpp
      storage/ndb/test/ndbapi/CMakeLists.txt
      storage/ndb/test/ndbapi/flexAsynch.cpp
      storage/ndb/test/run-test/atrt.hpp
      storage/ndb/test/run-test/command.cpp
      storage/ndb/test/run-test/conf-upgrade.cnf
      storage/ndb/test/run-test/setup.cpp
 3430 Ole John Aske	2012-01-27
      Accept new result for MTR test ndb_gis.test.

    modified:
      mysql-test/suite/ndb/r/ndb_gis.result
=== modified file 'cmd-line-utils/libedit/chartype.h'
--- a/cmd-line-utils/libedit/chartype.h	2012-01-11 17:40:29 +0000
+++ b/cmd-line-utils/libedit/chartype.h	2012-02-13 21:10:43 +0000
@@ -45,11 +45,11 @@
  * seems to actually advertise this properly, despite Unicode 3.1 having
  * been around since 2001... */
 
-/* XXXMYSQL : Added FreeBSD & AIX to bypass this check.
-  TODO : Verify if FreeBSD & AIX stores ISO 10646 in wchar_t. */
+/* XXXMYSQL : Added FreeBSD to bypass this check.
+  TODO : Verify if FreeBSD stores ISO 10646 in wchar_t. */
 #if !defined(__NetBSD__) && !defined(__sun) \
   && !(defined(__APPLE__) && defined(__MACH__)) \
-  && !defined(__FreeBSD__) && !defined(_AIX)
+  && !defined(__FreeBSD__)
 #ifndef __STDC_ISO_10646__
 /* In many places it is assumed that the first 127 code points are ASCII
  * compatible, so ensure wchar_t indeed does ISO 10646 and not some other

=== modified file 'cmd-line-utils/libedit/eln.c'
--- a/cmd-line-utils/libedit/eln.c	2012-01-11 17:40:29 +0000
+++ b/cmd-line-utils/libedit/eln.c	2012-02-13 21:10:43 +0000
@@ -200,7 +200,7 @@ el_set(EditLine *el, int op, ...)
 		    ret = -1;
 		    goto out;
 		}
-                /* XXX: The two strdups leak. */
+		// XXX: The two strdup's leak
 		ret = map_addfunc(el, Strdup(wargv[0]), Strdup(wargv[1]),
 		    func);
 		ct_free_argv(wargv);

=== modified file 'cmd-line-utils/libedit/readline.c'
--- a/cmd-line-utils/libedit/readline.c	2012-01-12 13:03:44 +0000
+++ b/cmd-line-utils/libedit/readline.c	2012-02-23 12:37:59 +0000
@@ -1978,7 +1978,7 @@ rl_callback_read_char()
 		} else
 			wbuf = NULL;
 		(*(void (*)(const char *))rl_linefunc)(wbuf);
-                /*el_set(e, EL_UNBUFFERED, 1);*/
+		//el_set(e, EL_UNBUFFERED, 1);
 	}
 }
 

=== modified file 'include/mysql.h.pp'
--- a/include/mysql.h.pp	2012-01-04 20:25:40 +0000
+++ b/include/mysql.h.pp	2012-02-23 12:37:59 +0000
@@ -53,15 +53,15 @@ enum enum_field_types { MYSQL_TYPE_DECIM
    MYSQL_TYPE_DATETIME2,
    MYSQL_TYPE_TIME2,
                         MYSQL_TYPE_NEWDECIMAL=246,
-                        MYSQL_TYPE_ENUM=247,
-                        MYSQL_TYPE_SET=248,
-                        MYSQL_TYPE_TINY_BLOB=249,
-                        MYSQL_TYPE_MEDIUM_BLOB=250,
-                        MYSQL_TYPE_LONG_BLOB=251,
-                        MYSQL_TYPE_BLOB=252,
-                        MYSQL_TYPE_VAR_STRING=253,
-                        MYSQL_TYPE_STRING=254,
-                        MYSQL_TYPE_GEOMETRY=255
+   MYSQL_TYPE_ENUM=247,
+   MYSQL_TYPE_SET=248,
+   MYSQL_TYPE_TINY_BLOB=249,
+   MYSQL_TYPE_MEDIUM_BLOB=250,
+   MYSQL_TYPE_LONG_BLOB=251,
+   MYSQL_TYPE_BLOB=252,
+   MYSQL_TYPE_VAR_STRING=253,
+   MYSQL_TYPE_STRING=254,
+   MYSQL_TYPE_GEOMETRY=255
 };
 enum mysql_enum_shutdown_level {
   SHUTDOWN_DEFAULT = 0,

=== modified file 'mysql-test/r/key_cache.result'
--- a/mysql-test/r/key_cache.result	2011-12-14 14:35:17 +0000
+++ b/mysql-test/r/key_cache.result	2012-02-23 12:37:59 +0000
@@ -383,19 +383,3 @@ Variable_name	Value
 key_cache_block_size	1536
 SET GLOBAL key_cache_block_size= @bug28478_key_cache_block_size;
 DROP TABLE t1;
-#
-# Bug#12361113: crash when load index into cache
-#
-# Note that this creates an empty disabled key cache!
-SET GLOBAL key_cache_none.key_cache_block_size = 1024;
-CREATE TABLE t1 (a INT, b INTEGER NOT NULL, KEY (b) ) ENGINE = MYISAM;
-INSERT INTO t1 VALUES (1, 1);
-CACHE INDEX t1 in key_cache_none;
-ERROR HY000: Unknown key cache 'key_cache_none'
-# The bug crashed the server at LOAD INDEX below. Now it will succeed 
-# since the default cache is used due to CACHE INDEX failed for
-# key_cache_none.
-LOAD INDEX INTO CACHE t1;
-Table	Op	Msg_type	Msg_text
-test.t1	preload_keys	status	OK
-DROP TABLE t1;

=== modified file 'mysql-test/suite/innodb/r/innodb.result'
--- a/mysql-test/suite/innodb/r/innodb.result	2012-01-04 20:25:40 +0000
+++ b/mysql-test/suite/innodb/r/innodb.result	2012-02-23 12:37:59 +0000
@@ -3165,14 +3165,3 @@ Handler_update	1
 Variable_name	Value
 Handler_delete	1
 DROP TABLE bug58912;
-create table t1 (f1 integer primary key) engine=innodb;
-flush status;
-show status like "handler_read_key";
-Variable_name	Value
-Handler_read_key	0
-select f1 from t1;
-f1
-show status like "handler_read_key";
-Variable_name	Value
-Handler_read_key	1
-drop table t1;

=== modified file 'mysql-test/suite/innodb/t/innodb.test'
--- a/mysql-test/suite/innodb/t/innodb.test	2012-01-04 20:25:40 +0000
+++ b/mysql-test/suite/innodb/t/innodb.test	2012-02-23 12:37:59 +0000
@@ -2565,17 +2565,6 @@ SET GLOBAL innodb_thread_concurrency = @
 # Clean up after the Bug#55284/Bug#58912 test case.
 DROP TABLE bug58912;
 
-#
-# Test fix for bug 13117023. InnoDB increments HA_READ_KEY_COUNT (aka
-# HANDLER_READ_KEY) when it should not.
-#
-create table t1 (f1 integer primary key) engine=innodb;
-flush status;
-show status like "handler_read_key";
-select f1 from t1;
-show status like "handler_read_key";
-drop table t1;
-
 #######################################################################
 #                                                                     #
 # Please, DO NOT TOUCH this file as well as the innodb.result file.   #

=== modified file 'mysql-test/suite/ndb/r/ndb_basic.result'
--- a/mysql-test/suite/ndb/r/ndb_basic.result	2012-01-26 14:32:08 +0000
+++ b/mysql-test/suite/ndb/r/ndb_basic.result	2012-02-23 12:37:59 +0000
@@ -1001,4 +1001,12 @@ ENGINE=ndb;
 show warnings;
 Level	Code	Message
 drop table t1;
+create table
+abcdefghijklmnopqrstuvwxyz1234567890bcdefghijklmnopqrstuvwxyz123(id int
+primary key) engine=ndb;
+ERROR HY000: Can't create table 'test.abcdefghijklmnopqrstuvwxyz1234567890bcdefghijklmnopqrstuvwxyz123' (errno: 1059)
+show warnings;
+Level	Code	Message
+Warning	1059	Ndb has an internal limit of 63 bytes on the size of schema identifiers
+Error	1005	Can't create table 'test.abcdefghijklmnopqrstuvwxyz1234567890bcdefghijklmnopqrstuvwxyz123' (errno: 1059)
 End of 5.1 tests

=== modified file 'mysql-test/suite/ndb/r/ndb_index_stat_partitions.result'
--- a/mysql-test/suite/ndb/r/ndb_index_stat_partitions.result	2011-09-19 19:51:16 +0000
+++ b/mysql-test/suite/ndb/r/ndb_index_stat_partitions.result	2012-01-26 12:11:53 +0000
@@ -32,14 +32,14 @@ partition by key (K) partitions 1;
 INSERT INTO t1(I,J,L) VALUES
 (1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
 (6,6,6),(7,7,7),(8,8,8),(9,9,9),(0,0,0);
-INSERT INTO t1(I,J,L) SELECT I,1,I FROM t1;
-INSERT INTO t1(I,J,L) SELECT I,2,I FROM t1;
-INSERT INTO t1(I,J,L) SELECT I,3,I FROM t1;
-INSERT INTO t1(I,J,L) SELECT I,4,I FROM t1;
-INSERT INTO t1(I,J,L) SELECT I,5,I FROM t1;
-INSERT INTO t1(I,J,L) SELECT I,6,I FROM t1;
-INSERT INTO t1(I,J,L) SELECT I,7,I FROM t1;
-INSERT INTO t1(I,J,L) SELECT I,8,I FROM t1;
+INSERT INTO t1(I,J,L) SELECT I,1,I FROM t1 ORDER BY K;
+INSERT INTO t1(I,J,L) SELECT I,2,I FROM t1 ORDER BY K;
+INSERT INTO t1(I,J,L) SELECT I,3,I FROM t1 ORDER BY K;
+INSERT INTO t1(I,J,L) SELECT I,4,I FROM t1 ORDER BY K;
+INSERT INTO t1(I,J,L) SELECT I,5,I FROM t1 ORDER BY K;
+INSERT INTO t1(I,J,L) SELECT I,6,I FROM t1 ORDER BY K;
+INSERT INTO t1(I,J,L) SELECT I,7,I FROM t1 ORDER BY K;
+INSERT INTO t1(I,J,L) SELECT I,8,I FROM t1 ORDER BY K;
 select i, count(*) from t1 group by 1 order by 1;
 i	count(*)
 0	256

=== modified file 'mysql-test/suite/ndb/r/ndb_statistics0.result'
--- a/mysql-test/suite/ndb/r/ndb_statistics0.result	2012-01-04 20:25:40 +0000
+++ b/mysql-test/suite/ndb/r/ndb_statistics0.result	2012-02-23 12:37:59 +0000
@@ -34,12 +34,10 @@ CREATE TABLE t100 LIKE t10;
 INSERT INTO t100(I,J)
 SELECT X.J, X.J+(10*Y.J) FROM t10 AS X,t10 AS Y;
 CREATE TABLE t10000 LIKE t10;
+ALTER TABLE t10000 ENGINE=MYISAM;
 INSERT INTO t10000(I,J)
-SELECT X.J, X.J+(100*Y.J) FROM t100 AS X,t100 AS Y
-WHERE X.J<50;
-INSERT INTO t10000(I,J)
-SELECT X.J, X.J+(100*Y.J) FROM t100 AS X,t100 AS Y
-WHERE X.J>=50;
+SELECT X.J, X.J+(100*Y.J) FROM t100 AS X,t100 AS Y;
+ALTER TABLE t10000 ENGINE=NDBCLUSTER;
 ANALYZE TABLE t10,t100,t10000;
 Table	Op	Msg_type	Msg_text
 test.t10	analyze	status	OK

=== modified file 'mysql-test/suite/ndb/r/ndb_statistics1.result'
--- a/mysql-test/suite/ndb/r/ndb_statistics1.result	2012-01-04 20:25:40 +0000
+++ b/mysql-test/suite/ndb/r/ndb_statistics1.result	2012-02-23 12:37:59 +0000
@@ -30,12 +30,10 @@ CREATE TABLE t100 LIKE t10;
 INSERT INTO t100(I,J)
 SELECT X.J, X.J+(10*Y.J) FROM t10 AS X,t10 AS Y;
 CREATE TABLE t10000 LIKE t10;
+ALTER TABLE t10000 ENGINE=MYISAM;
 INSERT INTO t10000(I,J)
-SELECT X.J, X.J+(100*Y.J) FROM t100 AS X,t100 AS Y
-WHERE X.J<50;
-INSERT INTO t10000(I,J)
-SELECT X.J, X.J+(100*Y.J) FROM t100 AS X,t100 AS Y
-WHERE X.J>=50;
+SELECT X.J, X.J+(100*Y.J) FROM t100 AS X,t100 AS Y;
+ALTER TABLE t10000 ENGINE=NDBCLUSTER;
 ANALYZE TABLE t10,t100,t10000;
 Table	Op	Msg_type	Msg_text
 test.t10	analyze	status	OK

=== modified file 'mysql-test/suite/ndb/t/ndb_basic.test'
--- a/mysql-test/suite/ndb/t/ndb_basic.test	2012-01-10 08:53:25 +0000
+++ b/mysql-test/suite/ndb/t/ndb_basic.test	2012-02-17 10:27:55 +0000
@@ -837,4 +837,13 @@ ENGINE=ndb;
 show warnings;
 drop table t1;
 
+#
+#Bug #11753491 44940: MYSQLD CRASHES WHEN CREATING A CLUSTER TABLE WITH 64 CHARACTER TABLE NAME
+#
+--error 1005
+create table
+abcdefghijklmnopqrstuvwxyz1234567890bcdefghijklmnopqrstuvwxyz123(id int
+primary key) engine=ndb;
+show warnings;
+
 --echo End of 5.1 tests

=== modified file 'mysql-test/suite/ndb/t/ndb_index_stat.test'
--- a/mysql-test/suite/ndb/t/ndb_index_stat.test	2011-11-19 07:56:25 +0000
+++ b/mysql-test/suite/ndb/t/ndb_index_stat.test	2012-02-01 11:59:01 +0000
@@ -19,6 +19,7 @@ DROP TABLE IF EXISTS t1, t2;
 --enable_warnings
 
 set @is_enable_default = @@global.ndb_index_stat_enable;
+let is_table_exists = `select count(*) <> 2 from information_schema.tables where table_name in ('ndb_index_stat_head', 'ndb_index_stat_sample') and table_schema='mysql'`;
 
 set @is_enable = 1;
 source ndb_index_stat_enable.inc;
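
The is_table_exists flag added here (and, below, to ndb_index_stat_restart,
ndb_statistics0 and ndb_statistics1) is set from a plain information_schema
query, presumably consumed by ndb_index_stat_enable.inc. The check can be run
standalone; note that despite its name it evaluates to 1 when the two stat
tables are NOT both present in the mysql schema:

  -- 0 when both index-stat system tables exist, 1 otherwise
  SELECT COUNT(*) <> 2
  FROM information_schema.tables
  WHERE table_name IN ('ndb_index_stat_head', 'ndb_index_stat_sample')
    AND table_schema = 'mysql';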

=== modified file 'mysql-test/suite/ndb/t/ndb_index_stat_partitions.test'
--- a/mysql-test/suite/ndb/t/ndb_index_stat_partitions.test	2011-09-19 08:16:01 +0000
+++ b/mysql-test/suite/ndb/t/ndb_index_stat_partitions.test	2012-01-26 12:11:53 +0000
@@ -24,14 +24,14 @@ INSERT INTO t1(I,J,L) VALUES
 (1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
 (6,6,6),(7,7,7),(8,8,8),(9,9,9),(0,0,0);
 
-INSERT INTO t1(I,J,L) SELECT I,1,I FROM t1;
-INSERT INTO t1(I,J,L) SELECT I,2,I FROM t1;
-INSERT INTO t1(I,J,L) SELECT I,3,I FROM t1;
-INSERT INTO t1(I,J,L) SELECT I,4,I FROM t1;
-INSERT INTO t1(I,J,L) SELECT I,5,I FROM t1;
-INSERT INTO t1(I,J,L) SELECT I,6,I FROM t1;
-INSERT INTO t1(I,J,L) SELECT I,7,I FROM t1;
-INSERT INTO t1(I,J,L) SELECT I,8,I FROM t1;
+INSERT INTO t1(I,J,L) SELECT I,1,I FROM t1 ORDER BY K;
+INSERT INTO t1(I,J,L) SELECT I,2,I FROM t1 ORDER BY K;
+INSERT INTO t1(I,J,L) SELECT I,3,I FROM t1 ORDER BY K;
+INSERT INTO t1(I,J,L) SELECT I,4,I FROM t1 ORDER BY K;
+INSERT INTO t1(I,J,L) SELECT I,5,I FROM t1 ORDER BY K;
+INSERT INTO t1(I,J,L) SELECT I,6,I FROM t1 ORDER BY K;
+INSERT INTO t1(I,J,L) SELECT I,7,I FROM t1 ORDER BY K;
+INSERT INTO t1(I,J,L) SELECT I,8,I FROM t1 ORDER BY K;
 
 select i, count(*) from t1 group by 1 order by 1;
 select l, count(*) from t1 group by 1 order by 1;

=== modified file 'mysql-test/suite/ndb/t/ndb_index_stat_restart.test'
--- a/mysql-test/suite/ndb/t/ndb_index_stat_restart.test	2011-11-30 12:39:03 +0000
+++ b/mysql-test/suite/ndb/t/ndb_index_stat_restart.test	2012-02-01 11:59:01 +0000
@@ -6,6 +6,7 @@ DROP TABLE IF EXISTS t1;
 --enable_warnings
 
 set @is_enable_default = @@global.ndb_index_stat_enable;
+let is_table_exists = `select count(*) <> 2 from information_schema.tables where table_name in ('ndb_index_stat_head', 'ndb_index_stat_sample') and table_schema='mysql'`;
 
 set @is_enable = 1;
 source ndb_index_stat_enable.inc;

=== modified file 'mysql-test/suite/ndb/t/ndb_statistics.inc'
--- a/mysql-test/suite/ndb/t/ndb_statistics.inc	2011-07-02 07:05:32 +0000
+++ b/mysql-test/suite/ndb/t/ndb_statistics.inc	2012-02-01 10:23:10 +0000
@@ -21,14 +21,12 @@ INSERT INTO t100(I,J)
 
 CREATE TABLE t10000 LIKE t10;
 
-# Insert into t10000 in two chunks to not
-#  exhaust MaxNoOfConcurrentOperations
+# Insert into t10000 while it is MyISAM, then alter it to NDB,
+#  so as not to exhaust MaxNoOfConcurrentOperations
+ALTER TABLE t10000 ENGINE=MYISAM;
 INSERT INTO t10000(I,J)
-  SELECT X.J, X.J+(100*Y.J) FROM t100 AS X,t100 AS Y
-  WHERE X.J<50;
-INSERT INTO t10000(I,J)
-  SELECT X.J, X.J+(100*Y.J) FROM t100 AS X,t100 AS Y
-  WHERE X.J>=50;
+  SELECT X.J, X.J+(100*Y.J) FROM t100 AS X,t100 AS Y;
+ALTER TABLE t10000 ENGINE=NDBCLUSTER;
 
 ANALYZE TABLE t10,t100,t10000;
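
A sketch of the net effect of the + lines above: the bulk INSERT now runs
while t10000 is MyISAM, where MaxNoOfConcurrentOperations does not apply,
and the copying ALTER presumably moves the rows back into NDB in smaller
transactions than a single 10000-row INSERT ... SELECT would need:

  ALTER TABLE t10000 ENGINE=MYISAM;
  INSERT INTO t10000(I,J)
    SELECT X.J, X.J+(100*Y.J) FROM t100 AS X, t100 AS Y;
  ALTER TABLE t10000 ENGINE=NDBCLUSTER;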
 

=== modified file 'mysql-test/suite/ndb/t/ndb_statistics0.test'
--- a/mysql-test/suite/ndb/t/ndb_statistics0.test	2011-07-02 07:05:32 +0000
+++ b/mysql-test/suite/ndb/t/ndb_statistics0.test	2012-02-01 11:12:24 +0000
@@ -1,6 +1,7 @@
 # index stats OFF
 
 set @is_enable_default = @@global.ndb_index_stat_enable;
+let is_table_exists = `select count(*) <> 2 from information_schema.tables where table_name in ('ndb_index_stat_head', 'ndb_index_stat_sample') and table_schema='mysql'`;
 
 set @is_enable = 0;
 source ndb_index_stat_enable.inc;

=== modified file 'mysql-test/suite/ndb/t/ndb_statistics1.test'
--- a/mysql-test/suite/ndb/t/ndb_statistics1.test	2011-07-02 07:05:32 +0000
+++ b/mysql-test/suite/ndb/t/ndb_statistics1.test	2012-02-01 11:12:24 +0000
@@ -1,6 +1,7 @@
 # index stats ON
 
 set @is_enable_default = @@global.ndb_index_stat_enable;
+let is_table_exists = `select count(*) <> 2 from information_schema.tables where table_name in ('ndb_index_stat_head', 'ndb_index_stat_sample') and table_schema='mysql'`;
 
 set @is_enable = 1;
 source ndb_index_stat_enable.inc;

=== added file 'mysql-test/suite/ndb_big/bug13637411-master.opt'
--- a/mysql-test/suite/ndb_big/bug13637411-master.opt	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_big/bug13637411-master.opt	2012-01-28 09:09:19 +0000
@@ -0,0 +1 @@
+--testcase-timeout=60

=== added file 'mysql-test/suite/ndb_big/bug13637411.cnf'
--- a/mysql-test/suite/ndb_big/bug13637411.cnf	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_big/bug13637411.cnf	2012-01-28 09:09:19 +0000
@@ -0,0 +1,29 @@
+!include include/default_mysqld.cnf
+
+[cluster_config.1]
+ndbd=
+ndb_mgmd=
+mysqld=
+
+NoOfReplicas=1
+DataMemory=21G
+IndexMemory=220M
+Diskless=1
+
+[mysqld]
+# Make all mysqlds use cluster
+ndbcluster
+
+ndb-cluster-connection-pool=1
+ndb-force-send=1
+ndb-use-exact-count=0
+ndb-extra-logging=1
+ndb-autoincrement-prefetch-sz=256
+engine-condition-pushdown=1
+ndb-wait-connected=600
+ndb-wait-setup=300
+
+[ENV]
+NDB_CONNECTSTRING=             @mysql_cluster.1.ndb_connectstring
+MASTER_MYPORT=                 @mysqld.1.1.port
+

=== added file 'mysql-test/suite/ndb_big/bug13637411.test'
--- a/mysql-test/suite/ndb_big/bug13637411.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_big/bug13637411.test	2012-01-28 15:29:25 +0000
@@ -0,0 +1,140 @@
+source suite.inc;
+source include/have_ndb.inc;
+result_format 2;
+
+call mtr.add_suppression("The table '.*' is full");
+
+select version();
+
+CREATE TABLE t1 (
+  c0 int unsigned not null primary key,
+  c00 char(255) not null default '',
+  c01 char(255) not null default '',
+  c02 char(255) not null default '',
+  c03 char(255) not null default '',
+  c04 char(255) not null default '',
+  c05 char(255) not null default '',
+  c06 char(255) not null default '',
+  c07 char(255) not null default '',
+  c08 char(255) not null default '',
+  c09 char(255) not null default '',
+  c10 char(255) not null default '',
+  c11 char(255) not null default '',
+  c12 char(255) not null default '',
+  c13 char(255) not null default '',
+  c14 char(255) not null default '',
+  c15 char(255) not null default '',
+  c16 char(255) not null default '',
+  c17 char(255) not null default '',
+  c18 char(255) not null default '',
+  c19 char(255) not null default '',
+  c20 char(255) not null default '',
+  c21 char(255) not null default '',
+  c22 char(255) not null default '',
+  c23 char(255) not null default '',
+  c24 char(255) not null default '',
+  c25 char(255) not null default '',
+  c26 char(255) not null default '',
+  c27 char(255) not null default '',
+  c28 char(255) not null default '',
+  c29 char(255) not null default ''
+) COMMENT='NDB_TABLE=NOLOGGING' ENGINE=ndbcluster partition by key(c0) partitions 1;
+
+let $batch = 200;
+
+## Load table...
+--echo Filling table with 15Gb of data
+disable_query_log;
+let $i = 0;
+let $lastgb = 0;
+while (`select (DATA_LENGTH / 1024 / 1024 / 1024) < 15 from INFORMATION_SCHEMA.PARTITIONS where table_name = 't1'`)
+{
+  let $b = $batch; # Number of values to INSERT per batch
+  let $separator = ;
+  let $sql = INSERT t1 (c0) VALUES;
+  while($b)
+  {
+    let $sql=$sql$separator($i*$batch + $b);
+    let $separator = ,;
+    dec $b;
+  }
+
+  --error 0,1297
+  eval $sql;
+  if (!$mysql_errno)
+  {
+    inc $i;
+  }
+
+  let $gb = `select round(DATA_LENGTH / 1024 / 1024 / 1024) from INFORMATION_SCHEMA.PARTITIONS where table_name = 't1'`;
+  if ($gb != $lastgb)
+  {
+    --echo $gb gb...
+    let $lastgb = $gb;
+  }
+}
+
+--echo Filling table up to 20Gb, expect error
+let $done = 0;
+while (!$done)
+{
+  let $b = $batch; # Number of values to INSERT per batch
+  let $separator = ;
+  let $sql = INSERT t1 (c0) VALUES;
+  while($b)
+  {
+    let $sql=$sql$separator($i*$batch + $b);
+    let $separator = ,;
+    dec $b;
+  }
+
+  --error 0,1114,1297
+  eval $sql;
+  if (!$mysql_errno)
+  {
+    inc $i;
+  }
+  if ($mysql_errno == 1114)
+  {
+    show warnings;
+    inc $done;
+  }
+  if (`select (DATA_LENGTH / 1024 / 1024 / 1024) >= 20 from INFORMATION_SCHEMA.PARTITIONS where table_name = 't1'`)
+  {
+    inc $done;
+    --echo 20g loaded!
+  }
+}
+enable_query_log;
+
+select count(*),max(c0)
+from t1;
+
+select (DATA_LENGTH / 1024 / 1024 / 1024)
+from INFORMATION_SCHEMA.PARTITIONS
+where table_name = 't1';
+
+--echo Clearing table
+disable_query_log;
+while ($i > 0)
+{
+  let $b = $batch; # Number of values per DELETE batch
+  let $separator = ;
+  let $sql = delete from t1 where c0 in (;
+  while($b)
+  {
+    let $sql=$sql$separator($i*$batch + $b);
+    let $separator = ,;
+    dec $b;
+  }
+  let $sql=$sql);
+  source run_query_with_retry.inc;
+
+  dec $i;
+}
+enable_query_log;
+
+drop table t1;
+
+## Test succeeded
+exit;
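
Since the query log is disabled during the load and delete phases, none of
the generated SQL appears in the output. With $batch = 200, each iteration
of the insert loops above builds one multi-row statement of roughly this
shape (values descend because $b counts down from $batch):

  INSERT t1 (c0) VALUES (200),(199),(198), ... ,(2),(1);     # iteration $i = 0
  INSERT t1 (c0) VALUES (400),(399),(398), ... ,(202),(201); # iteration $i = 1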

=== modified file 'mysql-test/suite/ndb_rpl/r/ndb_rpl_conflict_basic.result'
--- a/mysql-test/suite/ndb_rpl/r/ndb_rpl_conflict_basic.result	2012-01-26 14:32:08 +0000
+++ b/mysql-test/suite/ndb_rpl/r/ndb_rpl_conflict_basic.result	2012-02-23 12:37:59 +0000
@@ -277,6 +277,142 @@ relevant
 [note] ndb slave: table test.t1allsame using conflict_fn ndb$max on attribute x.
 [note] ndb slave: table test.t2_max using conflict_fn ndb$max on attribute x.
 drop table t3oneex, t2diffex, t1allsame, t3oneex$EX, t2diffex$EX;
+delete from mysql.ndb_replication;
+Test exceptions table schema flexibility
+insert into mysql.ndb_replication values ("test", "t1", 0, 7, "NDB$MAX(X)");
+Test 'normal' mandatory column names + all table pks
+create table test.t1$EX(
+server_id int unsigned,
+master_server_id int unsigned,
+master_epoch bigint unsigned,
+count int unsigned,
+a int not null,
+b int not null,
+c int not null,
+primary key (server_id, master_server_id, master_epoch, count)) engine=ndb;
+create table test.t1 (a int, b int, c int, d int, e int, X int unsigned,
+primary key(a,b,c)) engine=ndb;
+Generate a conflict on the slave
+insert into test.t1 values (1,1,1,1,1,1);
+update test.t1 set X=0 where a=1 and b=1 and c=1;
+Check that conflict has been recorded.
+select * from test.t1$EX;
+server_id	master_server_id	master_epoch	count	a	b	c
+2	1	<epoch_num>	1	1	1	1
+drop table test.t1;
+drop table test.t1$EX;
+Test 'normal' mandatory column names + all table pks +
+extra columns with same and different names to main table columns
+Also a defaulted extra column.
+create table test.t1$EX(
+server_id int unsigned,
+master_server_id int unsigned,
+master_epoch bigint unsigned,
+count int unsigned,
+a int not null,
+b int not null,
+c int not null,
+d int,                     # Same name as main table, but user defined
+lilljeholmen varchar(50) default 'Slussen',
+# Separate, user defined
+primary key (server_id, master_server_id, master_epoch, count)) engine=ndb;
+create table test.t1 (a int, b int, c int, d int, e int, X int unsigned,
+primary key(a,b,c)) engine=ndb;
+Generate a conflict on the slave
+insert into test.t1 values (1,1,1,1,1,1);
+update test.t1 set X=0 where a=1 and b=1 and c=1;
+Check that conflict has been recorded.
+select * from test.t1$EX;
+server_id	master_server_id	master_epoch	count	a	b	c	d	lilljeholmen
+2	1	<epoch_num>	1	1	1	1	NULL	Slussen
+drop table test.t1;
+drop table test.t1$EX;
+Test unusual mandatory column names + all table pks +
+extra columns with same and different names to main table columns
+Also a defaulted extra column.
+create table test.t1$EX(
+monteverdi int unsigned,
+asparagi int unsigned,
+plenipotentiary bigint unsigned,
+mountebank int unsigned,
+a int not null,
+b int not null,
+c int not null,
+d int,                     # Same name as main table, but user defined
+lilljeholmen varchar(50) default 'Slussen',
+# Separate, user defined
+primary key (monteverdi, asparagi, plenipotentiary, mountebank)) engine=ndb;
+create table test.t1 (a int, b int, c int, d int, e int, X int unsigned,
+primary key(a,b,c)) engine=ndb;
+Generate a conflict on the slave
+insert into test.t1 values (1,1,1,1,1,1);
+update test.t1 set X=0 where a=1 and b=1 and c=1;
+Check that conflict has been recorded.
+select * from test.t1$EX;
+monteverdi	asparagi	plenipotentiary	mountebank	a	b	c	d	lilljeholmen
+2	1	<epoch_num>	1	1	1	1	NULL	Slussen
+drop table test.t1;
+drop table test.t1$EX;
+Test unusual mandatory column names + all table pks which are same
+as 'normal' exceptions table column names plus extra columns with
+same and different names to main table columns
+Also a defaulted extra column.
+create table test.t1$EX(
+monteverdi int unsigned,
+asparagi int unsigned,
+plenipotentiary bigint unsigned,
+mountebank int unsigned,
+server_id int unsigned not null,
+master_server_id int unsigned not null,
+master_epoch bigint unsigned not null,
+count int unsigned not null,
+d int,                     # Same name as main table, but user defined
+lilljeholmen varchar(50) default 'Slussen',
+# Separate, user defined
+primary key (monteverdi, asparagi, plenipotentiary, mountebank)) engine=ndb;
+create table test.t1 (server_id int unsigned,
+master_server_id int unsigned,
+master_epoch bigint unsigned,
+count int unsigned,
+d int, e int, X int unsigned,
+primary key(server_id, master_server_id,
+master_epoch, count)) engine=ndb;
+Generate a conflict on the slave
+insert into test.t1 values (1,1,1,1,1,1,1);
+update test.t1 set X=0 where server_id=1 and master_server_id=1 and master_epoch=1 and count=1;
+Check that conflict has been recorded.
+select * from test.t1$EX;
+monteverdi	asparagi	plenipotentiary	mountebank	server_id	master_server_id	master_epoch	count	d	lilljeholmen
+2	1	<epoch_num>	1	1	1	1	1	NULL	Slussen
+drop table test.t1;
+drop table test.t1$EX;
+call mtr.add_suppression("NDB Slave: exceptions table .* has wrong definition .*");
+call mtr.add_suppression("NDB Slave: exceptions table .* has wrong definition .*");
+call mtr.add_suppression("NDB Slave: exceptions table .* has wrong definition .*");
+And some bad exceptions table schemata
+Keys in wrong positions
+create table test.t1$EX(
+a int not null,
+b int not null,
+c int not null,
+d int,                     # Same name as main table, but user defined
+lilljeholmen varchar(50) default 'Slussen',
+# Separate, user defined
+server_id int unsigned,
+master_server_id int unsigned,
+master_epoch bigint unsigned,
+count int unsigned,
+primary key (server_id, master_server_id, master_epoch, count)) engine=ndb;
+create table test.t1 (a int, b int, c int, d int, e int, X int unsigned,
+primary key(a,b,c)) engine=ndb;
+show warnings;
+Level	Code	Message
+MySQLD error output for server 1.1 matching pattern %NDB Slave%
+relevant
+[note] ndb slave: table test.t1 using conflict_fn ndb$max on attribute x.
+[warning] ndb slave: exceptions table t1$ex has wrong definition (initial 4 columns)
+drop table test.t1;
+drop table test.t1$EX;
 "Cleanup"
 drop table mysql.ndb_replication;
 include/rpl_end.inc

=== modified file 'mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_basic.test'
--- a/mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_basic.test	2012-01-23 15:47:46 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_basic.test	2012-01-31 10:01:22 +0000
@@ -373,6 +373,208 @@ show variables like 'server_id';
 --connection master
 drop table t3oneex, t2diffex, t1allsame, t3oneex$EX, t2diffex$EX;
 
+delete from mysql.ndb_replication;
+
+--echo Test exceptions table schema flexibility
+#
+# An exceptions table should be able to have the mandatory columns with
+# different names, as long as the types match.
+# Also, not all main table primary key parts need be present
+# Finally, arbitrary extra columns should be allowed, as long as
+# they can be defaulted.
+#
+
+insert into mysql.ndb_replication values ("test", "t1", 0, 7, "NDB$MAX(X)");
+
+--echo Test 'normal' mandatory column names + all table pks
+
+create table test.t1$EX(
+   server_id int unsigned,
+   master_server_id int unsigned,
+   master_epoch bigint unsigned,
+   count int unsigned,
+   a int not null,
+   b int not null,
+   c int not null,
+   primary key (server_id, master_server_id, master_epoch, count)) engine=ndb;
+
+create table test.t1 (a int, b int, c int, d int, e int, X int unsigned,
+                      primary key(a,b,c)) engine=ndb;
+
+--echo Generate a conflict on the slave
+
+insert into test.t1 values (1,1,1,1,1,1);
+--sync_slave_with_master slave
+--connection master
+update test.t1 set X=0 where a=1 and b=1 and c=1;
+--sync_slave_with_master slave
+--connection slave
+
+--echo Check that conflict has been recorded.
+--replace_column 3 <epoch_num>
+select * from test.t1$EX;
+
+--connection master
+drop table test.t1;
+drop table test.t1$EX;
+
+--echo Test 'normal' mandatory column names + all table pks +
+--echo extra columns with same and different names to main table columns
+--echo Also a defaulted extra column.
+
+create table test.t1$EX(
+   server_id int unsigned,
+   master_server_id int unsigned,
+   master_epoch bigint unsigned,
+   count int unsigned,
+   a int not null,
+   b int not null,
+   c int not null,
+   d int,                     # Same name as main table, but user defined
+   lilljeholmen varchar(50) default 'Slussen',
+                              # Separate, user defined
+   primary key (server_id, master_server_id, master_epoch, count)) engine=ndb;
+
+create table test.t1 (a int, b int, c int, d int, e int, X int unsigned,
+                      primary key(a,b,c)) engine=ndb;
+
+--echo Generate a conflict on the slave
+
+insert into test.t1 values (1,1,1,1,1,1);
+--sync_slave_with_master slave
+--connection master
+update test.t1 set X=0 where a=1 and b=1 and c=1;
+--sync_slave_with_master slave
+--connection slave
+
+--echo Check that conflict has been recorded.
+--replace_column 3 <epoch_num>
+select * from test.t1$EX;
+
+--connection master
+drop table test.t1;
+drop table test.t1$EX;
+
+--echo Test unusual mandatory column names + all table pks +
+--echo extra columns with same and different names to main table columns
+--echo Also a defaulted extra column.
+
+create table test.t1$EX(
+   monteverdi int unsigned,
+   asparagi int unsigned,
+   plenipotentiary bigint unsigned,
+   mountebank int unsigned,
+   a int not null,
+   b int not null,
+   c int not null,
+   d int,                     # Same name as main table, but user defined
+   lilljeholmen varchar(50) default 'Slussen',
+                              # Separate, user defined
+   primary key (monteverdi, asparagi, plenipotentiary, mountebank)) engine=ndb;
+
+create table test.t1 (a int, b int, c int, d int, e int, X int unsigned,
+                      primary key(a,b,c)) engine=ndb;
+
+--echo Generate a conflict on the slave
+
+insert into test.t1 values (1,1,1,1,1,1);
+--sync_slave_with_master slave
+--connection master
+update test.t1 set X=0 where a=1 and b=1 and c=1;
+--sync_slave_with_master slave
+--connection slave
+
+--echo Check that conflict has been recorded.
+--replace_column 3 <epoch_num>
+select * from test.t1$EX;
+
+--connection master
+drop table test.t1;
+drop table test.t1$EX;
+
+--echo Test unusual mandatory column names + all table pks which are same
+--echo as 'normal' exceptions table column names plus extra columns with
+--echo same and different names to main table columns
+--echo Also a defaulted extra column.
+
+create table test.t1$EX(
+   monteverdi int unsigned,
+   asparagi int unsigned,
+   plenipotentiary bigint unsigned,
+   mountebank int unsigned,
+   server_id int unsigned not null,
+   master_server_id int unsigned not null,
+   master_epoch bigint unsigned not null,
+   count int unsigned not null,
+   d int,                     # Same name as main table, but user defined
+   lilljeholmen varchar(50) default 'Slussen',
+                              # Separate, user defined
+   primary key (monteverdi, asparagi, plenipotentiary, mountebank)) engine=ndb;
+
+create table test.t1 (server_id int unsigned,
+                      master_server_id int unsigned,
+                      master_epoch bigint unsigned,
+                      count int unsigned,
+                      d int, e int, X int unsigned,
+                      primary key(server_id, master_server_id,
+                                  master_epoch, count)) engine=ndb;
+
+--echo Generate a conflict on the slave
+
+insert into test.t1 values (1,1,1,1,1,1,1);
+--sync_slave_with_master slave
+--connection master
+update test.t1 set X=0 where server_id=1 and master_server_id=1 and master_epoch=1 and count=1;
+--sync_slave_with_master slave
+--connection slave
+
+--echo Check that conflict has been recorded.
+--replace_column 3 <epoch_num>
+select * from test.t1$EX;
+
+--connection master
+drop table test.t1;
+drop table test.t1$EX;
+
+--connection server1
+call mtr.add_suppression("NDB Slave: exceptions table .* has wrong definition .*");
+--connection server2
+call mtr.add_suppression("NDB Slave: exceptions table .* has wrong definition .*");
+--connection slave
+call mtr.add_suppression("NDB Slave: exceptions table .* has wrong definition .*");
+--connection master
+
+--echo And some bad exceptions table schemata
+--echo   Keys in wrong positions
+create table test.t1$EX(
+   a int not null,
+   b int not null,
+   c int not null,
+   d int,                     # Same name as main table, but user defined
+   lilljeholmen varchar(50) default 'Slussen',
+                              # Separate, user defined
+   server_id int unsigned,
+   master_server_id int unsigned,
+   master_epoch bigint unsigned,
+   count int unsigned,
+   primary key (server_id, master_server_id, master_epoch, count)) engine=ndb;
+
+create table test.t1 (a int, b int, c int, d int, e int, X int unsigned,
+                      primary key(a,b,c)) engine=ndb;
+show warnings;
+
+--let $server_num=1.1
+--let $pattern=%NDB Slave%
+--let $limit=2
+
+--source suite/ndb_rpl/t/show_mysqld_warnings.inc
+
+drop table test.t1;
+drop table test.t1$EX;
+
+
+
+
 ###############
 --echo "Cleanup"
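
For reference, the mysql.ndb_replication row inserted at the top of this
section maps onto the control table's documented columns as follows (a
sketch; server_id 0 means the row applies to all servers, and NDB$MAX(X)
resolves conflicts in favour of the row with the greater X):

  INSERT INTO mysql.ndb_replication
    --     (db,     table_name, server_id, binlog_type, conflict_fn)
    VALUES ("test", "t1",       0,         7,           "NDB$MAX(X)");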
 

=== modified file 'mysql-test/suite/rpl/t/rpl_known_bugs_detection.test'
--- a/mysql-test/suite/rpl/t/rpl_known_bugs_detection.test	2011-12-26 17:21:34 +0000
+++ b/mysql-test/suite/rpl/t/rpl_known_bugs_detection.test	2012-02-23 12:37:59 +0000
@@ -55,6 +55,9 @@ start slave;
 
 # testcase with INSERT SELECT
 connection master;
+--disable_query_log
+call mtr.add_suppression(".*Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. INSERT... SELECT.*");
+--enable_query_log
 CREATE TABLE t1 (
   id bigint(20) unsigned NOT NULL auto_increment,
   field_1 int(10) unsigned NOT NULL,

=== modified file 'mysql-test/t/key_cache.test'
--- a/mysql-test/t/key_cache.test	2011-12-14 14:33:01 +0000
+++ b/mysql-test/t/key_cache.test	2012-02-14 08:00:53 +0000
@@ -251,19 +251,3 @@ SET GLOBAL key_cache_block_size= @bug284
 DROP TABLE t1;
 
 # End of 4.1 tests
-
---echo #
---echo # Bug#12361113: crash when load index into cache
---echo #
-
---echo # Note that this creates an empty disabled key cache!
-SET GLOBAL key_cache_none.key_cache_block_size = 1024;
-CREATE TABLE t1 (a INT, b INTEGER NOT NULL, KEY (b) ) ENGINE = MYISAM;
-INSERT INTO t1 VALUES (1, 1);
---error ER_UNKNOWN_KEY_CACHE
-CACHE INDEX t1 in key_cache_none;
---echo # The bug crashed the server at LOAD INDEX below. Now it will succeed 
---echo # since the default cache is used due to CACHE INDEX failed for
---echo # key_cache_none.
-LOAD INDEX INTO CACHE t1;
-DROP TABLE t1;

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2012-01-26 14:32:08 +0000
+++ b/sql/ha_ndbcluster.cc	2012-02-23 12:37:59 +0000
@@ -50,7 +50,7 @@
 #include <mysql/plugin.h>
 #include <ndb_version.h>
 #include "ndb_mi.h"
-#include "ndb_conflict_trans.h"
+#include "ndb_conflict.h"
 #include "ndb_anyvalue.h"
 #include "ndb_binlog_extra_row_info.h"
 #include "ndb_event_data.h"
@@ -389,11 +389,6 @@ ndbcluster_alter_table_flags(uint flags)
 
 #define NDB_AUTO_INCREMENT_RETRIES 100
 #define BATCH_FLUSH_SIZE (32768)
-/*
-  Room for 10 instruction words, two labels (@ 2words/label)
-  + 2 extra words for the case of resolve_size == 8
-*/
-#define MAX_CONFLICT_INTERPRETED_PROG_SIZE 16
 
 static int ndb_to_mysql_error(const NdbError *ndberr);
 
@@ -873,8 +868,7 @@ static int ndb_to_mysql_error(const NdbE
 #ifdef HAVE_NDB_BINLOG
 
 int
-handle_conflict_op_error(Thd_ndb* thd_ndb,
-                         NdbTransaction* trans,
+handle_conflict_op_error(NdbTransaction* trans,
                          const NdbError& err,
                          const NdbOperation* op);
 
@@ -940,8 +934,7 @@ check_completed_operations_pre_commit(Th
 
       if (err.classification != NdbError::NoError)
       {
-        int res = handle_conflict_op_error(thd_ndb,
-                                           trans,
+        int res = handle_conflict_op_error(trans,
                                            err,
                                            first);
         if (res != 0)
@@ -2131,11 +2124,6 @@ int ha_ndbcluster::get_metadata(THD *thd
 
   ndbtab_g.release();
 
-#ifdef HAVE_NDB_BINLOG
-  ndbcluster_read_binlog_replication(thd, ndb, m_share, m_table,
-                                     ::server_id, FALSE);
-#endif
-
   DBUG_RETURN(0);
 
 err:
@@ -4418,610 +4406,10 @@ thd_allow_batch(const THD* thd)
 #endif
 }
 
-/**
-   st_ndb_slave_state constructor
-
-   Initialise Ndb Slave state object
-*/
-st_ndb_slave_state::st_ndb_slave_state()
-  : current_master_server_epoch(0),
-    current_max_rep_epoch(0),
-    conflict_flags(0),
-    retry_trans_count(0),
-    current_trans_row_conflict_count(0),
-    current_trans_row_reject_count(0),
-    current_trans_in_conflict_count(0),
-    max_rep_epoch(0),
-    sql_run_id(~Uint32(0)),
-    trans_row_conflict_count(0),
-    trans_row_reject_count(0),
-    trans_detect_iter_count(0),
-    trans_in_conflict_count(0),
-    trans_conflict_commit_count(0),
-    trans_conflict_apply_state(SAS_NORMAL),
-    trans_dependency_tracker(NULL)
-{
-  memset(current_violation_count, 0, sizeof(current_violation_count));
-  memset(total_violation_count, 0, sizeof(total_violation_count));
-
-  /* Init conflict handling state memroot */
-  const size_t CONFLICT_MEMROOT_BLOCK_SIZE = 32768;
-  init_alloc_root(&conflict_mem_root, CONFLICT_MEMROOT_BLOCK_SIZE, 0);
-};
-
-/**
-   resetPerAttemptCounters
-
-   Reset the per-epoch-transaction-application-attempt counters
-*/
-void
-st_ndb_slave_state::resetPerAttemptCounters()
-{
-  memset(current_violation_count, 0, sizeof(current_violation_count));
-  current_trans_row_conflict_count = 0;
-  current_trans_row_reject_count = 0;
-  current_trans_in_conflict_count = 0;
-
-  conflict_flags = 0;
-  current_max_rep_epoch = 0;
-}
-
-/**
-   atTransactionAbort()
-
-   Called by Slave SQL thread during transaction abort.
-*/
-void
-st_ndb_slave_state::atTransactionAbort()
-{
-  /* Reset current-transaction counters + state */
-  resetPerAttemptCounters();
-}
-
-/**
-   atTransactionCommit()
-
-   Called by Slave SQL thread after transaction commit
-*/
-void
-st_ndb_slave_state::atTransactionCommit()
-{
-  assert( ((trans_dependency_tracker == NULL) &&
-           (trans_conflict_apply_state == SAS_NORMAL)) ||
-          ((trans_dependency_tracker != NULL) &&
-           (trans_conflict_apply_state == SAS_TRACK_TRANS_DEPENDENCIES)) );
-  assert( trans_conflict_apply_state != SAS_APPLY_TRANS_DEPENDENCIES );
-
-  /* Merge committed transaction counters into total state
-   * Then reset current transaction counters
-   */
-  for (int i=0; i < CFT_NUMBER_OF_CFTS; i++)
-  {
-    total_violation_count[i]+= current_violation_count[i];
-  }
-  trans_row_conflict_count+= current_trans_row_conflict_count;
-  trans_row_reject_count+= current_trans_row_reject_count;
-  trans_in_conflict_count+= current_trans_in_conflict_count;
-
-  if (current_trans_in_conflict_count)
-    trans_conflict_commit_count++;
-
-  if (current_max_rep_epoch > max_rep_epoch)
-  {
-    DBUG_PRINT("info", ("Max replicated epoch increases from %llu to %llu",
-                        max_rep_epoch,
-                        current_max_rep_epoch));
-    max_rep_epoch = current_max_rep_epoch;
-  }
-
-  resetPerAttemptCounters();
-
-  /* Clear per-epoch-transaction retry_trans_count */
-  retry_trans_count = 0;
-}
-
-/**
-   atApplyStatusWrite
-
-   Called by Slave SQL thread when applying an event to the
-   ndb_apply_status table
-*/
-void
-st_ndb_slave_state::atApplyStatusWrite(Uint32 master_server_id,
-                                       Uint32 row_server_id,
-                                       Uint64 row_epoch,
-                                       bool is_row_server_id_local)
-{
-  if (row_server_id == master_server_id)
-  {
-    /*
-       WRITE_ROW to ndb_apply_status injected by MySQLD
-       immediately upstream of us.
-       Record epoch
-    */
-    current_master_server_epoch = row_epoch;
-    assert(! is_row_server_id_local);
-  }
-  else if (is_row_server_id_local)
-  {
-    DBUG_PRINT("info", ("Recording application of local server %u epoch %llu "
-                        " which is %s.",
-                        row_server_id, row_epoch,
-                        (row_epoch > g_ndb_slave_state.current_max_rep_epoch)?
-                        " new highest." : " older than previously applied"));
-    if (row_epoch > current_max_rep_epoch)
-    {
-      /*
-        Store new highest epoch in thdvar.  If we commit successfully
-        then this can become the new global max
-      */
-      current_max_rep_epoch = row_epoch;
-    }
-  }
-}
-
-/**
-   atResetSlave()
-
-   Called when RESET SLAVE command issued - in context of command client.
-*/
-void
-st_ndb_slave_state::atResetSlave()
-{
-  /* Reset the Maximum replicated epoch vars
-   * on slave reset
-   * No need to touch the sql_run_id as that
-   * will increment if the slave is started
-   * again.
-   */
-  resetPerAttemptCounters();
-
-  retry_trans_count = 0;
-  max_rep_epoch = 0;
-}
-
-/**
-   atStartSlave()
-
-   Called by Slave SQL thread when first applying a row to Ndb after
-   a START SLAVE command.
-*/
-void
-st_ndb_slave_state::atStartSlave()
-{
-#ifdef HAVE_NDB_BINLOG
-  if (trans_conflict_apply_state != SAS_NORMAL)
-  {
-    /*
-      Remove conflict handling state on a SQL thread
-      restart
-    */
-    atEndTransConflictHandling();
-    trans_conflict_apply_state = SAS_NORMAL;
-  }
-#endif
-};
 
 #ifdef HAVE_NDB_BINLOG
 
 /**
-   atBeginTransConflictHandling()
-
-   Called by Slave SQL thread when it determines that Transactional
-   Conflict handling is required
-*/
-void
-st_ndb_slave_state::atBeginTransConflictHandling()
-{
-  DBUG_ENTER("atBeginTransConflictHandling");
-  /*
-     Allocate and initialise Transactional Conflict
-     Resolution Handling Structures
-  */
-  assert(trans_dependency_tracker == NULL);
-  trans_dependency_tracker = DependencyTracker::newDependencyTracker(&conflict_mem_root);
-  DBUG_VOID_RETURN;
-};
-
-/**
-   atPrepareConflictDetection
-
-   Called by Slave SQL thread prior to defining an operation on
-   a table with conflict detection defined.
-*/
-int
-st_ndb_slave_state::atPrepareConflictDetection(const NdbDictionary::Table* table,
-                                               const NdbRecord* key_rec,
-                                               const uchar* row_data,
-                                               Uint64 transaction_id,
-                                               bool& handle_conflict_now)
-{
-  DBUG_ENTER("atPrepareConflictDetection");
-  /*
-    Slave is preparing to apply an operation with conflict detection.
-    If we're performing Transactional Conflict Resolution, take
-    extra steps
-  */
-  switch( trans_conflict_apply_state )
-  {
-  case SAS_NORMAL:
-    DBUG_PRINT("info", ("SAS_NORMAL : No special handling"));
-    /* No special handling */
-    break;
-  case SAS_TRACK_TRANS_DEPENDENCIES:
-  {
-    DBUG_PRINT("info", ("SAS_TRACK_TRANS_DEPENDENCIES : Tracking operation"));
-    /*
-      Track this operation and its transaction id, to determine
-      inter-transaction dependencies by {table, primary key}
-    */
-    assert( trans_dependency_tracker );
-
-    int res = trans_dependency_tracker
-      ->track_operation(table,
-                        key_rec,
-                        row_data,
-                        transaction_id);
-    if (res != 0)
-    {
-      sql_print_error("%s", trans_dependency_tracker->get_error_text());
-      DBUG_RETURN(res);
-    }
-    /* Proceed as normal */
-    break;
-  }
-  case SAS_APPLY_TRANS_DEPENDENCIES:
-  {
-    DBUG_PRINT("info", ("SAS_APPLY_TRANS_DEPENDENCIES : Deciding whether to apply"));
-    /*
-       Check if this operation's transaction id is marked in-conflict.
-       If it is, we tell the caller to perform conflict resolution now instead
-       of attempting to apply the operation.
-    */
-    assert( trans_dependency_tracker );
-
-    if (trans_dependency_tracker->in_conflict(transaction_id))
-    {
-      DBUG_PRINT("info", ("Event for transaction %llu is conflicting.  Handling.",
-                          transaction_id));
-      current_trans_row_reject_count++;
-      handle_conflict_now = true;
-      DBUG_RETURN(0);
-    }
-
-    /*
-       This transaction is not marked in-conflict, so continue with normal
-       processing.
-       Note that normal processing may subsequently detect a conflict which
-       didn't exist at the time of the previous TRACK_DEPENDENCIES pass.
-       In this case, we will rollback and repeat the TRACK_DEPENDENCIES
-       stage.
-    */
-    DBUG_PRINT("info", ("Event for transaction %llu is OK, applying",
-                        transaction_id));
-    break;
-  }
-  }
-  DBUG_RETURN(0);
-}
-
-/**
-   atTransConflictDetected
-
-   Called by the Slave SQL thread when a conflict is detected on
-   an executed operation.
-*/
-int
-st_ndb_slave_state::atTransConflictDetected(Uint64 transaction_id)
-{
-  DBUG_ENTER("atTransConflictDetected");
-
-  /*
-     The Slave has detected a conflict on an operation applied
-     to a table with Transactional Conflict Resolution defined.
-     Handle according to current state.
-  */
-  conflict_flags |= SCS_TRANS_CONFLICT_DETECTED_THIS_PASS;
-  current_trans_row_conflict_count++;
-
-  switch (trans_conflict_apply_state)
-  {
-  case SAS_NORMAL:
-  {
-    DBUG_PRINT("info", ("SAS_NORMAL : Conflict on op on table with trans detection."
-                        "Requires multi-pass resolution.  Will transition to "
-                        "SAS_TRACK_TRANS_DEPENDENCIES at Commit."));
-    /*
-      Conflict on table with transactional conflict resolution
-      defined.
-      This is the trigger that we will do transactional conflict
-      resolution.
-      Record that we need to do multiple passes to correctly
-      perform resolution.
-      TODO : Early exit from applying epoch?
-    */
-    break;
-  }
-  case SAS_TRACK_TRANS_DEPENDENCIES:
-  {
-    DBUG_PRINT("info", ("SAS_TRACK_TRANS_DEPENDENCIES : Operation in transaction %llu "
-                        "had conflict",
-                        transaction_id));
-    /*
-       Conflict on table with transactional conflict resolution
-       defined.
-       We will mark the operation's transaction_id as in-conflict,
-       so that any other operations on the transaction are also
-       considered in-conflict, and any dependent transactions are also
-       considered in-conflict.
-    */
-    assert(trans_dependency_tracker != NULL);
-    int res = trans_dependency_tracker
-      ->mark_conflict(transaction_id);
-
-    if (res != 0)
-    {
-      sql_print_error("%s", trans_dependency_tracker->get_error_text());
-      DBUG_RETURN(res);
-    }
-    break;
-  }
-  case SAS_APPLY_TRANS_DEPENDENCIES:
-  {
-    /*
-       This must be a new conflict, not noticed on the previous
-       pass.
-    */
-    DBUG_PRINT("info", ("SAS_APPLY_TRANS_DEPENDENCIES : Conflict detected.  "
-                        "Must be further conflict.  Will return to "
-                        "SAS_TRACK_TRANS_DEPENDENCIES state at commit."));
-    // TODO : Early exit from applying epoch
-    break;
-  }
-  default:
-    break;
-  }
-
-  DBUG_RETURN(0);
-}
-
-/**
-   atConflictPreCommit
-
-   Called by the Slave SQL thread prior to committing a Slave transaction.
-   This method can request that the Slave transaction is retried.
-
-
-   State transitions :
-
-                       START SLAVE /
-                       RESET SLAVE /
-                        STARTUP
-                            |
-                            |
-                            v
-                    ****************
-                    *  SAS_NORMAL  *
-                    ****************
-                       ^       |
-    No transactional   |       | Conflict on transactional table
-       conflicts       |       | (Rollback)
-       (Commit)        |       |
-                       |       v
-            **********************************
-            *  SAS_TRACK_TRANS_DEPENDENCIES  *
-            **********************************
-               ^          I              ^
-     More      I          I Dependencies |
-    conflicts  I          I determined   | No new conflicts
-     found     I          I (Rollback)   | (Commit)
-    (Rollback) I          I              |
-               I          v              |
-           **********************************
-           *  SAS_APPLY_TRANS_DEPENDENCIES  *
-           **********************************
-
-
-   Operation
-     The initial state is SAS_NORMAL.
-
-     On detecting a conflict on a transactional conflict detetecing table,
-     SAS_TRACK_TRANS_DEPENDENCIES is entered, and the epoch transaction is
-     rolled back and reapplied.
-
-     In SAS_TRACK_TRANS_DEPENDENCIES state, transaction dependencies and
-     conflicts are tracked as the epoch transaction is applied.
-
-     Then the Slave transitions to SAS_APPLY_TRANS_DEPENDENCIES state, and
-     the epoch transaction is rolled back and reapplied.
-
-     In the SAS_APPLY_TRANS_DEPENDENCIES state, operations for transactions
-     marked as in-conflict are not applied.
-
-     If this results in no new conflicts, the epoch transaction is committed,
-     and the SAS_TRACK_TRANS_DEPENDENCIES state is re-entered for processing
-     the next replicated epch transaction.
-     If it results in new conflicts, the epoch transactions is rolled back, and
-     the SAS_TRACK_TRANS_DEPENDENCIES state is re-entered again, to determine
-     the new set of dependencies.
-
-     If no conflicts are found in the SAS_TRACK_TRANS_DEPENDENCIES state, then
-     the epoch transaction is committed, and the Slave transitions to SAS_NORMAL
-     state.
-
-
-   Properties
-     1) Normally, there is no transaction dependency tracking overhead paid by
-        the slave.
-
-     2) On first detecting a transactional conflict, the epoch transaction must be
-        applied at least three times, with two rollbacks.
-
-     3) Transactional conflicts detected in subsequent epochs require the epoch
-        transaction to be applied two times, with one rollback.
-
-     4) A loop between states SAS_TRACK_TRANS_DEPENDENCIES and SAS_APPLY_TRANS_
-        DEPENDENCIES occurs when further transactional conflicts are discovered
-        in SAS_APPLY_TRANS_DEPENDENCIES state.  This implies that the  conflicts
-        discovered in the SAS_TRACK_TRANS_DEPENDENCIES state must not be complete,
-        so we revisit that state to get a more complete picture.
-
-     5) The number of iterations of this loop is fixed to a hard coded limit, after
-        which the Slave will stop with an error.  This should be an unlikely
-        occurrence, as it requires not just n conflicts, but at least 1 new conflict
-        appearing between the transactions in the epoch transaction and the
-        database between the two states, n times in a row.
-
-     6) Where conflicts are occasional, as expected, the post-commit transition to
-        SAS_TRACK_TRANS_DEPENDENCIES rather than SAS_NORMAL results in one epoch
-        transaction having its transaction dependencies needlessly tracked.
-
-*/
-int
-st_ndb_slave_state::atConflictPreCommit(bool& retry_slave_trans)
-{
-  DBUG_ENTER("atConflictPreCommit");
-
-  /*
-    Prior to committing a Slave transaction, we check whether
-    Transactional conflicts have been detected which require
-    us to retry the slave transaction
-  */
-  retry_slave_trans = false;
-  switch(trans_conflict_apply_state)
-  {
-  case SAS_NORMAL:
-  {
-    DBUG_PRINT("info", ("SAS_NORMAL"));
-    /*
-       Normal case.  Only if we defined conflict detection on a table
-       with transactional conflict detection, and saw conflicts (on any table)
-       do we go to another state
-     */
-    if (conflict_flags & SCS_TRANS_CONFLICT_DETECTED_THIS_PASS)
-    {
-      DBUG_PRINT("info", ("Conflict(s) detected this pass, transitioning to "
-                          "SAS_TRACK_TRANS_DEPENDENCIES."));
-      assert(conflict_flags & SCS_OPS_DEFINED);
-      /* Transactional conflict resolution required, switch state */
-      atBeginTransConflictHandling();
-      resetPerAttemptCounters();
-      trans_conflict_apply_state = SAS_TRACK_TRANS_DEPENDENCIES;
-      retry_slave_trans = true;
-    }
-    break;
-  }
-  case SAS_TRACK_TRANS_DEPENDENCIES:
-  {
-    DBUG_PRINT("info", ("SAS_TRACK_TRANS_DEPENDENCIES"));
-
-    if (conflict_flags & SCS_TRANS_CONFLICT_DETECTED_THIS_PASS)
-    {
-      /*
-         Conflict on table with transactional detection
-         this pass, we have collected the details and
-         dependencies, now transition to
-         SAS_APPLY_TRANS_DEPENDENCIES and
-         reapply the epoch transaction without the
-         conflicting transactions.
-      */
-      assert(conflict_flags & SCS_OPS_DEFINED);
-      DBUG_PRINT("info", ("Transactional conflicts, transitioning to "
-                          "SAS_APPLY_TRANS_DEPENDENCIES"));
-
-      trans_conflict_apply_state = SAS_APPLY_TRANS_DEPENDENCIES;
-      trans_detect_iter_count++;
-      retry_slave_trans = true;
-      break;
-    }
-    else
-    {
-      /*
-         No transactional conflicts detected this pass, let's
-         return to SAS_NORMAL state after commit for more efficient
-         application of epoch transactions
-      */
-      DBUG_PRINT("info", ("No transactional conflicts, transitioning to "
-                          "SAS_NORMAL"));
-      atEndTransConflictHandling();
-      trans_conflict_apply_state = SAS_NORMAL;
-      break;
-    }
-  }
-  case SAS_APPLY_TRANS_DEPENDENCIES:
-  {
-    DBUG_PRINT("info", ("SAS_APPLY_TRANS_DEPENDENCIES"));
-    assert(conflict_flags & SCS_OPS_DEFINED);
-    /*
-       We've applied the Slave epoch transaction subject to the
-       conflict detection.  If any further transactional
-       conflicts have been observed, then we must repeat the
-       process.
-    */
-    atEndTransConflictHandling();
-    atBeginTransConflictHandling();
-    trans_conflict_apply_state = SAS_TRACK_TRANS_DEPENDENCIES;
-
-    if (unlikely(conflict_flags & SCS_TRANS_CONFLICT_DETECTED_THIS_PASS))
-    {
-      DBUG_PRINT("info", ("Further conflict(s) detected, repeating the "
-                          "TRACK_TRANS_DEPENDENCIES pass"));
-      /*
-         Further conflict observed when applying, need
-         to re-determine dependencies
-      */
-      resetPerAttemptCounters();
-      retry_slave_trans = true;
-      break;
-    }
-
-
-    DBUG_PRINT("info", ("No further conflicts detected, committing and "
-                        "returning to SAS_TRACK_TRANS_DEPENDENCIES state"));
-    /*
-       With dependencies taken into account, no further
-       conflicts detected, can now proceed to commit
-    */
-    break;
-  }
-  }
-
-  /*
-    Clear conflict flags, to ensure that we detect any new conflicts
-  */
-  conflict_flags = 0;
-
-  if (retry_slave_trans)
-  {
-    DBUG_PRINT("info", ("Requesting transaction restart"));
-    DBUG_RETURN(1);
-  }
-
-  DBUG_PRINT("info", ("Allowing commit to proceed"));
-  DBUG_RETURN(0);
-}
-
-/**
-   atEndTransConflictHandling
-
-   Called when transactional conflict handling has completed.
-*/
-void
-st_ndb_slave_state::atEndTransConflictHandling()
-{
-  DBUG_ENTER("atEndTransConflictHandling");
-  /* Release any conflict handling state */
-  if (trans_dependency_tracker)
-  {
-    current_trans_in_conflict_count =
-      trans_dependency_tracker->get_conflict_count();
-    trans_dependency_tracker = NULL;
-    free_root(&conflict_mem_root, MY_MARK_BLOCKS_FREE);
-  }
-  DBUG_VOID_RETURN;
-};
-
-/**
    prepare_conflict_detection
 
    This method is called during operation definition by the slave,
@@ -5202,8 +4590,7 @@ ha_ndbcluster::prepare_conflict_detectio
 */
 
 int
-handle_conflict_op_error(Thd_ndb* thd_ndb,
-                         NdbTransaction* trans,
+handle_conflict_op_error(NdbTransaction* trans,
                          const NdbError& err,
                          const NdbOperation* op)
 {
@@ -5954,92 +5341,40 @@ handle_row_conflict(NDB_CONFLICT_FN_SHAR
   }
 
   if (cfn_share &&
-      cfn_share->m_ex_tab != NULL)
+      cfn_share->m_ex_tab_writer.hasTable())
   {
     NdbError err;
-    assert(err.code == 0);
-    do
+    if (cfn_share->m_ex_tab_writer.writeRow(conflict_trans,
+                                            key_rec,
+                                            ::server_id,
+                                            ndb_mi_get_master_server_id(),
+                                            g_ndb_slave_state.current_master_server_epoch,
+                                            pk_row,
+                                            err) != 0)
     {
-      /* Have exceptions table, add row to it */
-      const NDBTAB *ex_tab= cfn_share->m_ex_tab;
-
-      /* get insert op */
-      NdbOperation *ex_op= conflict_trans->getNdbOperation(ex_tab);
-      if (ex_op == NULL)
+      if (err.code != 0)
       {
-        err= conflict_trans->getNdbError();
-        break;
-      }
-      if (ex_op->insertTuple() == -1)
-      {
-        err= ex_op->getNdbError();
-        break;
-      }
-      {
-        uint32 server_id= (uint32)::server_id;
-        uint32 master_server_id= (uint32) ndb_mi_get_master_server_id();
-        uint64 master_epoch= (uint64) g_ndb_slave_state.current_master_server_epoch;
-        uint32 count= (uint32)++(cfn_share->m_count);
-        if (ex_op->setValue((Uint32)0, (const char *)&(server_id)) ||
-            ex_op->setValue((Uint32)1, (const char *)&(master_server_id)) ||
-            ex_op->setValue((Uint32)2, (const char *)&(master_epoch)) ||
-            ex_op->setValue((Uint32)3, (const char *)&(count)))
+        if (err.status == NdbError::TemporaryError)
         {
-          err= ex_op->getNdbError();
-          break;
+          /* Slave will roll back and retry entire transaction. */
+          ERR_RETURN(err);
         }
-      }
-      /* copy primary keys */
-      {
-        const int fixed_cols= 4;
-        int nkey= cfn_share->m_pk_cols;
-        int k;
-        for (k= 0; k < nkey; k++)
+        else
         {
-          DBUG_ASSERT(pk_row != NULL);
-          const uchar* data=
-            (const uchar*) NdbDictionary::getValuePtr(key_rec,
-                                                      (const char*) pk_row,
-                                                      cfn_share->m_key_attrids[k]);
-          if (ex_op->setValue((Uint32)(fixed_cols + k), (const char*)data) == -1)
-          {
-            err= ex_op->getNdbError();
-            break;
-          }
+          char msg[FN_REFLEN];
+          my_snprintf(msg, sizeof(msg), "%s conflict handling "
+                      "on table %s hit Ndb error %d '%s'",
+                      handling_type,
+                      table_name,
+                      err.code,
+                      err.message);
+          push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
+                              ER_EXCEPTIONS_WRITE_ERROR,
+                              ER(ER_EXCEPTIONS_WRITE_ERROR), msg);
+          /* Slave will stop replication. */
+          DBUG_RETURN(ER_EXCEPTIONS_WRITE_ERROR);
         }
       }
-    } while (0);
-
-    if (err.code != 0)
-    {
-      char msg[FN_REFLEN];
-      my_snprintf(msg, sizeof(msg), "%s conflict handling "
-                  "on table %s hit Ndb error %d '%s'",
-                  handling_type,
-                  table_name,
-                  err.code,
-                  err.message);
-
-      if (err.classification == NdbError::SchemaError)
-      {
-        /* Something up with Exceptions table schema, forget it */
-        NdbDictionary::Dictionary* dict= conflict_trans->getNdb()->getDictionary();
-        dict->removeTableGlobal(*(cfn_share->m_ex_tab), false);
-        cfn_share->m_ex_tab= NULL;
-      }
-      else if (err.status == NdbError::TemporaryError)
-      {
-        /* Slave will roll back and retry entire transaction. */
-        ERR_RETURN(err);
-      }
-      else
-      {
-        push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
-                            ER_EXCEPTIONS_WRITE_ERROR,
-                            ER(ER_EXCEPTIONS_WRITE_ERROR), msg);
-        /* Slave will stop replication. */
-        DBUG_RETURN(ER_EXCEPTIONS_WRITE_ERROR);
-      }
     }
   } /* if (cfn_share->m_ex_tab != NULL) */
 
@@ -8814,6 +8149,7 @@ int ndbcluster_commit(handlerton *hton, 
      * an execute(NoCommit) before committing, as conflict op handling
      * is done by execute(NoCommit)
      */
+    /* TODO : Add as function */
     if (g_ndb_slave_state.conflict_flags & SCS_OPS_DEFINED)
     {
       if (thd_ndb->m_unsent_bytes)
@@ -9948,6 +9284,19 @@ int ha_ndbcluster::create(const char *na
   DBUG_ASSERT(*fn_rext((char*)name) == 0);
   set_dbname(name);
   set_tabname(name);
+  
+  /*
+    Check that database name and table name will fit within limits
+  */
+  if (strlen(m_dbname) > NDB_MAX_DDL_NAME_BYTESIZE ||
+      strlen(m_tabname) > NDB_MAX_DDL_NAME_BYTESIZE)
+  {
+    my_errno= ER_TOO_LONG_IDENT;
+    push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+                        ER_TOO_LONG_IDENT,
+                        "Ndb has an internal limit of %u bytes on the size of schema identifiers", NDB_MAX_DDL_NAME_BYTESIZE);
+    DBUG_RETURN(my_errno);
+  }
 
   if ((my_errno= check_ndb_connection(thd)))
     DBUG_RETURN(my_errno);
@@ -10046,6 +9395,7 @@ int ha_ndbcluster::create(const char *na
   /* Reset database name */
   ndb->setDatabaseName(m_dbname);
 
+  /* TODO : Add as per conflict function 'virtual' */
   /* Use ndb_replication information as required */
   if (conflict_fn != NULL)
   {
@@ -12312,41 +11662,41 @@ ndbcluster_find_files(handlerton *hton, 
 
   if (thd == injector_thd)
   {
-    /*
-      Don't delete anything when called from
-      the binlog thread. This is a kludge to avoid
-      that something is deleted when "Ndb schema dist"
-      uses find_files() to check for "local tables in db"
-    */
-  }
-  else
-  {
-    /*
-      Delete old files
-      (.frm files with corresponding .ndb + does not exists in NDB)
-    */
-    List_iterator_fast<char> it3(delete_list);
-    while ((file_name_str= it3++))
-    {
-      DBUG_PRINT("info", ("Deleting local files for table '%s.%s'",
-                          db, file_name_str));
-
-      // Delete the table and its related files from disk
-      Ndb_local_schema::Table local_table(thd, db, file_name_str);
-      local_table.remove_table();
-
-      // Flush the table out of ndbapi's dictionary cache
-      Ndb_table_guard ndbtab_g(ndb->getDictionary(), file_name_str);
-      ndbtab_g.invalidate();
-
-      // Flush the table from table def. cache.
-      TABLE_LIST table_list;
-      memset(&table_list, 0, sizeof(table_list));
-      table_list.db= (char*)db;
-      table_list.alias= table_list.table_name= file_name_str;
-      close_cached_tables(thd, &table_list, false, 0);
-
-      DBUG_ASSERT(!thd->is_error());
+    /*
+      Don't delete anything when called from
+      the binlog thread. This is a kludge to avoid
+      that something is deleted when "Ndb schema dist"
+      uses find_files() to check for "local tables in db"
+    */
+  }
+  else
+  {
+    /*
+      Delete old files
+      (.frm files with corresponding .ndb + does not exists in NDB)
+    */
+    List_iterator_fast<char> it3(delete_list);
+    while ((file_name_str= it3++))
+    {
+      DBUG_PRINT("info", ("Deleting local files for table '%s.%s'",
+                          db, file_name_str));
+
+      // Delete the table and its related files from disk
+      Ndb_local_schema::Table local_table(thd, db, file_name_str);
+      local_table.remove_table();
+
+      // Flush the table out of ndbapi's dictionary cache
+      Ndb_table_guard ndbtab_g(ndb->getDictionary(), file_name_str);
+      ndbtab_g.invalidate();
+
+      // Flush the table from table def. cache.
+      TABLE_LIST table_list;
+      memset(&table_list, 0, sizeof(table_list));
+      table_list.db= (char*)db;
+      table_list.alias= table_list.table_name= file_name_str;
+      close_cached_tables(thd, &table_list, false, 0);
+
+      DBUG_ASSERT(!thd->is_error());
     }
   }
 
@@ -13851,7 +13201,6 @@ NDB_SHARE *ndbcluster_get_share(const ch
   DBUG_RETURN(share);
 }
 
-
 void ndbcluster_real_free_share(NDB_SHARE **share)
 {
   DBUG_ENTER("ndbcluster_real_free_share");

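Note on the new length guard in ha_ndbcluster::create() above: it rejects
identifiers that would not fit in the ndb_schema system table's columns. A
minimal sketch of the same check factored into a helper, assuming the
NDB_MAX_DDL_NAME_BYTESIZE limit defined in ha_ndbcluster.h below (the helper
name is hypothetical, not part of this patch):

  /* Hypothetical helper: returns true when both schema identifiers
     fit within the ndb_schema system table's column limits. */
  static bool ndb_dd_names_fit(const char* dbname, const char* tabname)
  {
    return strlen(dbname) <= NDB_MAX_DDL_NAME_BYTESIZE &&
           strlen(tabname) <= NDB_MAX_DDL_NAME_BYTESIZE;
  }

create() would then warn and return ER_TOO_LONG_IDENT whenever the helper
returns false, exactly as the hunk above does inline.
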
=== modified file 'sql/ha_ndbcluster.h'
--- a/sql/ha_ndbcluster.h	2012-01-26 14:32:08 +0000
+++ b/sql/ha_ndbcluster.h	2012-02-23 12:37:59 +0000
@@ -21,12 +21,17 @@
 */
 
 
+/* DDL names have to fit in system table ndb_schema */
+#define NDB_MAX_DDL_NAME_BYTESIZE 63
+#define NDB_MAX_DDL_NAME_BYTESIZE_STR "63"
+
 /* Blob tables and events are internal to NDB and must never be accessed */
 #define IS_NDB_BLOB_PREFIX(A) is_prefix(A, "NDB$BLOB")
 
 #include <ndbapi/NdbApi.hpp>
 #include <ndbapi/ndbapi_limits.h>
 #include <kernel/ndb_limits.h>
+#include "ndb_conflict.h"
 
 #define NDB_IGNORE_VALUE(x) (void)x
 
@@ -110,99 +115,6 @@ public:
 #include "ndb_ndbapi_util.h"
 #include "ndb_share.h"
 
-enum enum_slave_trans_conflict_apply_state
-{
-  /* Normal with optional row-level conflict detection */
-  SAS_NORMAL,
-
-  /*
-    SAS_TRACK_TRANS_DEPENDENCIES
-    Track inter-transaction dependencies
-  */
-  SAS_TRACK_TRANS_DEPENDENCIES,
-
-  /*
-    SAS_APPLY_TRANS_DEPENDENCIES
-    Apply only non conflicting transactions
-  */
-  SAS_APPLY_TRANS_DEPENDENCIES
-};
-
-enum enum_slave_conflict_flags
-{
-  /* Conflict detection Ops defined */
-  SCS_OPS_DEFINED = 1,
-  /* Conflict detected on table with transactional resolution */
-  SCS_TRANS_CONFLICT_DETECTED_THIS_PASS = 2
-};
-
-/*
-  State associated with the Slave thread
-  (From the Ndb handler's point of view)
-*/
-struct st_ndb_slave_state
-{
-  /* Counter values for current slave transaction */
-  Uint32 current_violation_count[CFT_NUMBER_OF_CFTS];
-  Uint64 current_master_server_epoch;
-  Uint64 current_max_rep_epoch;
-  uint8 conflict_flags; /* enum_slave_conflict_flags */
-    /* Transactional conflict detection */
-  Uint32 retry_trans_count;
-  Uint32 current_trans_row_conflict_count;
-  Uint32 current_trans_row_reject_count;
-  Uint32 current_trans_in_conflict_count;
-
-  /* Cumulative counter values */
-  Uint64 total_violation_count[CFT_NUMBER_OF_CFTS];
-  Uint64 max_rep_epoch;
-  Uint32 sql_run_id;
-  /* Transactional conflict detection */
-  Uint64 trans_row_conflict_count;
-  Uint64 trans_row_reject_count;
-  Uint64 trans_detect_iter_count;
-  Uint64 trans_in_conflict_count;
-  Uint64 trans_conflict_commit_count;
-
-  static const Uint32 MAX_RETRY_TRANS_COUNT = 100;
-
-  /*
-    Slave Apply State
-
-    State of Binlog application from Ndb point of view.
-  */
-  enum_slave_trans_conflict_apply_state trans_conflict_apply_state;
-
-  MEM_ROOT conflict_mem_root;
-  class DependencyTracker* trans_dependency_tracker;
-
-  /* Methods */
-  void atStartSlave();
-  int  atPrepareConflictDetection(const NdbDictionary::Table* table,
-                                  const NdbRecord* key_rec,
-                                  const uchar* row_data,
-                                  Uint64 transaction_id,
-                                  bool& handle_conflict_now);
-  int  atTransConflictDetected(Uint64 transaction_id);
-  int  atConflictPreCommit(bool& retry_slave_trans);
-
-  void atBeginTransConflictHandling();
-  void atEndTransConflictHandling();
-
-  void atTransactionCommit();
-  void atTransactionAbort();
-  void atResetSlave();
-
-  void atApplyStatusWrite(Uint32 master_server_id,
-                          Uint32 row_server_id,
-                          Uint64 row_epoch,
-                          bool is_row_server_id_local);
-
-  void resetPerAttemptCounters();
-
-  st_ndb_slave_state();
-};
-
 struct Ndb_local_table_statistics {
   int no_uncommitted_rows_count;
   ulong last_count;

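Defining the limit twice, once as a number and once as a string, looks
redundant but serves two kinds of call site: the numeric form drives strlen()
checks at runtime, while the string form is spliced into DDL text at compile
time via adjacent-literal concatenation, as the ha_ndbcluster_binlog.cc hunk
below shows. A minimal illustration (the variable name is illustrative only):

  /* Adjacent string literals concatenate at compile time, so the
     column width stays in sync with NDB_MAX_DDL_NAME_BYTESIZE. */
  static const char* db_col_def =
    "db VARBINARY(" NDB_MAX_DDL_NAME_BYTESIZE_STR ") NOT NULL,";
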
=== modified file 'sql/ha_ndbcluster_binlog.cc'
--- a/sql/ha_ndbcluster_binlog.cc	2012-01-26 14:32:08 +0000
+++ b/sql/ha_ndbcluster_binlog.cc	2012-02-23 12:37:59 +0000
@@ -1022,8 +1022,12 @@ ndb_schema_table__create(THD *thd)
                              STRING_WITH_LEN("mysql"),
                              STRING_WITH_LEN("ndb_schema"),
                              // table_definition
-                             "db VARBINARY(63) NOT NULL,"
-                             "name VARBINARY(63) NOT NULL,"
+                             "db VARBINARY("
+                             NDB_MAX_DDL_NAME_BYTESIZE_STR
+                             ") NOT NULL,"
+                             "name VARBINARY("
+                             NDB_MAX_DDL_NAME_BYTESIZE_STR
+                             ") NOT NULL,"
                              "slock BINARY(32) NOT NULL,"
                              "query BLOB NOT NULL,"
                              "node_id INT UNSIGNED NOT NULL,"
@@ -4014,6 +4018,8 @@ slave_set_resolve_fn(THD *thd, NDB_SHARE
   cfn_share->m_resolve_column= field_index;
   cfn_share->m_flags = flags;
 
+  /* Init Exceptions Table Writer */
+  new (&cfn_share->m_ex_tab_writer) ExceptionsTableWriter();
   {
     /* get exceptions table */
     char ex_tab_name[FN_REFLEN];
@@ -4025,71 +4031,31 @@ slave_set_resolve_fn(THD *thd, NDB_SHARE
     const NDBTAB *ex_tab= ndbtab_g.get_table();
     if (ex_tab)
     {
-      const int fixed_cols= 4;
-      bool ok=
-        ex_tab->getNoOfColumns() >= fixed_cols &&
-        ex_tab->getNoOfPrimaryKeys() == 4 &&
-        /* server id */
-        ex_tab->getColumn(0)->getType() == NDBCOL::Unsigned &&
-        ex_tab->getColumn(0)->getPrimaryKey() &&
-        /* master_server_id */
-        ex_tab->getColumn(1)->getType() == NDBCOL::Unsigned &&
-        ex_tab->getColumn(1)->getPrimaryKey() &&
-        /* master_epoch */
-        ex_tab->getColumn(2)->getType() == NDBCOL::Bigunsigned &&
-        ex_tab->getColumn(2)->getPrimaryKey() &&
-        /* count */
-        ex_tab->getColumn(3)->getType() == NDBCOL::Unsigned &&
-        ex_tab->getColumn(3)->getPrimaryKey();
-      if (ok)
-      {
-        int ncol= ndbtab->getNoOfColumns();
-        int nkey= ndbtab->getNoOfPrimaryKeys();
-        int i, k;
-        for (i= k= 0; i < ncol && k < nkey; i++)
-        {
-          const NdbDictionary::Column* col= ndbtab->getColumn(i);
-          if (col->getPrimaryKey())
-          {
-            const NdbDictionary::Column* ex_col=
-              ex_tab->getColumn(fixed_cols + k);
-            ok=
-              ex_col != NULL &&
-              col->getType() == ex_col->getType() &&
-              col->getLength() == ex_col->getLength() &&
-              col->getNullable() == ex_col->getNullable();
-            if (!ok)
-              break;
-            /*
-               Store mapping of Exception table key# to
-               orig table attrid
-            */
-            cfn_share->m_key_attrids[k]= i;
-            k++;
-          }
-        }
-        if (ok)
+      char msgBuf[ FN_REFLEN ];
+      const char* msg = NULL;
+      if (cfn_share->m_ex_tab_writer.init(ndbtab,
+                                          ex_tab,
+                                          msgBuf,
+                                          sizeof(msgBuf),
+                                          &msg) == 0)
+      {
+        /* Ok */
+        /* Hold our table reference outside the table_guard scope */
+        ndbtab_g.release();
+        if (opt_ndb_extra_logging)
         {
-          cfn_share->m_ex_tab= ex_tab;
-          cfn_share->m_pk_cols= nkey;
-          ndbtab_g.release();
-          if (opt_ndb_extra_logging)
-            sql_print_information("NDB Slave: Table %s.%s logging exceptions to %s.%s",
-                                  share->db,
-                                  share->table_name,
-                                  share->db,
-                                  ex_tab_name);
+          sql_print_information("NDB Slave: Table %s.%s logging exceptions to %s.%s",
+                                share->db,
+                                share->table_name,
+                                share->db,
+                                ex_tab_name);
         }
-        else
-          sql_print_warning("NDB Slave: exceptions table %s has wrong "
-                            "definition (column %d)",
-                            ex_tab_name, fixed_cols + k);
       }
       else
-        sql_print_warning("NDB Slave: exceptions table %s has wrong "
-                          "definition (initial %d columns)",
-                          ex_tab_name, fixed_cols);
-    }
+      {
+        sql_print_warning("%s", msg);
+      }
+    } /* if (ex_tab) */
   }
   DBUG_RETURN(0);
 }

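The hunk above reduces slave_set_resolve_fn() to driving the new
ExceptionsTableWriter, whose definition follows in sql/ndb_conflict.h/.cc
below. A condensed sketch of the writer's lifecycle as this patch uses it
(error handling elided; all calls match the signatures added below):

  ExceptionsTableWriter writer;   // placement-new'd into cfn_share in practice
  if (writer.init(mainTab, exTab, msgBuf, sizeof(msgBuf), &msg) == 0)
  {
    /* Per detected conflict, in handle_row_conflict(): */
    if (writer.hasTable())
      writer.writeRow(trans, key_rec, server_id, master_server_id,
                      master_epoch, pk_row, err);
  }
  /* On NDB_SHARE::destroy(): drop the table reference. */
  writer.free(ndb);
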
=== added file 'sql/ndb_conflict.cc'
--- a/sql/ndb_conflict.cc	1970-01-01 00:00:00 +0000
+++ b/sql/ndb_conflict.cc	2012-01-31 13:19:19 +0000
@@ -0,0 +1,805 @@
+/*
+   Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+#include <my_global.h> /* For config defines */
+
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
+/* distcheck does not compile from here... */
+
+#include "ha_ndbcluster_glue.h"
+#include "ndb_conflict.h"
+
+#ifdef HAVE_NDB_BINLOG
+
+#define NDBTAB NdbDictionary::Table
+#define NDBCOL NdbDictionary::Column
+
+int
+ExceptionsTableWriter::init(const NdbDictionary::Table* mainTable,
+                            const NdbDictionary::Table* exceptionsTable,
+                            char* msg_buf,
+                            uint msg_buf_len,
+                            const char** msg)
+{
+  const char* ex_tab_name = exceptionsTable->getName();
+  const int fixed_cols= 4;
+  bool ok=
+    exceptionsTable->getNoOfColumns() >= fixed_cols &&
+    exceptionsTable->getNoOfPrimaryKeys() == 4 &&
+    /* server id */
+    exceptionsTable->getColumn(0)->getType() == NDBCOL::Unsigned &&
+    exceptionsTable->getColumn(0)->getPrimaryKey() &&
+    /* master_server_id */
+    exceptionsTable->getColumn(1)->getType() == NDBCOL::Unsigned &&
+    exceptionsTable->getColumn(1)->getPrimaryKey() &&
+    /* master_epoch */
+    exceptionsTable->getColumn(2)->getType() == NDBCOL::Bigunsigned &&
+    exceptionsTable->getColumn(2)->getPrimaryKey() &&
+    /* count */
+    exceptionsTable->getColumn(3)->getType() == NDBCOL::Unsigned &&
+    exceptionsTable->getColumn(3)->getPrimaryKey();
+  if (ok)
+  {
+    int ncol= mainTable->getNoOfColumns();
+    int nkey= mainTable->getNoOfPrimaryKeys();
+    int i, k;
+    for (i= k= 0; i < ncol && k < nkey; i++)
+    {
+      const NdbDictionary::Column* col= mainTable->getColumn(i);
+      if (col->getPrimaryKey())
+      {
+        const NdbDictionary::Column* ex_col=
+          exceptionsTable->getColumn(fixed_cols + k);
+        ok=
+          ex_col != NULL &&
+          col->getType() == ex_col->getType() &&
+          col->getLength() == ex_col->getLength() &&
+          col->getNullable() == ex_col->getNullable();
+        if (!ok)
+          break;
+        /*
+          Store mapping of Exception table key# to
+          orig table attrid
+        */
+        m_key_attrids[k]= i;
+        k++;
+      }
+    }
+    if (ok)
+    {
+      m_ex_tab= exceptionsTable;
+      m_pk_cols= nkey;
+      return 0;
+    }
+    else
+      my_snprintf(msg_buf, msg_buf_len,
+                  "NDB Slave: exceptions table %s has wrong "
+                  "definition (column %d)",
+                  ex_tab_name, fixed_cols + k);
+  }
+  else
+    my_snprintf(msg_buf, msg_buf_len,
+                "NDB Slave: exceptions table %s has wrong "
+                "definition (initial %d columns)",
+                ex_tab_name, fixed_cols);
+
+  *msg = msg_buf;
+  return -1;
+}
+
+void
+ExceptionsTableWriter::free(Ndb* ndb)
+{
+  if (m_ex_tab)
+  {
+    NdbDictionary::Dictionary* dict = ndb->getDictionary();
+    dict->removeTableGlobal(*m_ex_tab, 0);
+    m_ex_tab= 0;
+  }
+}
+
+int
+ExceptionsTableWriter::writeRow(NdbTransaction* trans,
+                                const NdbRecord* keyRecord,
+                                uint32 server_id,
+                                uint32 master_server_id,
+                                uint64 master_epoch,
+                                const uchar* rowPtr,
+                                NdbError& err)
+{
+  DBUG_ENTER("ExceptionsTableWriter::writeRow");
+  assert(err.code == 0);
+  do
+  {
+    /* Have exceptions table, add row to it */
+    const NDBTAB *ex_tab= m_ex_tab;
+
+    /* get insert op */
+    NdbOperation *ex_op= trans->getNdbOperation(ex_tab);
+    if (ex_op == NULL)
+    {
+      err= trans->getNdbError();
+      break;
+    }
+    if (ex_op->insertTuple() == -1)
+    {
+      err= ex_op->getNdbError();
+      break;
+    }
+    {
+      uint32 count= (uint32)++m_count;
+      if (ex_op->setValue((Uint32)0, (const char *)&(server_id)) ||
+          ex_op->setValue((Uint32)1, (const char *)&(master_server_id)) ||
+          ex_op->setValue((Uint32)2, (const char *)&(master_epoch)) ||
+          ex_op->setValue((Uint32)3, (const char *)&(count)))
+      {
+        err= ex_op->getNdbError();
+        break;
+      }
+    }
+    /* copy primary keys */
+    {
+      const int fixed_cols= 4;
+      int nkey= m_pk_cols;
+      int k;
+      for (k= 0; k < nkey; k++)
+      {
+        DBUG_ASSERT(rowPtr != NULL);
+        const uchar* data=
+          (const uchar*) NdbDictionary::getValuePtr(keyRecord,
+                                                    (const char*) rowPtr,
+                                                    m_key_attrids[k]);
+        if (ex_op->setValue((Uint32)(fixed_cols + k), (const char*)data) == -1)
+        {
+          err= ex_op->getNdbError();
+          break;
+        }
+      }
+    }
+  } while (0);
+
+  if (err.code != 0)
+  {
+    if (err.classification == NdbError::SchemaError)
+    {
+      /* Something up with Exceptions table schema, forget it.
+       * No further exceptions will be recorded.
+       * TODO : Log this somehow
+       */
+      NdbDictionary::Dictionary* dict= trans->getNdb()->getDictionary();
+      dict->removeTableGlobal(*m_ex_tab, false);
+      m_ex_tab= NULL;
+      DBUG_RETURN(0);
+    }
+    DBUG_RETURN(-1);
+  }
+  DBUG_RETURN(0);
+}
+
+/* HAVE_NDB_BINLOG */
+#endif
+
+/**
+   st_ndb_slave_state constructor
+
+   Initialise Ndb Slave state object
+*/
+st_ndb_slave_state::st_ndb_slave_state()
+  : current_master_server_epoch(0),
+    current_max_rep_epoch(0),
+    conflict_flags(0),
+    retry_trans_count(0),
+    current_trans_row_conflict_count(0),
+    current_trans_row_reject_count(0),
+    current_trans_in_conflict_count(0),
+    max_rep_epoch(0),
+    sql_run_id(~Uint32(0)),
+    trans_row_conflict_count(0),
+    trans_row_reject_count(0),
+    trans_detect_iter_count(0),
+    trans_in_conflict_count(0),
+    trans_conflict_commit_count(0),
+    trans_conflict_apply_state(SAS_NORMAL),
+    trans_dependency_tracker(NULL)
+{
+  memset(current_violation_count, 0, sizeof(current_violation_count));
+  memset(total_violation_count, 0, sizeof(total_violation_count));
+
+  /* Init conflict handling state memroot */
+  const size_t CONFLICT_MEMROOT_BLOCK_SIZE = 32768;
+  init_alloc_root(&conflict_mem_root, CONFLICT_MEMROOT_BLOCK_SIZE, 0);
+};
+
+/**
+   resetPerAttemptCounters
+
+   Reset the per-epoch-transaction-application-attempt counters
+*/
+void
+st_ndb_slave_state::resetPerAttemptCounters()
+{
+  memset(current_violation_count, 0, sizeof(current_violation_count));
+  current_trans_row_conflict_count = 0;
+  current_trans_row_reject_count = 0;
+  current_trans_in_conflict_count = 0;
+
+  conflict_flags = 0;
+  current_max_rep_epoch = 0;
+}
+
+/**
+   atTransactionAbort()
+
+   Called by Slave SQL thread during transaction abort.
+*/
+void
+st_ndb_slave_state::atTransactionAbort()
+{
+  /* Reset current-transaction counters + state */
+  resetPerAttemptCounters();
+}
+
+
+
+/**
+   atTransactionCommit()
+
+   Called by Slave SQL thread after transaction commit
+*/
+void
+st_ndb_slave_state::atTransactionCommit()
+{
+  assert( ((trans_dependency_tracker == NULL) &&
+           (trans_conflict_apply_state == SAS_NORMAL)) ||
+          ((trans_dependency_tracker != NULL) &&
+           (trans_conflict_apply_state == SAS_TRACK_TRANS_DEPENDENCIES)) );
+  assert( trans_conflict_apply_state != SAS_APPLY_TRANS_DEPENDENCIES );
+
+  /* Merge committed transaction counters into total state
+   * Then reset current transaction counters
+   */
+  for (int i=0; i < CFT_NUMBER_OF_CFTS; i++)
+  {
+    total_violation_count[i]+= current_violation_count[i];
+  }
+  trans_row_conflict_count+= current_trans_row_conflict_count;
+  trans_row_reject_count+= current_trans_row_reject_count;
+  trans_in_conflict_count+= current_trans_in_conflict_count;
+
+  if (current_trans_in_conflict_count)
+    trans_conflict_commit_count++;
+
+  if (current_max_rep_epoch > max_rep_epoch)
+  {
+    DBUG_PRINT("info", ("Max replicated epoch increases from %llu to %llu",
+                        max_rep_epoch,
+                        current_max_rep_epoch));
+    max_rep_epoch = current_max_rep_epoch;
+  }
+
+  resetPerAttemptCounters();
+
+  /* Clear per-epoch-transaction retry_trans_count */
+  retry_trans_count = 0;
+}
+
+/**
+   atApplyStatusWrite
+
+   Called by Slave SQL thread when applying an event to the
+   ndb_apply_status table
+*/
+void
+st_ndb_slave_state::atApplyStatusWrite(Uint32 master_server_id,
+                                       Uint32 row_server_id,
+                                       Uint64 row_epoch,
+                                       bool is_row_server_id_local)
+{
+  if (row_server_id == master_server_id)
+  {
+    /*
+       WRITE_ROW to ndb_apply_status injected by MySQLD
+       immediately upstream of us.
+       Record epoch
+    */
+    current_master_server_epoch = row_epoch;
+    assert(! is_row_server_id_local);
+  }
+  else if (is_row_server_id_local)
+  {
+    DBUG_PRINT("info", ("Recording application of local server %u epoch %llu "
+                        " which is %s.",
+                        row_server_id, row_epoch,
+                        (row_epoch > current_max_rep_epoch)?
+                        " new highest." : " older than previously applied"));
+    if (row_epoch > current_max_rep_epoch)
+    {
+      /*
+        Store new highest epoch in thdvar.  If we commit successfully
+        then this can become the new global max
+      */
+      current_max_rep_epoch = row_epoch;
+    }
+  }
+}
+
+/**
+   atResetSlave()
+
+   Called when RESET SLAVE command issued - in context of command client.
+*/
+void
+st_ndb_slave_state::atResetSlave()
+{
+  /* Reset the Maximum replicated epoch vars
+   * on slave reset
+   * No need to touch the sql_run_id as that
+   * will increment if the slave is started
+   * again.
+   */
+  resetPerAttemptCounters();
+
+  retry_trans_count = 0;
+  max_rep_epoch = 0;
+}
+
+
+/**
+   atStartSlave()
+
+   Called by Slave SQL thread when first applying a row to Ndb after
+   a START SLAVE command.
+*/
+void
+st_ndb_slave_state::atStartSlave()
+{
+#ifdef HAVE_NDB_BINLOG
+  if (trans_conflict_apply_state != SAS_NORMAL)
+  {
+    /*
+      Remove conflict handling state on a SQL thread
+      restart
+    */
+    atEndTransConflictHandling();
+    trans_conflict_apply_state = SAS_NORMAL;
+  }
+#endif
+};
+
+#ifdef HAVE_NDB_BINLOG
+
+/**
+   atEndTransConflictHandling
+
+   Called when transactional conflict handling has completed.
+*/
+void
+st_ndb_slave_state::atEndTransConflictHandling()
+{
+  DBUG_ENTER("atEndTransConflictHandling");
+  /* Release any conflict handling state */
+  if (trans_dependency_tracker)
+  {
+    current_trans_in_conflict_count =
+      trans_dependency_tracker->get_conflict_count();
+    trans_dependency_tracker = NULL;
+    free_root(&conflict_mem_root, MY_MARK_BLOCKS_FREE);
+  }
+  DBUG_VOID_RETURN;
+};
+
+/**
+   atBeginTransConflictHandling()
+
+   Called by Slave SQL thread when it determines that Transactional
+   Conflict handling is required
+*/
+void
+st_ndb_slave_state::atBeginTransConflictHandling()
+{
+  DBUG_ENTER("atBeginTransConflictHandling");
+  /*
+     Allocate and initialise Transactional Conflict
+     Resolution Handling Structures
+  */
+  assert(trans_dependency_tracker == NULL);
+  trans_dependency_tracker = DependencyTracker::newDependencyTracker(&conflict_mem_root);
+  DBUG_VOID_RETURN;
+};
+
+/**
+   atPrepareConflictDetection
+
+   Called by Slave SQL thread prior to defining an operation on
+   a table with conflict detection defined.
+*/
+int
+st_ndb_slave_state::atPrepareConflictDetection(const NdbDictionary::Table* table,
+                                               const NdbRecord* key_rec,
+                                               const uchar* row_data,
+                                               Uint64 transaction_id,
+                                               bool& handle_conflict_now)
+{
+  DBUG_ENTER("atPrepareConflictDetection");
+  /*
+    Slave is preparing to apply an operation with conflict detection.
+    If we're performing Transactional Conflict Resolution, take
+    extra steps
+  */
+  switch( trans_conflict_apply_state )
+  {
+  case SAS_NORMAL:
+    DBUG_PRINT("info", ("SAS_NORMAL : No special handling"));
+    /* No special handling */
+    break;
+  case SAS_TRACK_TRANS_DEPENDENCIES:
+  {
+    DBUG_PRINT("info", ("SAS_TRACK_TRANS_DEPENDENCIES : Tracking operation"));
+    /*
+      Track this operation and its transaction id, to determine
+      inter-transaction dependencies by {table, primary key}
+    */
+    assert( trans_dependency_tracker );
+
+    int res = trans_dependency_tracker
+      ->track_operation(table,
+                        key_rec,
+                        row_data,
+                        transaction_id);
+    if (res != 0)
+    {
+      sql_print_error("%s", trans_dependency_tracker->get_error_text());
+      DBUG_RETURN(res);
+    }
+    /* Proceed as normal */
+    break;
+  }
+  case SAS_APPLY_TRANS_DEPENDENCIES:
+  {
+    DBUG_PRINT("info", ("SAS_APPLY_TRANS_DEPENDENCIES : Deciding whether to apply"));
+    /*
+       Check if this operation's transaction id is marked in-conflict.
+       If it is, we tell the caller to perform conflict resolution now instead
+       of attempting to apply the operation.
+    */
+    assert( trans_dependency_tracker );
+
+    if (trans_dependency_tracker->in_conflict(transaction_id))
+    {
+      DBUG_PRINT("info", ("Event for transaction %llu is conflicting.  Handling.",
+                          transaction_id));
+      current_trans_row_reject_count++;
+      handle_conflict_now = true;
+      DBUG_RETURN(0);
+    }
+
+    /*
+       This transaction is not marked in-conflict, so continue with normal
+       processing.
+       Note that normal processing may subsequently detect a conflict which
+       didn't exist at the time of the previous TRACK_DEPENDENCIES pass.
+       In this case, we will rollback and repeat the TRACK_DEPENDENCIES
+       stage.
+    */
+    DBUG_PRINT("info", ("Event for transaction %llu is OK, applying",
+                        transaction_id));
+    break;
+  }
+  }
+  DBUG_RETURN(0);
+}
+
+/**
+   atTransConflictDetected
+
+   Called by the Slave SQL thread when a conflict is detected on
+   an executed operation.
+*/
+int
+st_ndb_slave_state::atTransConflictDetected(Uint64 transaction_id)
+{
+  DBUG_ENTER("atTransConflictDetected");
+
+  /*
+     The Slave has detected a conflict on an operation applied
+     to a table with Transactional Conflict Resolution defined.
+     Handle according to current state.
+  */
+  conflict_flags |= SCS_TRANS_CONFLICT_DETECTED_THIS_PASS;
+  current_trans_row_conflict_count++;
+
+  switch (trans_conflict_apply_state)
+  {
+  case SAS_NORMAL:
+  {
+    DBUG_PRINT("info", ("SAS_NORMAL : Conflict on op on table with trans detection."
+                        "Requires multi-pass resolution.  Will transition to "
+                        "SAS_TRACK_TRANS_DEPENDENCIES at Commit."));
+    /*
+      Conflict on table with transactional conflict resolution
+      defined.
+      This is the trigger that we will do transactional conflict
+      resolution.
+      Record that we need to do multiple passes to correctly
+      perform resolution.
+      TODO : Early exit from applying epoch?
+    */
+    break;
+  }
+  case SAS_TRACK_TRANS_DEPENDENCIES:
+  {
+    DBUG_PRINT("info", ("SAS_TRACK_TRANS_DEPENDENCIES : Operation in transaction %llu "
+                        "had conflict",
+                        transaction_id));
+    /*
+       Conflict on table with transactional conflict resolution
+       defined.
+       We will mark the operation's transaction_id as in-conflict,
+       so that any other operations on the transaction are also
+       considered in-conflict, and any dependent transactions are also
+       considered in-conflict.
+    */
+    assert(trans_dependency_tracker != NULL);
+    int res = trans_dependency_tracker
+      ->mark_conflict(transaction_id);
+
+    if (res != 0)
+    {
+      sql_print_error("%s", trans_dependency_tracker->get_error_text());
+      DBUG_RETURN(res);
+    }
+    break;
+  }
+  case SAS_APPLY_TRANS_DEPENDENCIES:
+  {
+    /*
+       This must be a new conflict, not noticed on the previous
+       pass.
+    */
+    DBUG_PRINT("info", ("SAS_APPLY_TRANS_DEPENDENCIES : Conflict detected.  "
+                        "Must be further conflict.  Will return to "
+                        "SAS_TRACK_TRANS_DEPENDENCIES state at commit."));
+    // TODO : Early exit from applying epoch
+    break;
+  }
+  default:
+    break;
+  }
+
+  DBUG_RETURN(0);
+}
+
+/**
+   atConflictPreCommit
+
+   Called by the Slave SQL thread prior to committing a Slave transaction.
+   This method can request that the Slave transaction is retried.
+
+
+   State transitions :
+
+                       START SLAVE /
+                       RESET SLAVE /
+                        STARTUP
+                            |
+                            |
+                            v
+                    ****************
+                    *  SAS_NORMAL  *
+                    ****************
+                       ^       |
+    No transactional   |       | Conflict on transactional table
+       conflicts       |       | (Rollback)
+       (Commit)        |       |
+                       |       v
+            **********************************
+            *  SAS_TRACK_TRANS_DEPENDENCIES  *
+            **********************************
+               ^          I              ^
+     More      I          I Dependencies |
+    conflicts  I          I determined   | No new conflicts
+     found     I          I (Rollback)   | (Commit)
+    (Rollback) I          I              |
+               I          v              |
+           **********************************
+           *  SAS_APPLY_TRANS_DEPENDENCIES  *
+           **********************************
+
+
+   Operation
+     The initial state is SAS_NORMAL.
+
+     On detecting a conflict on a transactional conflict detecting table,
+     SAS_TRACK_TRANS_DEPENDENCIES is entered, and the epoch transaction is
+     rolled back and reapplied.
+
+     In SAS_TRACK_TRANS_DEPENDENCIES state, transaction dependencies and
+     conflicts are tracked as the epoch transaction is applied.
+
+     Then the Slave transitions to SAS_APPLY_TRANS_DEPENDENCIES state, and
+     the epoch transaction is rolled back and reapplied.
+
+     In the SAS_APPLY_TRANS_DEPENDENCIES state, operations for transactions
+     marked as in-conflict are not applied.
+
+     If this results in no new conflicts, the epoch transaction is committed,
+     and the SAS_TRACK_TRANS_DEPENDENCIES state is re-entered for processing
+     the next replicated epoch transaction.
+     If it results in new conflicts, the epoch transaction is rolled back, and
+     the SAS_TRACK_TRANS_DEPENDENCIES state is re-entered again, to determine
+     the new set of dependencies.
+
+     If no conflicts are found in the SAS_TRACK_TRANS_DEPENDENCIES state, then
+     the epoch transaction is committed, and the Slave transitions to SAS_NORMAL
+     state.
+
+
+   Properties
+     1) Normally, there is no transaction dependency tracking overhead paid by
+        the slave.
+
+     2) On first detecting a transactional conflict, the epoch transaction must be
+        applied at least three times, with two rollbacks.
+
+     3) Transactional conflicts detected in subsequent epochs require the epoch
+        transaction to be applied two times, with one rollback.
+
+     4) A loop between states SAS_TRACK_TRANS_DEPENDENCIES and SAS_APPLY_TRANS_
+        DEPENDENCIES occurs when further transactional conflicts are discovered
+        in SAS_APPLY_TRANS_DEPENDENCIES state.  This implies that the conflicts
+        discovered in the SAS_TRACK_TRANS_DEPENDENCIES state cannot have been
+        complete, so we revisit that state to get a more complete picture.
+
+     5) The number of iterations of this loop is fixed to a hard coded limit, after
+        which the Slave will stop with an error.  This should be an unlikely
+        occurrence, as it requires not just n conflicts, but at least 1 new conflict
+        appearing between the transactions in the epoch transaction and the
+        database between the two states, n times in a row.
+
+     6) Where conflicts are occasional, as expected, the post-commit transition to
+        SAS_TRACK_TRANS_DEPENDENCIES rather than SAS_NORMAL results in one epoch
+        transaction having its transaction dependencies needlessly tracked.
+
+*/
+int
+st_ndb_slave_state::atConflictPreCommit(bool& retry_slave_trans)
+{
+  DBUG_ENTER("atConflictPreCommit");
+
+  /*
+    Prior to committing a Slave transaction, we check whether
+    Transactional conflicts have been detected which require
+    us to retry the slave transaction
+  */
+  retry_slave_trans = false;
+  switch(trans_conflict_apply_state)
+  {
+  case SAS_NORMAL:
+  {
+    DBUG_PRINT("info", ("SAS_NORMAL"));
+    /*
+       Normal case.  Only if we defined conflict detection on a table
+       with transactional conflict detection, and saw conflicts (on any table)
+       do we go to another state
+     */
+    if (conflict_flags & SCS_TRANS_CONFLICT_DETECTED_THIS_PASS)
+    {
+      DBUG_PRINT("info", ("Conflict(s) detected this pass, transitioning to "
+                          "SAS_TRACK_TRANS_DEPENDENCIES."));
+      assert(conflict_flags & SCS_OPS_DEFINED);
+      /* Transactional conflict resolution required, switch state */
+      atBeginTransConflictHandling();
+      resetPerAttemptCounters();
+      trans_conflict_apply_state = SAS_TRACK_TRANS_DEPENDENCIES;
+      retry_slave_trans = true;
+    }
+    break;
+  }
+  case SAS_TRACK_TRANS_DEPENDENCIES:
+  {
+    DBUG_PRINT("info", ("SAS_TRACK_TRANS_DEPENDENCIES"));
+
+    if (conflict_flags & SCS_TRANS_CONFLICT_DETECTED_THIS_PASS)
+    {
+      /*
+         Conflict on table with transactional detection
+         this pass, we have collected the details and
+         dependencies, now transition to
+         SAS_APPLY_TRANS_DEPENDENCIES and
+         reapply the epoch transaction without the
+         conflicting transactions.
+      */
+      assert(conflict_flags & SCS_OPS_DEFINED);
+      DBUG_PRINT("info", ("Transactional conflicts, transitioning to "
+                          "SAS_APPLY_TRANS_DEPENDENCIES"));
+
+      trans_conflict_apply_state = SAS_APPLY_TRANS_DEPENDENCIES;
+      trans_detect_iter_count++;
+      retry_slave_trans = true;
+      break;
+    }
+    else
+    {
+      /*
+         No transactional conflicts detected this pass, let's
+         return to SAS_NORMAL state after commit for more efficient
+         application of epoch transactions
+      */
+      DBUG_PRINT("info", ("No transactional conflicts, transitioning to "
+                          "SAS_NORMAL"));
+      atEndTransConflictHandling();
+      trans_conflict_apply_state = SAS_NORMAL;
+      break;
+    }
+  }
+  case SAS_APPLY_TRANS_DEPENDENCIES:
+  {
+    DBUG_PRINT("info", ("SAS_APPLY_TRANS_DEPENDENCIES"));
+    assert(conflict_flags & SCS_OPS_DEFINED);
+    /*
+       We've applied the Slave epoch transaction subject to the
+       conflict detection.  If any further transactional
+       conflicts have been observed, then we must repeat the
+       process.
+    */
+    atEndTransConflictHandling();
+    atBeginTransConflictHandling();
+    trans_conflict_apply_state = SAS_TRACK_TRANS_DEPENDENCIES;
+
+    if (unlikely(conflict_flags & SCS_TRANS_CONFLICT_DETECTED_THIS_PASS))
+    {
+      DBUG_PRINT("info", ("Further conflict(s) detected, repeating the "
+                          "TRACK_TRANS_DEPENDENCIES pass"));
+      /*
+         Further conflict observed when applying, need
+         to re-determine dependencies
+      */
+      resetPerAttemptCounters();
+      retry_slave_trans = true;
+      break;
+    }
+
+
+    DBUG_PRINT("info", ("No further conflicts detected, committing and "
+                        "returning to SAS_TRACK_TRANS_DEPENDENCIES state"));
+    /*
+       With dependencies taken into account, no further
+       conflicts detected, can now proceed to commit
+    */
+    break;
+  }
+  }
+
+  /*
+    Clear conflict flags, to ensure that we detect any new conflicts
+  */
+  conflict_flags = 0;
+
+  if (retry_slave_trans)
+  {
+    DBUG_PRINT("info", ("Requesting transaction restart"));
+    DBUG_RETURN(1);
+  }
+
+  DBUG_PRINT("info", ("Allowing commit to proceed"));
+  DBUG_RETURN(0);
+}
+
+/* HAVE_NDB_BINLOG */
+#endif
+
+/* WITH_NDBCLUSTER_STORAGE_ENGINE */
+#endif

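atConflictPreCommit() above only decides whether the epoch transaction must be
retried; the rollback-and-reapply loop lives in the slave's commit path in
ha_ndbcluster.cc, not in this file. A schematic sketch of such a driver, where
apply_epoch_transaction() and stop_slave_with_error() are hypothetical helper
names standing in for the real code paths:

  Uint32 attempts = 0;
  bool retry = false;
  do
  {
    apply_epoch_transaction(trans);  /* hypothetical: replay the epoch's row changes */
    g_ndb_slave_state.atConflictPreCommit(retry);
    if (retry)
    {
      trans->execute(NdbTransaction::Rollback);
      if (++attempts >= st_ndb_slave_state::MAX_RETRY_TRANS_COUNT)
        stop_slave_with_error();     /* hypothetical: property 5's bounded-loop error */
    }
  } while (retry);
  trans->execute(NdbTransaction::Commit);

Traced against the properties above: a first conflict commits on the third
pass (SAS_NORMAL, then SAS_TRACK_TRANS_DEPENDENCIES, then
SAS_APPLY_TRANS_DEPENDENCIES), i.e. two rollbacks, while later conflicting
epochs start in SAS_TRACK_TRANS_DEPENDENCIES and need only one.
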
=== added file 'sql/ndb_conflict.h'
--- a/sql/ndb_conflict.h	1970-01-01 00:00:00 +0000
+++ b/sql/ndb_conflict.h	2012-01-31 13:19:19 +0000
@@ -0,0 +1,315 @@
+/*
+   Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#ifndef NDB_CONFLICT_H
+#define NDB_CONFLICT_H
+
+#include "ndb_conflict_trans.h"
+#include <ndbapi/NdbDictionary.hpp>
+#include <ndbapi/NdbTransaction.hpp>
+
+enum enum_conflict_fn_type
+{
+  CFT_NDB_UNDEF = 0
+  ,CFT_NDB_MAX
+  ,CFT_NDB_OLD
+  ,CFT_NDB_MAX_DEL_WIN
+  ,CFT_NDB_EPOCH
+  ,CFT_NDB_EPOCH_TRANS
+  ,CFT_NUMBER_OF_CFTS /* End marker */
+};
+
+#ifdef HAVE_NDB_BINLOG
+static const Uint32 MAX_CONFLICT_ARGS= 8;
+
+enum enum_conflict_fn_arg_type
+{
+  CFAT_END
+  ,CFAT_COLUMN_NAME
+  ,CFAT_EXTRA_GCI_BITS
+};
+
+struct st_conflict_fn_arg
+{
+  enum_conflict_fn_arg_type type;
+  union
+  {
+    char resolveColNameBuff[ NAME_CHAR_LEN + 1 ]; // CFAT_COLUMN_NAME
+    uint32 extraGciBits; // CFAT_EXTRA_GCI_BITS
+  };
+};
+
+struct st_conflict_fn_arg_def
+{
+  enum enum_conflict_fn_arg_type arg_type;
+  bool optional;
+};
+
+/* What type of operation was issued */
+enum enum_conflicting_op_type
+{                /* NdbApi          */
+  WRITE_ROW,     /* insert (!write) */
+  UPDATE_ROW,    /* update          */
+  DELETE_ROW,    /* delete          */
+  REFRESH_ROW    /* refresh         */
+};
+
+/*
+  Room for 10 instruction words, two labels (@ 2words/label)
+  + 2 extra words for the case of resolve_size == 8
+*/
+#define MAX_CONFLICT_INTERPRETED_PROG_SIZE 16
+
+/*
+  prepare_detect_func
+
+  Type of function used to prepare for conflict detection on
+  an NdbApi operation
+*/
+typedef int (* prepare_detect_func) (struct st_ndbcluster_conflict_fn_share* cfn_share,
+                                     enum_conflicting_op_type op_type,
+                                     const NdbRecord* data_record,
+                                     const uchar* old_data,
+                                     const uchar* new_data,
+                                     const MY_BITMAP* write_set,
+                                     class NdbInterpretedCode* code);
+
+enum enum_conflict_fn_flags
+{
+  CF_TRANSACTIONAL = 1
+};
+
+struct st_conflict_fn_def
+{
+  const char *name;
+  enum_conflict_fn_type type;
+  const st_conflict_fn_arg_def* arg_defs;
+  prepare_detect_func prep_func;
+  uint8 flags; /* enum_conflict_fn_flags */
+};
+
+/* What sort of conflict was found */
+enum enum_conflict_cause
+{
+  ROW_ALREADY_EXISTS,   /* On insert */
+  ROW_DOES_NOT_EXIST,   /* On Update, Delete */
+  ROW_IN_CONFLICT,      /* On Update, Delete */
+  TRANS_IN_CONFLICT     /* Any of above, or implied by transaction */
+};
+
+/* NdbOperation custom data which points out handler and record. */
+struct Ndb_exceptions_data {
+  struct NDB_SHARE* share;
+  const NdbRecord* key_rec;
+  const uchar* row;
+  enum_conflicting_op_type op_type;
+  Uint64 trans_id;
+};
+
+enum enum_conflict_fn_table_flags
+{
+  CFF_NONE         = 0,
+  CFF_REFRESH_ROWS = 1
+};
+
+/*
+   Maximum supported key parts (16)
+   (Ndb supports 32, but MySQL has a lower limit)
+*/
+static const int NDB_MAX_KEY_PARTS = MAX_REF_PARTS;
+
+/**
+   ExceptionsTableWriter
+
+   Helper class for inserting entries into an exceptions
+   table
+*/
+class ExceptionsTableWriter
+{
+public:
+  ExceptionsTableWriter()
+    :m_pk_cols(0), m_ex_tab(NULL), m_count(0)
+  {};
+
+  ~ExceptionsTableWriter()
+  {};
+
+  /**
+     hasTable
+
+     Returns true if there is an Exceptions table
+  */
+  bool hasTable() const
+  {
+    return m_ex_tab != NULL;
+  };
+
+  /**
+    init
+
+    Initialise ExceptionsTableWriter with main and exceptions
+    tables.
+
+    May set a warning message on success or error.
+  */
+  int init(const NdbDictionary::Table* mainTable,
+           const NdbDictionary::Table* exceptionsTable,
+           char* msg_buf,
+           uint msg_buf_len,
+           const char** msg);
+
+  /**
+     free
+
+     Release reference to exceptions table
+  */
+  void free(Ndb* ndb);
+
+  /**
+     writeRow
+
+     Write a row to the Exceptions Table for the given
+     key
+  */
+  int writeRow(NdbTransaction* trans,
+               const NdbRecord* keyRecord,
+               uint32 server_id,
+               uint32 master_server_id,
+               uint64 master_epoch,
+               const uchar* rowPtr,
+               NdbError& err);
+
+private:
+  /* info about original table */
+  uint8 m_pk_cols;
+  uint16 m_key_attrids[ NDB_MAX_KEY_PARTS ];
+
+  const NdbDictionary::Table *m_ex_tab;
+  uint32 m_count;
+};
+
+typedef struct st_ndbcluster_conflict_fn_share {
+  const st_conflict_fn_def* m_conflict_fn;
+
+  /* info about original table */
+  uint16 m_resolve_column;
+  uint8 m_resolve_size;
+  uint8 m_flags;
+
+  ExceptionsTableWriter m_ex_tab_writer;
+} NDB_CONFLICT_FN_SHARE;
+
+
+/* HAVE_NDB_BINLOG */
+#endif
+
+enum enum_slave_trans_conflict_apply_state
+{
+  /* Normal with optional row-level conflict detection */
+  SAS_NORMAL,
+
+  /*
+    SAS_TRACK_TRANS_DEPENDENCIES
+    Track inter-transaction dependencies
+  */
+  SAS_TRACK_TRANS_DEPENDENCIES,
+
+  /*
+    SAS_APPLY_TRANS_DEPENDENCIES
+    Apply only non conflicting transactions
+  */
+  SAS_APPLY_TRANS_DEPENDENCIES
+};
+
+enum enum_slave_conflict_flags
+{
+  /* Conflict detection Ops defined */
+  SCS_OPS_DEFINED = 1,
+  /* Conflict detected on table with transactional resolution */
+  SCS_TRANS_CONFLICT_DETECTED_THIS_PASS = 2
+};
+
+/*
+  State associated with the Slave thread
+  (From the Ndb handler's point of view)
+*/
+struct st_ndb_slave_state
+{
+  /* Counter values for current slave transaction */
+  Uint32 current_violation_count[CFT_NUMBER_OF_CFTS];
+  Uint64 current_master_server_epoch;
+  Uint64 current_max_rep_epoch;
+  uint8 conflict_flags; /* enum_slave_conflict_flags */
+    /* Transactional conflict detection */
+  Uint32 retry_trans_count;
+  Uint32 current_trans_row_conflict_count;
+  Uint32 current_trans_row_reject_count;
+  Uint32 current_trans_in_conflict_count;
+
+  /* Cumulative counter values */
+  Uint64 total_violation_count[CFT_NUMBER_OF_CFTS];
+  Uint64 max_rep_epoch;
+  Uint32 sql_run_id;
+  /* Transactional conflict detection */
+  Uint64 trans_row_conflict_count;
+  Uint64 trans_row_reject_count;
+  Uint64 trans_detect_iter_count;
+  Uint64 trans_in_conflict_count;
+  Uint64 trans_conflict_commit_count;
+
+  static const Uint32 MAX_RETRY_TRANS_COUNT = 100;
+
+  /*
+    Slave Apply State
+
+    State of Binlog application from Ndb point of view.
+  */
+  enum_slave_trans_conflict_apply_state trans_conflict_apply_state;
+
+  MEM_ROOT conflict_mem_root;
+  class DependencyTracker* trans_dependency_tracker;
+
+  /* Methods */
+  void atStartSlave();
+  int  atPrepareConflictDetection(const NdbDictionary::Table* table,
+                                  const NdbRecord* key_rec,
+                                  const uchar* row_data,
+                                  Uint64 transaction_id,
+                                  bool& handle_conflict_now);
+  int  atTransConflictDetected(Uint64 transaction_id);
+  int  atConflictPreCommit(bool& retry_slave_trans);
+
+  void atBeginTransConflictHandling();
+  void atEndTransConflictHandling();
+
+  void atTransactionCommit();
+  void atTransactionAbort();
+  void atResetSlave();
+
+  void atApplyStatusWrite(Uint32 master_server_id,
+                          Uint32 row_server_id,
+                          Uint64 row_epoch,
+                          bool is_row_server_id_local);
+
+  void resetPerAttemptCounters();
+
+  st_ndb_slave_state();
+};
+
+
+/* NDB_CONFLICT_H */
+#endif

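For reference, ExceptionsTableWriter::init() above accepts an exceptions table
whose first four columns (server_id, master_server_id, master_epoch, count)
form the primary key, followed by copies of the main table's primary-key
columns with matching type, length and nullability. Assuming the usual $EX
suffix convention for exceptions tables, a definition that would pass init()
for a main table t1 with primary key id INT UNSIGNED NOT NULL looks like this
(shown as the DDL string a test might execute; table and column names are
illustrative):

  const char* t1_ex_ddl =
    "CREATE TABLE t1$EX ("
    "  server_id INT UNSIGNED NOT NULL,"
    "  master_server_id INT UNSIGNED NOT NULL,"
    "  master_epoch BIGINT UNSIGNED NOT NULL,"
    "  count INT UNSIGNED NOT NULL,"
    "  id INT UNSIGNED NOT NULL,"   /* copy of t1's primary key */
    "  PRIMARY KEY(server_id, master_server_id, master_epoch, count)"
    ") ENGINE=NDB";
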
=== modified file 'sql/ndb_share.cc'
--- a/sql/ndb_share.cc	2011-11-10 08:21:36 +0000
+++ b/sql/ndb_share.cc	2012-01-31 11:11:20 +0000
@@ -33,11 +33,11 @@ NDB_SHARE::destroy(NDB_SHARE* share)
   pthread_mutex_destroy(&share->mutex);
 
 #ifdef HAVE_NDB_BINLOG
-  if (share->m_cfn_share && share->m_cfn_share->m_ex_tab && g_ndb)
+  if (share->m_cfn_share && 
+      share->m_cfn_share->m_ex_tab_writer.hasTable() && 
+      g_ndb)
   {
-    NdbDictionary::Dictionary *dict= g_ndb->getDictionary();
-    dict->removeTableGlobal(*(share->m_cfn_share->m_ex_tab), 0);
-    share->m_cfn_share->m_ex_tab= 0;
+    share->m_cfn_share->m_ex_tab_writer.free(g_ndb);
   }
 #endif
   share->new_op= 0;

=== modified file 'sql/ndb_share.h'
--- a/sql/ndb_share.h	2012-01-25 17:50:29 +0000
+++ b/sql/ndb_share.h	2012-01-31 11:11:20 +0000
@@ -26,6 +26,7 @@
 #include <sql_const.h>       // MAX_REF_PARTS
 
 #include <ndbapi/Ndb.hpp>    // Ndb::TupleIdRange
+#include "ndb_conflict.h"
 
 enum NDB_SHARE_STATE {
   NSS_INITIAL= 0,
@@ -33,18 +34,6 @@ enum NDB_SHARE_STATE {
   NSS_ALTERED 
 };
 
-
-enum enum_conflict_fn_type
-{
-  CFT_NDB_UNDEF = 0
-  ,CFT_NDB_MAX
-  ,CFT_NDB_OLD
-  ,CFT_NDB_MAX_DEL_WIN
-  ,CFT_NDB_EPOCH
-  ,CFT_NDB_EPOCH_TRANS
-  ,CFT_NUMBER_OF_CFTS /* End marker */
-};
-
 #ifdef HAVE_NDB_BINLOG
 enum Ndb_binlog_type
 {
@@ -56,112 +45,6 @@ enum Ndb_binlog_type
   ,NBT_UPDATED_ONLY_USE_UPDATE  = NBT_UPDATED_ONLY | NBT_USE_UPDATE
   ,NBT_FULL_USE_UPDATE          = NBT_FULL         | NBT_USE_UPDATE
 };
-
-static const Uint32 MAX_CONFLICT_ARGS= 8;
-
-enum enum_conflict_fn_arg_type
-{
-  CFAT_END
-  ,CFAT_COLUMN_NAME
-  ,CFAT_EXTRA_GCI_BITS
-};
-
-struct st_conflict_fn_arg
-{
-  enum_conflict_fn_arg_type type;
-  union
-  {
-    char resolveColNameBuff[ NAME_CHAR_LEN + 1 ]; // CFAT_COLUMN_NAME
-    uint32 extraGciBits; // CFAT_EXTRA_GCI_BITS
-  };
-};
-
-struct st_conflict_fn_arg_def
-{
-  enum enum_conflict_fn_arg_type arg_type;
-  bool optional;
-};
-
-/* What type of operation was issued */
-enum enum_conflicting_op_type
-{                /* NdbApi          */
-  WRITE_ROW,     /* insert (!write) */
-  UPDATE_ROW,    /* update          */
-  DELETE_ROW,    /* delete          */
-  REFRESH_ROW    /* refresh         */
-};
-
-/*
-  prepare_detect_func
-
-  Type of function used to prepare for conflict detection on
-  an NdbApi operation
-*/
-typedef int (* prepare_detect_func) (struct st_ndbcluster_conflict_fn_share* cfn_share,
-                                     enum_conflicting_op_type op_type,
-                                     const NdbRecord* data_record,
-                                     const uchar* old_data,
-                                     const uchar* new_data,
-                                     const MY_BITMAP* write_set,
-                                     class NdbInterpretedCode* code);
-
-enum enum_conflict_fn_flags
-{
-  CF_TRANSACTIONAL = 1
-};
-
-struct st_conflict_fn_def
-{
-  const char *name;
-  enum_conflict_fn_type type;
-  const st_conflict_fn_arg_def* arg_defs;
-  prepare_detect_func prep_func;
-  uint8 flags; /* enum_conflict_fn_flags */
-};
-
-/* What sort of conflict was found */
-enum enum_conflict_cause
-{
-  ROW_ALREADY_EXISTS,   /* On insert */
-  ROW_DOES_NOT_EXIST,   /* On Update, Delete */
-  ROW_IN_CONFLICT,      /* On Update, Delete */
-  TRANS_IN_CONFLICT     /* Any of above, or implied by transaction */
-};
-
-/* NdbOperation custom data which points out handler and record. */
-struct Ndb_exceptions_data {
-  struct NDB_SHARE* share;
-  const NdbRecord* key_rec;
-  const uchar* row;
-  enum_conflicting_op_type op_type;
-  Uint64 trans_id;
-};
-
-enum enum_conflict_fn_table_flags
-{
-  CFF_NONE         = 0,
-  CFF_REFRESH_ROWS = 1
-};
-
-/*
-   Maximum supported key parts (16)
-   (Ndb supports 32, but MySQL has a lower limit)
-*/
-static const int NDB_MAX_KEY_PARTS = MAX_REF_PARTS;
-
-typedef struct st_ndbcluster_conflict_fn_share {
-  const st_conflict_fn_def* m_conflict_fn;
-
-  /* info about original table */
-  uint8 m_pk_cols;
-  uint16 m_resolve_column;
-  uint8 m_resolve_size;
-  uint8 m_flags;
-  uint16 m_key_attrids[ NDB_MAX_KEY_PARTS ];
-
-  const NdbDictionary::Table *m_ex_tab;
-  uint32 m_count;
-} NDB_CONFLICT_FN_SHARE;
 #endif
 
 

=== modified file 'sql/ndb_thd.cc'
--- a/sql/ndb_thd.cc	2012-01-26 14:32:08 +0000
+++ b/sql/ndb_thd.cc	2012-02-23 12:37:59 +0000
@@ -19,6 +19,7 @@
 #define MYSQL_SERVER
 #endif
 
+#include "ha_ndbcluster_glue.h"
 #include "ndb_thd.h"
 #include "ndb_thd_ndb.h"
 

=== modified file 'storage/ndb/CMakeLists.txt'
--- a/storage/ndb/CMakeLists.txt	2012-01-26 14:32:08 +0000
+++ b/storage/ndb/CMakeLists.txt	2012-02-23 12:37:59 +0000
@@ -89,6 +89,7 @@ SET(NDBCLUSTER_SOURCES
   ../../sql/ndb_component.cc
   ../../sql/ndb_local_schema.cc
   ../../sql/ndb_repl_tab.cc
+  ../../sql/ndb_conflict.cc
 )
 
 # Include directories used when building ha_ndbcluster

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionFactoryImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionFactoryImpl.java	2011-10-27 23:43:25 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionFactoryImpl.java	2012-02-08 17:27:45 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -29,6 +29,7 @@ import com.mysql.clusterj.SessionFactory
 
 import com.mysql.clusterj.core.spi.DomainTypeHandler;
 import com.mysql.clusterj.core.spi.DomainTypeHandlerFactory;
+
 import com.mysql.clusterj.core.metadata.DomainTypeHandlerFactoryImpl;
 
 import com.mysql.clusterj.core.store.Db;
@@ -497,9 +498,14 @@ public class SessionFactoryImpl implemen
                 // remove the ndb dictionary cached table definition
                 tableName = domainTypeHandler.getTableName();
                 if (tableName != null) {
+                    if (logger.isDebugEnabled()) logger.debug("Removing dictionary entry for table " + tableName
+                            + " for class " + cls.getName());
                     dictionary.removeCachedTable(tableName);
                 }
             }
+            for (ClusterConnection clusterConnection: pooledConnections) {
+                clusterConnection.unloadSchema(tableName);
+            }
             return tableName;
         }
     }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java	2012-01-23 00:44:39 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java	2012-02-08 17:27:45 +0000
@@ -300,10 +300,12 @@ public class SessionImpl implements Sess
         Table storeTable = domainTypeHandler.getStoreTable();
         // perform a primary key operation
         final Operation op = clusterTransaction.getSelectOperation(storeTable);
+        op.beginDefinition();
         // set the keys into the operation
         domainTypeHandler.operationSetKeys(instanceHandler, op);
         // set the expected columns into the operation
         domainTypeHandler.operationGetValues(op);
+        op.endDefinition();
         final ResultData rs = op.resultData(false);
         final SessionImpl cacheManager = this;
         // defer execution of the key operation until the next find, flush, or query
@@ -465,7 +467,9 @@ public class SessionImpl implements Sess
         Operation op = null;
         try {
             op = clusterTransaction.getDeleteOperation(storeTable);
+            op.beginDefinition();
             domainTypeHandler.operationSetKeys(valueHandler, op);
+            op.endDefinition();
         } catch (ClusterJException ex) {
             failAutoTransaction();
             throw new ClusterJException(
@@ -576,10 +580,12 @@ public class SessionImpl implements Sess
         Table storeTable = domainTypeHandler.getStoreTable();
         // perform a single select by key operation
         Operation op = clusterTransaction.getSelectOperation(storeTable);
+        op.beginDefinition();
         // set the keys into the operation
         domainTypeHandler.operationSetKeys(keyHandler, op);
         // set the expected columns into the operation
         domainTypeHandler.operationGetValues(op);
+        op.endDefinition();
         // execute the select and get results
         ResultData rs = op.resultData();
         return rs;
@@ -1405,6 +1411,9 @@ public class SessionImpl implements Sess
         }
     }
 
+    /** Unload the schema associated with the domain class. This allows schema changes to work.
+     * @param cls the class for which to unload the schema
+     */
     public String unloadSchema(Class<?> cls) {
         return factory.unloadSchema(cls, dictionary);
     }
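
The unloadSchema plumbing above is the application-facing piece of this change: it drops the cached table metadata (and, via the pooled connections, the cached NdbRecord) so that a schema change made outside ClusterJ is picked up on the next access. A minimal usage sketch, assuming the method is also exposed on the com.mysql.clusterj.Session interface (only the implementation is shown here) and using a hypothetical Customer domain interface; neither the interface nor the connect string below is part of this patch:

    import java.util.Properties;

    import com.mysql.clusterj.ClusterJHelper;
    import com.mysql.clusterj.Session;
    import com.mysql.clusterj.SessionFactory;
    import com.mysql.clusterj.annotation.PersistenceCapable;
    import com.mysql.clusterj.annotation.PrimaryKey;

    public class UnloadSchemaExample {

        @PersistenceCapable(table = "customer")  // hypothetical table
        public interface Customer {
            @PrimaryKey
            int getId();
            void setId(int id);
            String getName();
            void setName(String name);
        }

        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("com.mysql.clusterj.connectstring", "localhost:1186");
            props.put("com.mysql.clusterj.database", "test");
            SessionFactory factory = ClusterJHelper.getSessionFactory(props);
            Session session = factory.getSession();

            // ... an ALTER TABLE customer ... happens via mysqld here ...

            // Invalidate the cached metadata; returns the mapped table name.
            String table = session.unloadSchema(Customer.class);
            System.out.println("unloaded schema for table " + table);

            // The next operation re-reads the dictionary and rebuilds the
            // cached NdbRecord lazily.
            Customer c = session.find(Customer.class, 1);
        }
    }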

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java	2011-11-23 10:22:31 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java	2012-02-09 10:22:48 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -208,10 +208,12 @@ public class QueryDomainTypeImpl<T> impl
             case PRIMARY_KEY: {
                 // perform a select operation
                 Operation op = session.getSelectOperation(domainTypeHandler.getStoreTable());
+                op.beginDefinition();
                 // set key values into the operation
                 index.operationSetKeys(context, op);
                 // set the expected columns into the operation
                 domainTypeHandler.operationGetValues(op);
+                op.endDefinition();
                 // execute the select and get results
                 result = op.resultData();
                 break;
@@ -310,8 +312,10 @@ public class QueryDomainTypeImpl<T> impl
                     // perform a delete by primary key operation
                     if (logger.isDetailEnabled()) logger.detail("Using delete by primary key.");
                     Operation op = session.getDeleteOperation(domainTypeHandler.getStoreTable());
+                    op.beginDefinition();
                     // set key values into the operation
                     index.operationSetKeys(context, op);
+                    op.endDefinition();
                     // execute the delete operation
                     session.executeNoCommit(false, true);
                     errorCode = op.errorCode();

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/ClusterConnection.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/ClusterConnection.java	2011-03-08 00:44:56 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/ClusterConnection.java	2012-02-08 17:27:45 +0000
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+ *  Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -34,4 +34,6 @@ public interface ClusterConnection {
 
     public void close(Db db);
 
+    public void unloadSchema(String tableName);
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-openjpa/src/main/java/com/mysql/clusterj/openjpa/NdbOpenJPAStoreManager.java'
--- a/storage/ndb/clusterj/clusterj-openjpa/src/main/java/com/mysql/clusterj/openjpa/NdbOpenJPAStoreManager.java	2011-08-03 01:02:19 +0000
+++ b/storage/ndb/clusterj/clusterj-openjpa/src/main/java/com/mysql/clusterj/openjpa/NdbOpenJPAStoreManager.java	2012-02-08 17:27:45 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -584,6 +584,7 @@ public class NdbOpenJPAStoreManager exte
         session.startAutoTransaction();
         try {
             Operation op = session.getSelectOperation(storeTable);
+            op.beginDefinition();
             int[] keyFields = domainTypeHandler.getKeyFieldNumbers();
             BitSet fieldsInResult = new BitSet();
             for (int i : keyFields) {
@@ -597,6 +598,7 @@ public class NdbOpenJPAStoreManager exte
                 fieldHandler.operationGetValue(op);
                 fieldsInResult.set(fieldHandler.getFieldNumber());
             }
+            op.endDefinition();
             ResultData resultData = op.resultData();
             NdbOpenJPAResult result = new NdbOpenJPAResult(resultData, domainTypeHandler, fieldsInResult);
             session.endAutoTransaction();

=== modified file 'storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/BinaryPKTest.java'
--- a/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/BinaryPKTest.java	2011-02-06 21:37:05 +0000
+++ b/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/BinaryPKTest.java	2012-02-08 17:27:45 +0000
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ *  Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -84,7 +84,7 @@ public class BinaryPKTest extends Abstra
             if (0 == i % 4) {
                 byte[] key = getStoragePK(i);
                 BinaryPK instance = session.find(BinaryPK.class, key);
-                verifyResult("update verify", instance, i, true);
+                verifyResult("update verify ", instance, i, true);
             }
         }
     }
@@ -145,13 +145,13 @@ public class BinaryPKTest extends Abstra
     }
 
     protected byte[] getStoragePK(int index) {
-        return new byte[] {0, (byte)(index/256), (byte)(index%256)};
+        return new byte[] {0, (byte)((index/256) + 65), (byte)((index%256) + 65)};
     }
 
     protected byte[] getResultPK(int index) {
         byte[] result = new byte[255];
-        result[1] = (byte)(index/256);
-        result[2] = (byte)(index%256);
+        result[1] = (byte)((index/256) + 65);
+        result[2] = (byte)((index%256) + 65);
         return result;
     }
 
@@ -160,28 +160,31 @@ public class BinaryPKTest extends Abstra
     }
 
     protected void verifyStorage(String where, BinaryPK instance, int index, boolean updated) {
-        errorIfNotEqual(where + "id failed", toString(getStoragePK(index)), toString(instance.getId()));
-        errorIfNotEqual(where + "number failed", index, instance.getNumber());
+        errorIfNotEqual(where + "mismatch on id", toString(getStoragePK(index)), toString(instance.getId()));
+        errorIfNotEqual(where + "mismatch on number", index, instance.getNumber());
         if (updated) {
-            errorIfNotEqual(where + "Value failed", getValue(NUMBER_OF_INSTANCES - index), instance.getName());
+            errorIfNotEqual(where + "mismatch on name", getValue(NUMBER_OF_INSTANCES - index), instance.getName());
         } else {
-            errorIfNotEqual(where + "Value failed", getValue(index), instance.getName());
+            errorIfNotEqual(where + "mismatch on name", getValue(index), instance.getName());
 
         }
     }
 
     protected void verifyResult(String where, BinaryPK instance, int index, boolean updated) {
-        errorIfNotEqual(where + "id failed", toString(getResultPK(index)), toString(instance.getId()));
-        errorIfNotEqual("number failed", index, instance.getNumber());
+        errorIfNotEqual(where + "mismatch on id", toString(getResultPK(index)), toString(instance.getId()));
+        errorIfNotEqual("mismatch on number", index, instance.getNumber());
         if (updated) {
-            errorIfNotEqual(where + "Value failed", getValue(NUMBER_OF_INSTANCES - index), instance.getName());
+            errorIfNotEqual(where + "mismatch on name", getValue(NUMBER_OF_INSTANCES - index), instance.getName());
         } else {
-            errorIfNotEqual(where + "Value failed", getValue(index), instance.getName());
+            errorIfNotEqual(where + "mismatch on name", getValue(index), instance.getName());
 
         }
     }
 
     private String toString(byte[] id) {
+        if (id == null) {
+            return "null";
+        }
         StringBuilder builder = new StringBuilder();
         for (int i = 0; i < id.length; ++i) {
             builder.append(String.valueOf(id[i]));
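
For reference, the reworked key generation shifts the computed bytes by 65 so that every variable key byte lands in the printable range starting at 'A'; mismatching keys then show up legibly in the test's error messages. The mapping in isolation (a standalone snippet, not part of the patch):

    public class KeyMappingCheck {
        static byte[] getStoragePK(int index) {
            return new byte[] {0, (byte)((index/256) + 65), (byte)((index%256) + 65)};
        }

        public static void main(String[] args) {
            byte[] key = getStoragePK(300);    // 300/256 = 1, 300%256 = 44
            System.out.println((char)key[1]);  // 'B' (66)
            System.out.println((char)key[2]);  // 'm' (109)
        }
    }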

=== modified file 'storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/StressTest.java'
--- a/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/StressTest.java	2012-01-21 02:22:20 +0000
+++ b/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/StressTest.java	2012-02-08 17:27:45 +0000
@@ -17,6 +17,8 @@
 
 package testsuite.clusterj;
 
+import java.nio.ByteBuffer;
+
 import org.junit.Ignore;
 
 import com.mysql.clusterj.ClusterJFatalUserException;
@@ -33,14 +35,48 @@ public class StressTest extends Abstract
 
     private static final int NUMBER_TO_INSERT = 4000;
 
-    private static final int ITERATIONS = 5;
+    private static final int ITERATIONS = 7;
+
+    private static final int ITERATIONS_TO_DROP = 3;
 
-    private static final String tableName = "stress";
+    private static String tableName;
+
+    private static final String STRESS_TEST_TABLE_PROPERTY_NAME = "com.mysql.clusterj.StressTestTable";
+
+    static {
+        String env = System.getenv(STRESS_TEST_TABLE_PROPERTY_NAME);
+        String def = (env == null)?"stress":env;
+        tableName = System.getProperty(STRESS_TEST_TABLE_PROPERTY_NAME, def);
+    }
 
     private ColumnMetadata[] columnMetadatas;
 
     private Timer timer = new Timer();
 
+    private static int BYTES_LENGTH = 12000;
+
+    private static ByteBuffer BYTES = ByteBuffer.allocate(BYTES_LENGTH);
+
+    static {
+        for (int i = 0; i < BYTES_LENGTH; ++i) {
+            // only printable bytes from ABC..^_`
+            BYTES.put((byte)((i % 32) + 65));
+        }
+    }
+
+    private static int STRING_LENGTH = 12000;
+
+    private static String STRING;
+
+    static {
+        StringBuilder builder = new StringBuilder();
+        for (int i = 0; i < STRING_LENGTH; ++i) {
+            // only printable characters from ABC..^_`
+            builder.append((char)((i % 32) + 65));
+        }
+        STRING = builder.toString();
+    }
+
     @Override
     java.lang.Class<? extends IdBase> getModelClass() {
         return Stress.class;
@@ -55,7 +91,25 @@ public class StressTest extends Abstract
         columnMetadatas = session.newInstance(Stress.class).columnMetadata();
     }
 
-    public void testInsAattr_indy() {
+    public void testIndy() {
+        insAattr_indy();
+        getA_indy();
+        delA_indy();
+    }
+
+    public void testEach() {
+        insAattr_each();
+        getA_each();
+        delA_each();
+    }
+
+    public void testBulk() {
+        insAattr_bulk();
+        getA_bulk();
+        delA_bulk();
+    }
+
+    public void insAattr_indy() {
         long total = 0;
         for (int i = 0; i < ITERATIONS; ++i) {
             // garbage collect what we can before each test
@@ -67,13 +121,13 @@ public class StressTest extends Abstract
             }
             // drop the first iteration
             timer.stop();
-            if (i > 0) total += timer.time();
-            System.out.println("testInsAattr_indy: " + timer.time());
+            if (i >= ITERATIONS_TO_DROP) total += timer.time();
+            System.out.println("insAattr_indy: " + timer.time());
         }
-        System.out.println("Average: " + total/(ITERATIONS - 1) + "\n");
+        System.out.println("Excluding " + ITERATIONS_TO_DROP + " Average: " + total/(ITERATIONS - ITERATIONS_TO_DROP) + "\n");
     }
 
-    public void testInsAattr_each() {
+    public void insAattr_each() {
         long total = 0;
         for (int i = 0; i < ITERATIONS; ++i) {
             // garbage collect what we can before each test
@@ -88,13 +142,13 @@ public class StressTest extends Abstract
             session.currentTransaction().commit();
             // drop the first iteration
             timer.stop();
-            if (i > 0) total += timer.time();
-            System.out.println("testInsAattr_each: " + timer.time());
+            if (i >= ITERATIONS_TO_DROP) total += timer.time();
+            System.out.println("insAattr_each: " + timer.time());
         }
-        System.out.println("Average: " + total/(ITERATIONS - 1) + "\n");
+        System.out.println("Excluding " + ITERATIONS_TO_DROP + " Average: " + total/(ITERATIONS - ITERATIONS_TO_DROP) + "\n");
     }
 
-    public void testInsAattr_bulk() {
+    public void insAattr_bulk() {
         long total = 0;
         for (int i = 0; i < ITERATIONS; ++i) {
             // garbage collect what we can before each test
@@ -108,10 +162,122 @@ public class StressTest extends Abstract
             session.currentTransaction().commit();
             // drop the first iteration
             timer.stop();
-            if (i > 0) total += timer.time();
-            System.out.println("testInsAattr_bulk: " + timer.time());
+            if (i >= ITERATIONS_TO_DROP) total += timer.time();
+            System.out.println("insAattr_bulk: " + timer.time());
+        }
+        System.out.println("Excluding " + ITERATIONS_TO_DROP + " Average: " + total/(ITERATIONS - ITERATIONS_TO_DROP) + "\n");
+    }
+
+    public void getA_indy() {
+        long total = 0;
+        for (int i = 0; i < ITERATIONS; ++i) {
+            // garbage collect what we can before each test
+            gc();
+            timer.start();
+            for (int key = 0; key < NUMBER_TO_INSERT; ++key) {
+                session.find(Stress.class, key);
+            }
+            // drop the first ITERATIONS_TO_DROP iterations
+            timer.stop();
+            if (i >= ITERATIONS_TO_DROP) total += timer.time();
+            System.out.println("getA_indy: " + timer.time());
+        }
+        System.out.println("Excluding " + ITERATIONS_TO_DROP + " Average: " + total/(ITERATIONS - ITERATIONS_TO_DROP) + "\n");
+    }
+
+    public void getA_each() {
+        long total = 0;
+        for (int i = 0; i < ITERATIONS; ++i) {
+            // garbage collect what we can before each test
+            gc();
+            timer.start();
+            session.currentTransaction().begin();
+            for (int key = 0; key < NUMBER_TO_INSERT; ++key) {
+                session.find(Stress.class, key);
+            }
+            session.currentTransaction().commit();
+            // drop the first ITERATIONS_TO_DROP iterations
+            timer.stop();
+            if (i >= ITERATIONS_TO_DROP) total += timer.time();
+            System.out.println("getA_each: " + timer.time());
+        }
+        System.out.println("Excluding " + ITERATIONS_TO_DROP + " Average: " + total/(ITERATIONS - ITERATIONS_TO_DROP) + "\n");
+    }
+
+    public void getA_bulk() {
+        long total = 0;
+        for (int i = 0; i < ITERATIONS; ++i) {
+            // garbage collect what we can before each test
+            gc();
+            timer.start();
+            session.currentTransaction().begin();
+            for (int key = 0; key < NUMBER_TO_INSERT; ++key) {
+                Stress instance = createObject(key);
+                session.load(instance);
+            }
+            session.currentTransaction().commit();
+            // drop the first ITERATIONS_TO_DROP iterations
+            timer.stop();
+            if (i >= ITERATIONS_TO_DROP) total += timer.time();
+            System.out.println("getA_bulk: " + timer.time());
+        }
+        System.out.println("Excluding " + ITERATIONS_TO_DROP + " Average: " + total/(ITERATIONS - ITERATIONS_TO_DROP) + "\n");
+    }
+
+    public void delA_indy() {
+        long total = 0;
+        for (int i = 0; i < ITERATIONS; ++i) {
+            // garbage collect what we can before each test
+            gc();
+            timer.start();
+            for (int key = 0; key < NUMBER_TO_INSERT; ++key) {
+                session.deletePersistent(Stress.class, key);
+            }
+            // drop the first ITERATIONS_TO_DROP iterations
+            timer.stop();
+            if (i >= ITERATIONS_TO_DROP) total += timer.time();
+            System.out.println("delA_indy: " + timer.time());
+        }
+        System.out.println("Excluding " + ITERATIONS_TO_DROP + " Average: " + total/(ITERATIONS - ITERATIONS_TO_DROP) + "\n");
+    }
+
+    public void delA_each() {
+        long total = 0;
+        for (int i = 0; i < ITERATIONS; ++i) {
+            // garbage collect what we can before each test
+            gc();
+            timer.start();
+            session.currentTransaction().begin();
+            for (int key = 0; key < NUMBER_TO_INSERT; ++key) {
+                session.deletePersistent(Stress.class, key);
+                session.flush();
+            }
+            session.currentTransaction().commit();
+            // drop the first ITERATIONS_TO_DROP iterations
+            timer.stop();
+            if (i >= ITERATIONS_TO_DROP) total += timer.time();
+            System.out.println("delA_each: " + timer.time());
+        }
+        System.out.println("Excluding " + ITERATIONS_TO_DROP + " Average: " + total/(ITERATIONS - ITERATIONS_TO_DROP) + "\n");
+    }
+
+    public void delA_bulk() {
+        long total = 0;
+        for (int i = 0; i < ITERATIONS; ++i) {
+            // garbage collect what we can before each test
+            gc();
+            timer.start();
+            session.currentTransaction().begin();
+            for (int key = 0; key < NUMBER_TO_INSERT; ++key) {
+                session.deletePersistent(Stress.class, key);
+            }
+            session.currentTransaction().commit();
+            // drop the first ITERATIONS_TO_DROP iterations
+            timer.stop();
+            if (i >= ITERATIONS_TO_DROP) total += timer.time();
+            System.out.println("delA_bulk: " + timer.time());
         }
-        System.out.println("Average: " + total/(ITERATIONS - 1) + "\n");
+        System.out.println("Excluding " + ITERATIONS_TO_DROP + " Average: " + total/(ITERATIONS - ITERATIONS_TO_DROP) + "\n");
     }
 
     protected Stress createObject(int key) {
@@ -125,14 +291,39 @@ public class StressTest extends Abstract
                 continue;
             }
             Class<?> cls = columnMetadata.javaType();
+            int length = columnMetadata.maximumLength();
             if (int.class == cls) {
-                value = key;
+                value = key + columnNumber;
             } else if (long.class == cls) {
-                value = (long)key;
+                value = (long)(key + columnNumber);
             } else if (float.class == cls) {
-                value = (float)key;
+                value = (float)(key + columnNumber);
             } else if (double.class == cls) {
-                value = (double)key;
+                value = (double)(key + columnNumber);
+            } else if (short.class == cls) {
+                value = (short)(key + columnNumber);
+            } else if (byte.class == cls) {
+                value = (byte)(key + columnNumber);
+            } else if (Integer.class == cls) {
+                value = (int)(key + columnNumber);
+            } else if (Long.class == cls) {
+                value = (long)(key + columnNumber);
+            } else if (Float.class == cls) {
+                value = (float)(key + columnNumber);
+            } else if (Double.class == cls) {
+                value = (double)(key + columnNumber);
+            } else if (Short.class == cls) {
+                value = (short)(key + columnNumber);
+            } else if (Byte.class == cls) {
+                value = (byte)(key + columnNumber);
+            } else if (String.class == cls) {
+                // take 'n' characters from the static String
+                value = STRING.substring(key + columnNumber, key + columnNumber + length);
+            } else if (byte[].class == cls) {
+                // take 'n' bytes from the static byte array
+                value = new byte[length];
+                BYTES.position((key + columnNumber));
+                BYTES.get((byte[])value);
             } else {
                 throw new ClusterJFatalUserException("Unsupported column type " + cls.getName()
                         + " for column " + columnMetadata.name());
@@ -146,6 +337,7 @@ public class StressTest extends Abstract
         public Stress() {}
 
         public String table() {
+            System.out.println("Stress table being used: " + tableName);
             return tableName;
         }
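
The timing loops above all follow the same shape: run ITERATIONS times, discard the first ITERATIONS_TO_DROP measurements so JIT compilation and cold caches do not skew the result, and average the rest. The pattern reduced to a standalone sketch (System.nanoTime stands in for the test's Timer):

    public class WarmupAverage {
        static final int ITERATIONS = 7;
        static final int ITERATIONS_TO_DROP = 3;

        public static void main(String[] args) {
            long total = 0;
            for (int i = 0; i < ITERATIONS; ++i) {
                long start = System.nanoTime();
                workload();                                     // stand-in for the ins/get/del body
                long elapsed = System.nanoTime() - start;
                if (i >= ITERATIONS_TO_DROP) total += elapsed;  // drop warm-up runs
            }
            System.out.println("Average over measured runs: "
                    + total / (ITERATIONS - ITERATIONS_TO_DROP));
        }

        static void workload() { /* timed work goes here */ }
    }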
 

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ClusterConnectionImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ClusterConnectionImpl.java	2012-01-23 20:54:27 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ClusterConnectionImpl.java	2012-02-08 17:27:45 +0000
@@ -204,6 +204,7 @@ public class ClusterConnectionImpl
         NdbRecordImpl result = ndbRecordImplMap.get(tableName);
         if (result != null) {
             // case 1
+            if (logger.isDebugEnabled())logger.debug("NdbRecordImpl found for " + tableName);
             return result;
         } else {
             NdbRecordImpl newNdbRecordImpl = new NdbRecordImpl(storeTable, dictionaryForNdbRecord);
@@ -221,4 +222,17 @@ public class ClusterConnectionImpl
         }
     }
 
+    /** Remove the cached NdbRecord associated with this table. This allows schema changes to work.
+     * @param tableName the name of the table
+     */
+    public void unloadSchema(String tableName) {
+        if (logger.isDebugEnabled())logger.debug("Removing cached NdbRecord for " + tableName);
+        NdbRecordImpl ndbRecordImpl = ndbRecordImplMap.remove(tableName);
+        if (ndbRecordImpl != null) {
+            ndbRecordImpl.releaseNdbRecord();
+        }
+        if (logger.isDebugEnabled())logger.debug("Removing dictionary entry for cached table " + tableName);
+        dictionaryForNdbRecord.removeCachedTable(tableName);
+    }
+
 }
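
unloadSchema here has to do two things, in order: evict the cached NdbRecordImpl so the next lookup rebuilds it, and release the underlying native NdbRecord, which would otherwise leak since it is only freed explicitly. A sketch of that evict-and-release shape, using a plain ConcurrentHashMap and a stand-in record type (the real map and types are internal to this class):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    class RecordCache {
        /** Stand-in for NdbRecordImpl; release() models releaseNdbRecord(). */
        static class CachedRecord {
            void release() { /* free the native NdbRecord here */ }
        }

        private final ConcurrentMap<String, CachedRecord> cache =
                new ConcurrentHashMap<String, CachedRecord>();

        void unloadSchema(String tableName) {
            CachedRecord cached = cache.remove(tableName); // evict: next access rebuilds
            if (cached != null) {
                cached.release();                          // free the native-side record
            }
        }
    }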

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ClusterTransactionImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ClusterTransactionImpl.java	2012-01-23 00:44:39 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ClusterTransactionImpl.java	2012-02-08 17:27:45 +0000
@@ -209,13 +209,16 @@ class ClusterTransactionImpl implements 
 
     public Operation getDeleteOperation(Table storeTable) {
         enlist();
+        if (logger.isTraceEnabled()) logger.trace("Table: " + storeTable.getName());
+        if (USE_NDBRECORD) {
+            return new NdbRecordDeleteOperationImpl(this, storeTable);
+        }
         TableConst ndbTable = ndbDictionary.getTable(storeTable.getName());
         handleError(ndbTable, ndbDictionary);
         NdbOperation ndbOperation = ndbTransaction.getNdbOperation(ndbTable);
         handleError(ndbOperation, ndbTransaction);
         int returnCode = ndbOperation.deleteTuple();
         handleError(returnCode, ndbTransaction);
-        if (logger.isTraceEnabled()) logger.trace("Table: " + storeTable.getName());;
         return new OperationImpl(ndbOperation, this);
     }
 
@@ -223,7 +226,7 @@ class ClusterTransactionImpl implements 
         enlist();
         if (logger.isTraceEnabled()) logger.trace("Table: " + storeTable.getName());
         if (USE_NDBRECORD) {
-            return new NdbRecordOperationImpl(this, storeTable);
+            return new NdbRecordInsertOperationImpl(this, storeTable);
         }
         TableConst ndbTable = ndbDictionary.getTable(storeTable.getName());
         handleError(ndbTable, ndbDictionary);
@@ -284,6 +287,9 @@ class ClusterTransactionImpl implements 
 
     public Operation getSelectOperation(Table storeTable) {
         enlist();
+        if (USE_NDBRECORD) {
+            return new NdbRecordKeyOperationImpl(this, storeTable);
+        }
         TableConst ndbTable = ndbDictionary.getTable(storeTable.getName());
         handleError(ndbTable, ndbDictionary);
         NdbOperation ndbOperation = ndbTransaction.getNdbOperation(ndbTable);
@@ -404,6 +410,40 @@ class ClusterTransactionImpl implements 
         return operation;
     }
 
+    /** Create an NdbOperation for delete using NdbRecord.
+     * 
+     * @param ndbRecord the NdbRecord
+     * @param buffer the buffer with data for the operation
+     * @param mask the mask of column values already set in the buffer
+     * @param options the OperationOptions for this operation
+     * @return the delete operation
+     */
+    public NdbOperationConst deleteTuple(NdbRecordConst ndbRecord,
+            ByteBuffer buffer, byte[] mask, OperationOptionsConst options) {
+        NdbOperationConst operation = ndbTransaction.deleteTuple(ndbRecord, buffer, ndbRecord, null, mask, options, 0);
+        handleError(operation, ndbTransaction);
+        return operation;
+    }
+
+    /** Create an NdbOperation for key read using NdbRecord. The 'find' lock mode is used.
+     * 
+     * @param ndbRecordKeys the NdbRecord for the key
+     * @param keyBuffer the buffer with the key for the operation
+     * @param ndbRecordValues the NdbRecord for the value
+     * @param valueBuffer the buffer with the value returned by the operation
+     * @param mask the mask of column values to be read
+     * @param options the OperationOptions for this operation
+     * @return the ndb operation for key read
+     */
+    public NdbOperationConst readTuple(NdbRecordConst ndbRecordKeys, ByteBuffer keyBuffer,
+            NdbRecordConst ndbRecordValues, ByteBuffer valueBuffer,
+            byte[] mask, OperationOptionsConst options) {
+        NdbOperationConst operation = ndbTransaction.readTuple(ndbRecordKeys, keyBuffer, 
+                ndbRecordValues, valueBuffer, findLockMode, mask, options, 0);
+        handleError(operation, ndbTransaction);
+        return operation;
+    }
+
     public void postExecuteCallback(Runnable callback) {
         postExecuteCallbacks.add(callback);
     }
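
Both new wrappers take a byte[] mask with one bit per column; the javadoc's "mask of column values" presumably follows the same bit-per-columnId layout as the null-indicator masks in NdbRecordImpl, i.e. bit (columnId % 8) of byte (columnId / 8). Under that assumption, setting and testing a column bit looks like this:

    public class ColumnMask {
        public static void main(String[] args) {
            int numberOfColumns = 10;
            byte[] mask = new byte[1 + (numberOfColumns / 8)]; // same sizing as the operations use

            int columnId = 9;
            mask[columnId / 8] |= (byte)(1 << (columnId % 8)); // mark column 9 as set

            boolean isSet = (mask[columnId / 8] & (1 << (columnId % 8))) != 0;
            System.out.println("column " + columnId + " set: " + isSet); // true
        }
    }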

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DbImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DbImpl.java	2012-01-23 00:44:39 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DbImpl.java	2012-02-08 17:27:45 +0000
@@ -90,7 +90,7 @@ class DbImpl implements com.mysql.cluste
         handleError(returnCode, ndb);
         ndbDictionary = ndb.getDictionary();
         handleError(ndbDictionary, ndb);
-        this.dictionary = new DictionaryImpl(ndbDictionary);
+        this.dictionary = new DictionaryImpl(ndbDictionary, clusterConnection);
     }
 
     public void close() {

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DictionaryImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DictionaryImpl.java	2012-01-23 00:44:39 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DictionaryImpl.java	2012-02-08 17:27:45 +0000
@@ -46,8 +46,11 @@ class DictionaryImpl implements com.mysq
 
     private Dictionary ndbDictionary;
 
-    public DictionaryImpl(Dictionary ndbDictionary) {
+    private ClusterConnectionImpl clusterConnection;
+
+    public DictionaryImpl(Dictionary ndbDictionary, ClusterConnectionImpl clusterConnection) {
         this.ndbDictionary = ndbDictionary;
+        this.clusterConnection = clusterConnection;
     }
 
     public Table getTable(String tableName) {
@@ -123,8 +126,14 @@ class DictionaryImpl implements com.mysq
         }
     }
 
+    /** Remove cached table from this ndb dictionary. This allows schema change to work.
+     * @param tableName the name of the table
+     */
     public void removeCachedTable(String tableName) {
+        // remove the cached table from this dictionary
         ndbDictionary.removeCachedTable(tableName);
+        // also remove the cached NdbRecord associated with this table
+        clusterConnection.unloadSchema(tableName);
     }
 
     public Dictionary getNdbDictionary() {

=== added file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordDeleteOperationImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordDeleteOperationImpl.java	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordDeleteOperationImpl.java	2012-02-08 17:27:45 +0000
@@ -0,0 +1,54 @@
+/*
+ *  Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+ */
+
+package com.mysql.clusterj.tie;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import com.mysql.clusterj.core.store.Table;
+
+public class NdbRecordDeleteOperationImpl extends NdbRecordOperationImpl {
+
+    /** The number of columns for this operation */
+    protected int numberOfColumns;
+
+    public NdbRecordDeleteOperationImpl(
+            ClusterTransactionImpl clusterTransaction, Table storeTable) {
+        super(clusterTransaction);
+        this.ndbRecordKeys = clusterTransaction.getCachedNdbRecordImpl(storeTable);
+        this.keyBufferSize = ndbRecordKeys.getBufferSize();
+        this.numberOfColumns = ndbRecordKeys.getNumberOfColumns();
+    }
+
+    public void beginDefinition() {
+        // allocate a buffer for the operation data
+        keyBuffer = ByteBuffer.allocateDirect(keyBufferSize);
+        // use platform's native byte ordering
+        keyBuffer.order(ByteOrder.nativeOrder());
+        mask = new byte[1 + (numberOfColumns/8)];
+    }
+
+    public void endDefinition() {
+        // position the buffer at the beginning for ndbjtie
+        keyBuffer.position(0);
+        keyBuffer.limit(keyBufferSize);
+        // create the delete operation
+        ndbOperation = clusterTransaction.deleteTuple(ndbRecordKeys.getNdbRecord(), keyBuffer, mask, null);
+    }
+
+}
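
The begin/endDefinition split seen in the SessionImpl and query changes earlier pairs with implementations like this one: beginDefinition allocates the direct buffer and mask, the key setters then fill the buffer, and endDefinition hands the completed buffer to the transaction to build the NdbOperation. The caller-side protocol, reduced to a sketch with a minimal stand-in for the store Operation interface (setInt is hypothetical):

    interface Operation {
        void beginDefinition();                // allocate buffer and mask
        void setInt(String column, int value); // hypothetical key setter
        void endDefinition();                  // build the NdbOperation from the buffer
    }

    class DefinitionProtocol {
        static void deleteByKey(Operation op, int key) {
            op.beginDefinition();  // buffers exist only after this call
            op.setInt("id", key);  // writes into the direct key buffer
            op.endDefinition();    // positions the buffer and creates the delete
        }
    }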

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordImpl.java	2012-01-23 00:44:39 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordImpl.java	2012-02-08 17:27:45 +0000
@@ -45,11 +45,11 @@ import com.mysql.ndbjtie.ndbapi.NdbDicti
 import com.mysql.ndbjtie.ndbapi.NdbDictionary.TableConst;
 
 /**
- * Wrapper around an NdbRecord. The default implementation can be used for insert,
+ * Wrapper around an NdbRecord. The default implementation can be used for create, read, update, or delete
  * using an NdbRecord that defines every column in the table. After construction, the instance is
  * read-only and can be shared among all threads that use the same cluster connection; and the size of the
  * buffer required for operations is available. The NdbRecord instance is released when the cluster
- * connection is closed. Column values can be set using a provided
+ * connection is closed or when a schema change invalidates it. Column values can be set using a provided
  * buffer and buffer manager.
  */
 public class NdbRecordImpl {
@@ -65,11 +65,11 @@ public class NdbRecordImpl {
     /** The size of the NdbRecord struct */
     protected final static int SIZEOF_RECORD_SPECIFICATION = ClusterConnectionServiceImpl.SIZEOF_RECORD_SPECIFICATION;
 
-    /** The NdbRecord for this operation */
+    /** The NdbRecord for this operation, created at construction */
     private NdbRecord ndbRecord = null;
 
     /** The store columns for this operation */
-    protected List<Column> storeColumns = new ArrayList<Column>();
+    protected Column[] storeColumns = null;
 
     /** The RecordSpecificationArray used to define the columns in the NdbRecord */
     private RecordSpecificationArray recordSpecificationArray;
@@ -77,27 +77,30 @@ public class NdbRecordImpl {
     /** The NdbTable */
     TableConst tableConst;
 
-    /** The size of the receive buffer for this operation (may be zero for non-read operations) */
+    /** The size of the receive buffer for this operation */
     protected int bufferSize;
 
-    /** The maximum column id for this operation (may be zero for non-read operations) */
+    /** The maximum column id for this operation */
     protected int maximumColumnId;
 
-    /** The offsets into the buffer for each column (may be null for non-read operations) */
+    /** The offsets into the buffer for each column */
     protected int[] offsets;
 
+    /** The lengths of the column data */
+    protected int[] lengths;
+
     /** Values for setting column mask and null bit mask */
     protected final static byte[] BIT_IN_BYTE_MASK = new byte[] {1, 2, 4, 8, 16, 32, 64, -128};
 
-    /** The position in the null indicator for the field */
-    protected int nullablePositions[] = null;
-
     /** The null indicator for the field bit in the byte */
     protected int nullbitBitInByte[] = null;
 
     /** The null indicator for the field byte offset*/
     protected int nullbitByteOffset[] = null;
 
+    /** The size of the null indicator byte array */
+    protected int nullIndicatorSize;
+
     /** The maximum length of any column in this operation */
     protected int maximumColumnLength;
 
@@ -122,13 +125,13 @@ public class NdbRecordImpl {
         this.numberOfColumns = tableConst.getNoOfColumns();
         this.recordSpecificationArray = RecordSpecificationArray.create(numberOfColumns);
         this.offsets = new int[numberOfColumns];
-        this.nullablePositions = new int[numberOfColumns];
+        this.lengths = new int[numberOfColumns];
         this.nullbitBitInByte = new int[numberOfColumns];
         this.nullbitByteOffset = new int[numberOfColumns];
+        this.storeColumns = new Column[numberOfColumns];
         this.ndbRecord = createNdbRecord(storeTable, ndbDictionary);
     }
 
-
     public int setBigInteger(ByteBuffer buffer, Column storeColumn, BigInteger value) {
         int columnId = storeColumn.getColumnId();
         int newPosition = offsets[columnId];
@@ -140,7 +143,12 @@ public class NdbRecordImpl {
 
     public int setByte(ByteBuffer buffer, Column storeColumn, byte value) {
         int columnId = storeColumn.getColumnId();
-        buffer.put(offsets[columnId], value);
+        if (storeColumn.getLength() == 4) {
+            // the byte is stored as a BIT array of four bytes
+            buffer.putInt(offsets[columnId], value);
+        } else {
+            buffer.put(offsets[columnId], (byte)value);
+        }
         return columnId;
     }
 
@@ -156,6 +164,7 @@ public class NdbRecordImpl {
         int columnId = storeColumn.getColumnId();
         int newPosition = offsets[columnId];
         buffer.position(newPosition);
+        // TODO provide the buffer to Utility.convertValue to avoid copying
         ByteBuffer decimalBuffer = Utility.convertValue(storeColumn, value);
         buffer.put(decimalBuffer);
         return columnId;
@@ -175,7 +184,8 @@ public class NdbRecordImpl {
 
     public int setInt(ByteBuffer buffer, Column storeColumn, Integer value) {
         int columnId = storeColumn.getColumnId();
-        buffer.putInt(offsets[columnId], value);
+        int storageValue = Utility.convertIntValueForStorage(storeColumn, value);
+        buffer.putInt(offsets[columnId], storageValue);
         return columnId;
     }
 
@@ -197,19 +207,275 @@ public class NdbRecordImpl {
 
     public int setShort(ByteBuffer buffer, Column storeColumn, Short value) {
         int columnId = storeColumn.getColumnId();
-        buffer.putShort(offsets[columnId], value);
+        if (storeColumn.getLength() == 4) {
+            // the short is stored as a BIT array of four bytes
+            buffer.putInt(offsets[columnId], value);
+        } else {
+            buffer.putShort(offsets[columnId], (short)value);
+        }
         return columnId;
     }
 
     public int setString(ByteBuffer buffer, BufferManager bufferManager, Column storeColumn, String value) {
         int columnId = storeColumn.getColumnId();
         buffer.position(offsets[columnId]);
+        // TODO provide the buffer to Utility.encode to avoid copying
         // for now, use the encode method to encode the value then copy it
         ByteBuffer converted = Utility.encode(value, storeColumn, bufferManager);
         buffer.put(converted);
         return columnId;
     }
 
+    public boolean getBoolean(ByteBuffer buffer, int columnId) {
+        int value = buffer.getInt(offsets[columnId]);
+        return Utility.getBoolean(storeColumns[columnId], value);
+    }
+
+    public byte getByte(ByteBuffer buffer, int columnId) {
+        Column storeColumn = storeColumns[columnId];
+        if (storeColumn.getLength() == 4) {
+            // the byte was stored in a BIT column as four bytes
+            return (byte)buffer.get(offsets[columnId]);
+        } else {
+            // the byte was stored as a byte
+            return buffer.get(offsets[columnId]);
+        }
+    }
+
+    public byte[] getBytes(ByteBuffer byteBuffer, int columnId) {
+        return getBytes(byteBuffer, storeColumns[columnId]);
+    }
+
+    public byte[] getBytes(ByteBuffer byteBuffer, Column storeColumn) {
+        int columnId = storeColumn.getColumnId();
+        if (isNull(byteBuffer, columnId)) {
+            return null;
+        }
+        int prefixLength = storeColumn.getPrefixLength();
+        int actualLength = lengths[columnId];
+        int offset = offsets[columnId];
+        switch (prefixLength) {
+            case 0:
+                break;
+            case 1:
+                actualLength = (byteBuffer.get(offset) + 256) % 256;
+                offset += 1;
+                break;
+            case 2:
+                actualLength = (byteBuffer.get(offset) + 256) % 256;
+                int length2 = (byteBuffer.get(offset + 1) + 256) % 256;
+                actualLength += 256 * length2;
+                offset += 2;
+                break;
+            default:
+                throw new ClusterJFatalInternalException(
+                        local.message("ERR_Invalid_Prefix_Length", prefixLength));
+        }
+        byteBuffer.position(offset);
+        byte[] result = new byte[actualLength];
+        byteBuffer.get(result);
+        return result;
+    }
+
+    public double getDouble(ByteBuffer buffer, int columnId) {
+        buffer.position(offsets[columnId]);
+        double result = buffer.getDouble();
+        return result;
+    }
+
+    public float getFloat(ByteBuffer buffer, int columnId) {
+        buffer.position(offsets[columnId]);
+        float result = buffer.getFloat();
+        return result;
+    }
+
+    public int getInt(ByteBuffer buffer, int columnId) {
+        buffer.position(offsets[columnId]);
+        int value = buffer.getInt();
+        return Utility.getInt(storeColumns[columnId], value);
+    }
+
+    public long getLong(ByteBuffer buffer, int columnId) {
+        buffer.position(offsets[columnId]);
+        long value = buffer.getLong();
+        return Utility.getLong(storeColumns[columnId], value);
+    }
+
+    public short getShort(ByteBuffer buffer, int columnId) {
+        Column storeColumn = storeColumns[columnId];
+        if (storeColumn.getLength() == 4) {
+            // the short was stored in a BIT column as four bytes
+            return (short)buffer.get(offsets[columnId]);
+        } else {
+            // the short was stored as a short
+            return buffer.getShort(offsets[columnId]);
+        }
+    }
+
+    public String getString(ByteBuffer byteBuffer, int columnId, BufferManager bufferManager) {
+      if (isNull(byteBuffer, columnId)) {
+          return null;
+      }
+      Column storeColumn = storeColumns[columnId];
+      int prefixLength = storeColumn.getPrefixLength();
+      int actualLength;
+      int offset = offsets[columnId];
+      byteBuffer.limit(byteBuffer.capacity());
+      switch (prefixLength) {
+          case 0:
+              actualLength = lengths[columnId];
+              break;
+          case 1:
+              actualLength = (byteBuffer.get(offset) + 256) % 256;
+              offset += 1;
+              break;
+          case 2:
+              actualLength = (byteBuffer.get(offset) + 256) % 256;
+              int length2 = (byteBuffer.get(offset + 1) + 256) % 256;
+              actualLength += 256 * length2;
+              offset += 2;
+              break;
+          default:
+              throw new ClusterJFatalInternalException(
+                      local.message("ERR_Invalid_Prefix_Length", prefixLength));
+      }
+      byteBuffer.position(offset);
+      byteBuffer.limit(offset + actualLength);
+
+      String result = Utility.decode(byteBuffer, storeColumn.getCharsetNumber(), bufferManager);
+      return result;
+    }
+
+    public BigInteger getBigInteger(ByteBuffer byteBuffer, int columnId) {
+        Column storeColumn = storeColumns[columnId];
+        int index = storeColumn.getColumnId();
+        int offset = offsets[index];
+        int precision = storeColumn.getPrecision();
+        int scale = storeColumn.getScale();
+        int length = Utility.getDecimalColumnSpace(precision, scale);
+        byteBuffer.position(offset);
+        return Utility.getBigInteger(byteBuffer, length, precision, scale);
+    }
+
+    public BigInteger getBigInteger(ByteBuffer byteBuffer, Column storeColumn) {
+        int index = storeColumn.getColumnId();
+        int offset = offsets[index];
+        int precision = storeColumn.getPrecision();
+        int scale = storeColumn.getScale();
+        int length = Utility.getDecimalColumnSpace(precision, scale);
+        byteBuffer.position(offset);
+        return Utility.getBigInteger(byteBuffer, length, precision, scale);
+    }
+
+    public BigDecimal getDecimal(ByteBuffer byteBuffer, int columnId) {
+        Column storeColumn = storeColumns[columnId];
+        int index = storeColumn.getColumnId();
+        int offset = offsets[index];
+        int precision = storeColumn.getPrecision();
+        int scale = storeColumn.getScale();
+        int length = Utility.getDecimalColumnSpace(precision, scale);
+        byteBuffer.position(offset);
+        return Utility.getDecimal(byteBuffer, length, precision, scale);
+    }
+
+    public BigDecimal getDecimal(ByteBuffer byteBuffer, Column storeColumn) {
+      int index = storeColumn.getColumnId();
+      int offset = offsets[index];
+      int precision = storeColumn.getPrecision();
+      int scale = storeColumn.getScale();
+      int length = Utility.getDecimalColumnSpace(precision, scale);
+      byteBuffer.position(offset);
+      return Utility.getDecimal(byteBuffer, length, precision, scale);
+    }
+
+    public Boolean getObjectBoolean(ByteBuffer byteBuffer, int columnId) {
+        if (isNull(byteBuffer, columnId)) {
+            return null;
+        }
+        return Boolean.valueOf(getBoolean(byteBuffer, columnId));        
+    }
+
+    public Boolean getObjectBoolean(ByteBuffer byteBuffer, Column storeColumn) {
+        return getObjectBoolean(byteBuffer, storeColumn.getColumnId());
+    }
+
+    public Byte getObjectByte(ByteBuffer byteBuffer, int columnId) {
+        if (isNull(byteBuffer, columnId)) {
+            return null;
+        }
+        return getByte(byteBuffer, columnId);        
+    }
+
+    public Byte getObjectByte(ByteBuffer byteBuffer, Column storeColumn) {
+        return getObjectByte(byteBuffer, storeColumn.getColumnId());
+    }
+
+    public Float getObjectFloat(ByteBuffer byteBuffer, int columnId) {
+        if (isNull(byteBuffer, columnId)) {
+            return null;
+        }
+        return getFloat(byteBuffer, columnId);        
+    }
+
+    public Float getObjectFloat(ByteBuffer byteBuffer, Column storeColumn) {
+        return getObjectFloat(byteBuffer, storeColumn.getColumnId());
+    }
+
+    public Double getObjectDouble(ByteBuffer byteBuffer, int columnId) {
+        if (isNull(byteBuffer, columnId)) {
+            return null;
+        }
+        return getDouble(byteBuffer, columnId);        
+    }
+
+    public Double getObjectDouble(ByteBuffer byteBuffer, Column storeColumn) {
+        return getObjectDouble(byteBuffer, storeColumn.getColumnId());
+    }
+
+    public Integer getObjectInteger(ByteBuffer byteBuffer, int columnId) {
+        if (isNull(byteBuffer, columnId)) {
+            return null;
+        }
+        return getInt(byteBuffer, columnId);        
+    }
+
+    public Integer getObjectInteger(ByteBuffer byteBuffer, Column storeColumn) {
+        return getObjectInteger(byteBuffer, storeColumn.getColumnId());
+    }
+
+    public Long getObjectLong(ByteBuffer byteBuffer, int columnId) {
+        if (isNull(byteBuffer, columnId)) {
+            return null;
+        }
+        return getLong(byteBuffer, columnId);        
+    }
+
+    public Long getObjectLong(ByteBuffer byteBuffer, Column storeColumn) {
+        return getObjectLong(byteBuffer, storeColumn.getColumnId());
+    }
+
+    public Short getObjectShort(ByteBuffer byteBuffer, int columnId) {
+        if (isNull(byteBuffer, columnId)) {
+            return null;
+        }
+        return getShort(byteBuffer, columnId);        
+    }
+
+    public Short getObjectShort(ByteBuffer byteBuffer, Column storeColumn) {
+        return getObjectShort(byteBuffer, storeColumn.getColumnId());
+    }
+
+    public boolean isNull(ByteBuffer buffer, int columnId) {
+        if (!storeColumns[columnId].getNullable()) {
+            return false;
+        }
+        int index = nullbitByteOffset[columnId];
+        byte mask = BIT_IN_BYTE_MASK[nullbitBitInByte[columnId]];
+        byte nullbyte = buffer.get(index);
+        boolean result = (nullbyte & mask) != 0;
+        return result;
+    }
+
     protected static void handleError(Object object, Dictionary ndbDictionary) {
         if (object != null) {
             return;
@@ -225,9 +491,11 @@ public class NdbRecordImpl {
         List<Column> align2 = new ArrayList<Column>();
         List<Column> align1 = new ArrayList<Column>();
         List<Column> nullables = new ArrayList<Column>();
+        int i = 0;
         for (String columnName: columnNames) {
             Column storeColumn = storeTable.getColumn(columnName);
-            storeColumns.add(storeColumn);
+            lengths[i] = storeColumn.getLength();
+            storeColumns[i++] = storeColumn;
             // for each column, put into alignment bucket
             switch (storeColumn.getType()) {
                 case Bigint:
@@ -285,6 +553,7 @@ public class NdbRecordImpl {
         offset = nullables.size() + 7 / 8;
         // align the first column following the nullable column indicators to 8
         offset = (7 + offset) / 8 * 8;
+        nullIndicatorSize = offset;
         for (Column storeColumn: align8) {
             handleColumn(8, storeColumn);
         }
@@ -299,6 +568,8 @@ public class NdbRecordImpl {
         }
         bufferSize = offset;
 
+        if (logger.isDebugEnabled()) logger.debug(dump());
+
         // now create an NdbRecord
         NdbRecord result = ndbDictionary.createRecord(tableConst, recordSpecificationArray,
                 numberOfColumns, SIZEOF_RECORD_SPECIFICATION, 0);
@@ -322,26 +593,41 @@ public class NdbRecordImpl {
         recordSpecification.offset(offset);
         offsets[columnId] = offset;
         int columnSpace = storeColumn.getColumnSpace();
-        offset += (columnSpace==0)?8:columnSpace;
+        offset += ((columnSpace==0)?alignment:columnSpace);
         if (storeColumn.getNullable()) {
-            nullablePositions[columnId] = nullablePosition++;
             int nullbitByteOffsetValue = nullablePosition/8;
             int nullbitBitInByteValue = nullablePosition - nullablePosition / 8 * 8;
             nullbitBitInByte[columnId] = nullbitBitInByteValue;
             nullbitByteOffset[columnId] = nullbitByteOffsetValue;
             recordSpecification.nullbit_byte_offset(nullbitByteOffsetValue);
             recordSpecification.nullbit_bit_in_byte(nullbitBitInByteValue);
+            ++nullablePosition;
         } else {
             recordSpecification.nullbit_byte_offset(0);
             recordSpecification.nullbit_bit_in_byte(0);
         }
-        if (logger.isDetailEnabled()) logger.detail(
-                "column: " + storeColumn.getName()
-                + " columnSpace: " + columnSpace 
-                + " offset: " + offsets[columnId]
-                + " nullable position: " + nullablePositions[columnId]
-                + " nullbitByteOffset: " + nullbitByteOffset[columnId]
-                + " nullbitBitInByte: " +  nullbitBitInByte[columnId]);
+    }
+
+    private String dump() {
+        StringBuilder builder = new StringBuilder(tableConst.getName());
+        builder.append(" numberOfColumns: ");
+        builder.append(numberOfColumns);
+        builder.append('\n');
+        for (int columnId = 0; columnId < numberOfColumns; ++columnId) {
+            Column storeColumn = storeColumns[columnId];
+            builder.append(" column: ");
+            builder.append(storeColumn.getName());
+            builder.append(" offset: ");
+            builder.append(offsets[columnId]);
+            builder.append(" length: ");
+            builder.append(lengths[columnId]);
+            builder.append(" nullbitBitInByte: ");
+            builder.append(nullbitBitInByte[columnId]);
+            builder.append(" nullbitByteOffset: ");
+            builder.append(nullbitByteOffset[columnId]);
+            builder.append('\n');
+        }
+        return builder.toString();
     }
 
     TableConst getNdbTable(String tableName) {
@@ -367,9 +653,14 @@ public class NdbRecordImpl {
 
     protected void releaseNdbRecord() {
         if (ndbRecord != null) {
+            if (logger.isDebugEnabled()) logger.debug("Releasing NdbRecord for " + tableConst.getName());
             ndbDictionary.releaseRecord(ndbRecord);
             ndbRecord = null;
         }
     }
 
+    public int getNullIndicatorSize() {
+        return nullIndicatorSize;
+    }
+
 }
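
The buffer layout built above starts with a null indicator region of
nullIndicatorSize bytes; handleColumn assigns each nullable column a byte
offset (nullablePosition/8) and a bit within that byte, which isNull then
tests against the buffer. A minimal standalone sketch of that addressing,
assuming the BIT_IN_BYTE_MASK entries are simply 1 << bit:

    import java.nio.ByteBuffer;

    public class NullIndicatorSketch {
        // Test a column's null bit given its nullable position, using the
        // same byte/bit split as handleColumn and isNull above.
        static boolean isNull(ByteBuffer buffer, int nullablePosition) {
            int byteOffset = nullablePosition / 8;   // which indicator byte
            int bitInByte = nullablePosition % 8;    // which bit within it
            return (buffer.get(byteOffset) & (1 << bitInByte)) != 0;
        }

        public static void main(String[] args) {
            ByteBuffer buffer = ByteBuffer.allocate(8);
            buffer.put(1, (byte) 0x04);              // bit 2 of byte 1: position 10
            System.out.println(isNull(buffer, 10));  // true
            System.out.println(isNull(buffer, 9));   // false
        }
    }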

=== added file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordInsertOperationImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordInsertOperationImpl.java	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordInsertOperationImpl.java	2012-02-08 17:27:45 +0000
@@ -0,0 +1,63 @@
+/*
+ *  Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+ */
+
+package com.mysql.clusterj.tie;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import com.mysql.clusterj.core.store.Table;
+
+public class NdbRecordInsertOperationImpl extends NdbRecordOperationImpl {
+
+    /** The number of columns for this operation */
+    protected int numberOfColumns;
+
+    public NdbRecordInsertOperationImpl(ClusterTransactionImpl clusterTransaction, Table storeTable) {
+        super(clusterTransaction);
+        this.ndbRecordValues = clusterTransaction.getCachedNdbRecordImpl(storeTable);
+        this.ndbRecordKeys = ndbRecordValues;
+        this.valueBufferSize = ndbRecordValues.getBufferSize();
+        this.numberOfColumns = ndbRecordValues.getNumberOfColumns();
+        this.blobs = new NdbRecordBlobImpl[this.numberOfColumns];
+    }
+
+    public void beginDefinition() {
+        // allocate a buffer for the operation data
+        valueBuffer = ByteBuffer.allocateDirect(valueBufferSize);
+        // use platform's native byte ordering
+        valueBuffer.order(ByteOrder.nativeOrder());
+        // use value buffer for key buffer also
+        keyBuffer = valueBuffer;
+        mask = new byte[1 + (numberOfColumns/8)];
+    }
+
+    public void endDefinition() {
+        // position the buffer at the beginning for ndbjtie
+        valueBuffer.position(0);
+        valueBuffer.limit(valueBufferSize);
+        // create the insert operation
+        ndbOperation = clusterTransaction.insertTuple(ndbRecordValues.getNdbRecord(), valueBuffer, mask, null);
+        // now set the NdbBlob into the blobs
+        for (NdbRecordBlobImpl blob: activeBlobs) {
+            if (blob != null) {
+                blob.setNdbBlob();
+            }
+        }
+    }
+
+}
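
The division of labor here is that the insert subclass owns buffer allocation
(beginDefinition) and the final insertTuple call (endDefinition), while the
value setters inherited from NdbRecordOperationImpl fill the buffer and the
column mask in between. A hedged sketch of the call order; the transaction,
table, and column handles are assumed to come from the surrounding ClusterJ
session machinery:

    package com.mysql.clusterj.tie;

    import com.mysql.clusterj.core.store.Column;
    import com.mysql.clusterj.core.store.Table;

    class InsertSketch {
        static void insertRow(ClusterTransactionImpl tx, Table table,
                              Column idColumn, Column nameColumn) {
            NdbRecordInsertOperationImpl op =
                    new NdbRecordInsertOperationImpl(tx, table);
            op.beginDefinition();          // allocate direct value buffer and mask
            op.setInt(idColumn, 1);        // write value, set its bit in the mask
            op.setString(nameColumn, "x"); // strings go via the BufferManager
            op.endDefinition();            // issues insertTuple(record, buffer, mask)
        }
    }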

=== added file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordKeyOperationImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordKeyOperationImpl.java	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordKeyOperationImpl.java	2012-02-08 17:27:45 +0000
@@ -0,0 +1,114 @@
+/*
+   Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+ */
+
+package com.mysql.clusterj.tie;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import com.mysql.clusterj.core.store.Column;
+import com.mysql.clusterj.core.store.ResultData;
+import com.mysql.clusterj.core.store.Table;
+
+public class NdbRecordKeyOperationImpl extends NdbRecordOperationImpl {
+
+    /** The number of columns in the table */
+    protected int numberOfColumns;
+
+    public NdbRecordKeyOperationImpl(ClusterTransactionImpl clusterTransaction, Table storeTable) {
+        super(clusterTransaction);
+        this.ndbRecordKeys = clusterTransaction.getCachedNdbRecordImpl(storeTable);
+        this.keyBufferSize = ndbRecordKeys.getBufferSize();
+        this.ndbRecordValues = clusterTransaction.getCachedNdbRecordImpl(storeTable);
+        this.valueBufferSize = ndbRecordValues.getBufferSize();
+        this.numberOfColumns = ndbRecordValues.getNumberOfColumns();
+        this.blobs = new NdbRecordBlobImpl[this.numberOfColumns];
+    }
+
+    public void beginDefinition() {
+        // allocate a buffer for the key data
+        keyBuffer = ByteBuffer.allocateDirect(keyBufferSize);
+        keyBuffer.order(ByteOrder.nativeOrder());
+        // allocate a buffer for the value result data
+        valueBuffer = ByteBuffer.allocateDirect(valueBufferSize);
+        valueBuffer.order(ByteOrder.nativeOrder());
+        mask = new byte[1 + (numberOfColumns/8)];
+    }
+
+    /** Specify the columns to be used for the operation.
+     */
+    public void getValue(Column storeColumn) {
+        int columnId = storeColumn.getColumnId();
+        columnSet(columnId);
+    }
+
+    /**
+     * Mark this blob column to be read.
+     * @param storeColumn the store column
+     */
+    @Override
+    public void getBlob(Column storeColumn) {
+        // create an NdbRecordBlobImpl for the blob
+        int columnId = storeColumn.getColumnId();
+        columnSet(columnId);
+        NdbRecordBlobImpl blob = new NdbRecordBlobImpl(this, storeColumn);
+        blobs[columnId] = blob;
+    }
+
+    public void endDefinition() {
+        // position the key buffer at the beginning for ndbjtie
+        keyBuffer.position(0);
+        keyBuffer.limit(keyBufferSize);
+        // position the value buffer at the beginning for ndbjtie
+        valueBuffer.position(0);
+        valueBuffer.limit(valueBufferSize);
+        // create the key operation
+        ndbOperation = clusterTransaction.readTuple(ndbRecordKeys.getNdbRecord(), keyBuffer,
+                ndbRecordValues.getNdbRecord(), valueBuffer, mask, null);
+        // set up a callback when this operation is executed
+        clusterTransaction.postExecuteCallback(new Runnable() {
+            public void run() {
+                for (int columnId = 0; columnId < numberOfColumns; ++columnId) {
+                    NdbRecordBlobImpl blob = blobs[columnId];
+                    if (blob != null) {
+                        blob.setNdbBlob();
+                    }
+                }
+            }
+        });
+    }
+
+    /** Construct a new ResultData using the saved column data and then execute the operation.
+     */
+    @Override
+    public ResultData resultData() {
+        return resultData(true);
+    }
+
+    /** Construct a new ResultData and if requested, execute the operation.
+     */
+    @Override
+    public ResultData resultData(boolean execute) {
+        NdbRecordResultDataImpl result =
+            new NdbRecordResultDataImpl(this, ndbRecordValues, valueBuffer, bufferManager);
+        if (execute) {
+            clusterTransaction.executeNoCommit(false, true);
+        }
+        return result;
+    }
+
+}
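
The key read is the mirror image: the equal* methods write the key into the
key buffer, getValue and getBlob flag the columns to fetch in the mask, and
endDefinition issues the readTuple and hooks up blob handles via the
post-execute callback. A sketch with the same assumed plumbing as the insert
example:

    package com.mysql.clusterj.tie;

    import com.mysql.clusterj.core.store.Column;
    import com.mysql.clusterj.core.store.ResultData;
    import com.mysql.clusterj.core.store.Table;

    class KeyReadSketch {
        static ResultData readById(ClusterTransactionImpl tx, Table table,
                                   Column idColumn, Column nameColumn) {
            NdbRecordKeyOperationImpl op =
                    new NdbRecordKeyOperationImpl(tx, table);
            op.beginDefinition();      // allocate key and value buffers, clear mask
            op.equalInt(idColumn, 1);  // write the key into the key buffer
            op.getValue(nameColumn);   // flag the column to read in the mask
            op.endDefinition();        // issues readTuple(...) for the two records
            return op.resultData();    // executes, then wraps the value buffer
        }
    }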

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordOperationImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordOperationImpl.java	2012-01-23 00:44:39 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordOperationImpl.java	2012-02-08 17:27:45 +0000
@@ -21,7 +21,6 @@ import java.math.BigDecimal;
 import java.math.BigInteger;
 
 import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -32,7 +31,6 @@ import com.mysql.clusterj.core.store.Blo
 import com.mysql.clusterj.core.store.Column;
 import com.mysql.clusterj.core.store.Operation;
 import com.mysql.clusterj.core.store.ResultData;
-import com.mysql.clusterj.core.store.Table;
 import com.mysql.clusterj.core.util.I18NHelper;
 import com.mysql.clusterj.core.util.Logger;
 import com.mysql.clusterj.core.util.LoggerFactoryService;
@@ -46,7 +44,7 @@ import com.mysql.ndbjtie.ndbapi.NdbDicti
 /**
  * Implementation of store operation that uses NdbRecord.
  */
-class NdbRecordOperationImpl implements Operation {
+public abstract class NdbRecordOperationImpl implements Operation {
 
     /** My message translator */
     static final I18NHelper local = I18NHelper
@@ -60,16 +58,22 @@ class NdbRecordOperationImpl implements 
     protected ClusterTransactionImpl clusterTransaction;
 
     /** The NdbOperation wrapped by this object */
-    private NdbOperationConst ndbOperation = null;
+    protected NdbOperationConst ndbOperation = null;
 
-    /** The NdbRecord for this operation */
-    private NdbRecordImpl ndbRecordImpl = null;
+    /** The NdbRecord for keys */
+    protected NdbRecordImpl ndbRecordKeys = null;
 
-    /** The mask for this operation, which contains a bit set for each column to be inserted */
+    /** The NdbRecord for values */
+    protected NdbRecordImpl ndbRecordValues = null;
+
+    /** The mask for this operation, which contains a bit set for each column accessed */
     byte[] mask;
 
-    /** The ByteBuffer containing all of the data */
-    ByteBuffer buffer = null;
+    /** The ByteBuffer containing keys */
+    ByteBuffer keyBuffer = null;
+
+    /** The ByteBuffer containing values */
+    ByteBuffer valueBuffer = null;
 
     /** Blobs for this NdbRecord */
     protected NdbRecordBlobImpl[] blobs = null;
@@ -77,75 +81,85 @@ class NdbRecordOperationImpl implements 
     /** Blobs that have been accessed for this operation */
     protected List<NdbRecordBlobImpl> activeBlobs = new ArrayList<NdbRecordBlobImpl>();
 
-    /** The size of the receive buffer for this operation (may be zero for non-read operations) */
-    protected int bufferSize;
+    /** The size of the key buffer for this operation */
+    protected int keyBufferSize;
+
+    /** The size of the value buffer for this operation */
+    protected int valueBufferSize;
 
-    /** The number of columns for this operation */
-    protected int numberOfColumns;
+    /** The size of the null indicator byte array */
+    protected int nullIndicatorSize;
 
+    /** The buffer manager for string encode and decode */
     protected BufferManager bufferManager;
 
     /** Constructor used for insert and delete operations that do not need to read data.
      * 
      * @param clusterTransaction the cluster transaction
-     * @param transaction the ndb transaction
-     * @param storeTable the store table
      */
-    public NdbRecordOperationImpl(ClusterTransactionImpl clusterTransaction, Table storeTable) {
-        this.ndbRecordImpl = clusterTransaction.getCachedNdbRecordImpl(storeTable);
-        this.bufferSize = ndbRecordImpl.getBufferSize();
-        this.numberOfColumns = ndbRecordImpl.getNumberOfColumns();
-        this.blobs = new NdbRecordBlobImpl[this.numberOfColumns];
+    public NdbRecordOperationImpl(ClusterTransactionImpl clusterTransaction) {
         this.clusterTransaction = clusterTransaction;
         this.bufferManager = clusterTransaction.getBufferManager();
     }
 
     public void equalBigInteger(Column storeColumn, BigInteger value) {
-        setBigInteger(storeColumn, value);
+        int columnId = ndbRecordKeys.setBigInteger(keyBuffer, storeColumn, value);
+        columnSet(columnId);
     }
 
     public void equalBoolean(Column storeColumn, boolean booleanValue) {
-        setBoolean(storeColumn, booleanValue);
+        byte value = (booleanValue?(byte)0x01:(byte)0x00);
+        equalByte(storeColumn, value);
     }
 
     public void equalByte(Column storeColumn, byte value) {
-        setByte(storeColumn, value);
+        int columnId = ndbRecordKeys.setByte(keyBuffer, storeColumn, value);
+        columnSet(columnId);
     }
 
     public void equalBytes(Column storeColumn, byte[] value) {
-        setBytes(storeColumn, value);
+        int columnId = ndbRecordKeys.setBytes(keyBuffer, storeColumn, value);
+        columnSet(columnId);
     }
 
     public void equalDecimal(Column storeColumn, BigDecimal value) {
-        setDecimal(storeColumn, value);
+        int columnId = ndbRecordKeys.setDecimal(keyBuffer, storeColumn, value);
+        columnSet(columnId);
     }
 
     public void equalDouble(Column storeColumn, double value) {
-        setDouble(storeColumn, value);
+        int columnId = ndbRecordKeys.setDouble(keyBuffer, storeColumn, value);
+        columnSet(columnId);
     }
 
     public void equalFloat(Column storeColumn, float value) {
-        setFloat(storeColumn, value);
+        int columnId = ndbRecordKeys.setFloat(keyBuffer, storeColumn, value);
+        columnSet(columnId);
     }
 
     public void equalInt(Column storeColumn, int value) {
-        setInt(storeColumn, value);
+        int columnId = ndbRecordKeys.setInt(keyBuffer, storeColumn, value);
+        columnSet(columnId);
     }
 
     public void equalShort(Column storeColumn, short value) {
-        setShort(storeColumn, value);
+        int columnId = ndbRecordKeys.setShort(keyBuffer, storeColumn, value);
+        columnSet(columnId);
     }
 
     public void equalLong(Column storeColumn, long value) {
-        setLong(storeColumn, value);
+        int columnId = ndbRecordKeys.setLong(keyBuffer, storeColumn, value);
+        columnSet(columnId);
     }
 
     public void equalString(Column storeColumn, String value) {
-        setString(storeColumn, value);
+        int columnId = ndbRecordKeys.setString(keyBuffer, bufferManager, storeColumn, value);
+        columnSet(columnId);
     }
 
     public void getBlob(Column storeColumn) {
-        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented", "getBlob"));
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+                "NdbRecordOperationImpl.getBlob(Column)"));
     }
 
     /**
@@ -167,9 +181,11 @@ class NdbRecordOperationImpl implements 
     }
 
     /** Specify the columns to be used for the operation.
+     * This is implemented by a subclass.
      */
     public void getValue(Column storeColumn) {
-        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented", "getValue"));
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+                "NdbRecordOperationImpl.getValue(Column)"));
     }
 
     public void postExecuteCallback(Runnable callback) {
@@ -177,19 +193,23 @@ class NdbRecordOperationImpl implements 
     }
 
     /** Construct a new ResultData using the saved column data and then execute the operation.
+     * This is implemented by a subclass.
      */
     public ResultData resultData() {
-        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented", "resultData"));
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+                "NdbRecordOperationImpl.resultData()"));
     }
 
     /** Construct a new ResultData and if requested, execute the operation.
+     * This is implemented by a subclass.
      */
     public ResultData resultData(boolean execute) {
-        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented", "resultData"));
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+                "NdbRecordOperationImpl.resultData(boolean)"));
     }
 
     public void setBigInteger(Column storeColumn, BigInteger value) {
-        int columnId = ndbRecordImpl.setBigInteger(buffer, storeColumn, value);
+        int columnId = ndbRecordValues.setBigInteger(valueBuffer, storeColumn, value);
         columnSet(columnId);
     }
 
@@ -199,52 +219,52 @@ class NdbRecordOperationImpl implements 
     }
 
     public void setByte(Column storeColumn, byte value) {
-        int columnId = ndbRecordImpl.setByte(buffer, storeColumn, value);
+        int columnId = ndbRecordValues.setByte(valueBuffer, storeColumn, value);
         columnSet(columnId);
     }
 
     public void setBytes(Column storeColumn, byte[] value) {
-        int columnId = ndbRecordImpl.setBytes(buffer, storeColumn, value);
+        int columnId = ndbRecordValues.setBytes(valueBuffer, storeColumn, value);
         columnSet(columnId);
     }
 
     public void setDecimal(Column storeColumn, BigDecimal value) {
-        int columnId = ndbRecordImpl.setDecimal(buffer, storeColumn, value);
+        int columnId = ndbRecordValues.setDecimal(valueBuffer, storeColumn, value);
         columnSet(columnId);
     }
 
     public void setDouble(Column storeColumn, Double value) {
-        int columnId = ndbRecordImpl.setDouble(buffer, storeColumn, value);
+        int columnId = ndbRecordValues.setDouble(valueBuffer, storeColumn, value);
         columnSet(columnId);
     }
 
     public void setFloat(Column storeColumn, Float value) {
-        int columnId = ndbRecordImpl.setFloat(buffer, storeColumn, value);
+        int columnId = ndbRecordValues.setFloat(valueBuffer, storeColumn, value);
         columnSet(columnId);
     }
 
     public void setInt(Column storeColumn, Integer value) {
-        int columnId = ndbRecordImpl.setInt(buffer, storeColumn, value);
+        int columnId = ndbRecordValues.setInt(valueBuffer, storeColumn, value);
         columnSet(columnId);
     }
 
     public void setLong(Column storeColumn, long value) {
-        int columnId = ndbRecordImpl.setLong(buffer, storeColumn, value);
+        int columnId = ndbRecordValues.setLong(valueBuffer, storeColumn, value);
         columnSet(columnId);
     }
 
     public void setNull(Column storeColumn) {
-        int columnId = ndbRecordImpl.setNull(buffer, storeColumn);
+        int columnId = ndbRecordValues.setNull(valueBuffer, storeColumn);
         columnSet(columnId);
     }
 
     public void setShort(Column storeColumn, Short value) {
-        int columnId = ndbRecordImpl.setShort(buffer, storeColumn, value);
+        int columnId = ndbRecordValues.setShort(valueBuffer, storeColumn, value);
         columnSet(columnId);
     }
 
     public void setString(Column storeColumn, String value) {
-        int columnId = ndbRecordImpl.setString(buffer, bufferManager, storeColumn, value);
+        int columnId = ndbRecordValues.setString(valueBuffer, bufferManager, storeColumn, value);
         columnSet(columnId);
     }
 
@@ -276,28 +296,6 @@ class NdbRecordOperationImpl implements 
         }
     }
 
-    public void beginDefinition() {
-        // allocate a buffer for the operation data
-        buffer = ByteBuffer.allocateDirect(bufferSize);
-        // use platform's native byte ordering
-        buffer.order(ByteOrder.nativeOrder());
-        mask = new byte[1 + (numberOfColumns/8)];
-    }
-
-    public void endDefinition() {
-        // create the insert operation
-        buffer.position(0);
-        buffer.limit(bufferSize);
-        // create the insert operation
-        ndbOperation = clusterTransaction.insertTuple(ndbRecordImpl.getNdbRecord(), buffer, mask, null);
-        // now set the NdbBlob into the blobs
-        for (NdbRecordBlobImpl blob: activeBlobs) {
-            if (blob != null) {
-                blob.setNdbBlob();
-            }
-        }
-    }
-
     public NdbBlob getNdbBlob(Column storeColumn) {
         NdbBlob result = ndbOperation.getBlobHandle(storeColumn.getColumnId());
         handleError(result, ndbOperation);
@@ -308,11 +306,15 @@ class NdbRecordOperationImpl implements 
      * Set this column into the mask for NdbRecord operation.
      * @param columnId the column id
      */
-    private void columnSet(int columnId) {
+    protected void columnSet(int columnId) {
         int byteOffset = columnId / 8;
         int bitInByte = columnId - (byteOffset * 8);
         mask[byteOffset] |= NdbRecordImpl.BIT_IN_BYTE_MASK[bitInByte];
         
     }
 
+    public NdbRecordImpl getValueNdbRecord() {
+        return ndbRecordValues;
+    }
+
 }
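
columnSet, shown in the last hunk, maintains the mask handed to insertTuple
and readTuple: one bit per column id, packed eight to a byte. The arithmetic
in isolation, assuming BIT_IN_BYTE_MASK[i] is simply 1 << i:

    class ColumnMaskSketch {
        // Same arithmetic as NdbRecordOperationImpl.columnSet(int).
        static void columnSet(byte[] mask, int columnId) {
            int byteOffset = columnId / 8;
            int bitInByte = columnId - (byteOffset * 8);
            mask[byteOffset] |= (byte) (1 << bitInByte);
        }

        public static void main(String[] args) {
            byte[] mask = new byte[1 + (20 / 8)];  // sized as in beginDefinition
            columnSet(mask, 10);                   // column 10: byte 1, bit 2
            System.out.printf("mask[1] = 0x%02x%n", mask[1]);  // mask[1] = 0x04
        }
    }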

=== added file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordResultDataImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordResultDataImpl.java	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordResultDataImpl.java	2012-02-08 17:27:45 +0000
@@ -0,0 +1,282 @@
+/*
+   Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+ */
+
+package com.mysql.clusterj.tie;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+
+import java.nio.ByteBuffer;
+import com.mysql.clusterj.ClusterJFatalInternalException;
+
+import com.mysql.clusterj.core.store.Blob;
+import com.mysql.clusterj.core.store.Column;
+import com.mysql.clusterj.core.store.ResultData;
+
+import com.mysql.clusterj.core.util.I18NHelper;
+import com.mysql.clusterj.core.util.Logger;
+import com.mysql.clusterj.core.util.LoggerFactoryService;
+import com.mysql.clusterj.tie.DbImpl.BufferManager;
+
+/**
+ * An implementation of ResultData that reads a single row from an NdbRecord value buffer.
+ */
+class NdbRecordResultDataImpl implements ResultData {
+
+    /** My message translator */
+    static final I18NHelper local = I18NHelper
+            .getInstance(NdbRecordResultDataImpl.class);
+
+    /** My logger */
+    static final Logger logger = LoggerFactoryService.getFactory()
+            .getInstance(NdbRecordResultDataImpl.class);
+
+    /** Flags for iterating a scan */
+    protected final int RESULT_READY = 0;
+    protected final int SCAN_FINISHED = 1;
+    protected final int CACHE_EMPTY = 2;
+
+    /** The NdbOperation that defines the result */
+    private NdbRecordOperationImpl operation = null;
+
+    /** The NdbRecordImpl that defines the buffer layout */
+    private NdbRecordImpl record = null;
+
+    /** The flag indicating that there are no more results */
+    private boolean nextDone;
+
+    /** The ByteBuffer containing the results */
+    private ByteBuffer buffer = null;
+
+    /** The buffer manager */
+    private BufferManager bufferManager;
+
+    /** Construct the NdbRecordResultDataImpl based on an NdbRecordOperationImpl,
+     * the NdbRecordImpl that defines the buffer layout, the result buffer, and
+     * the buffer manager to help with string columns.
+     * @param operation the NdbRecordOperationImpl
+     * @param ndbRecordImpl the NdbRecordImpl that defines the buffer layout
+     * @param buffer the ByteBuffer containing the results
+     * @param bufferManager the buffer manager
+     */
+    public NdbRecordResultDataImpl(NdbRecordOperationImpl operation, NdbRecordImpl ndbRecordImpl,
+            ByteBuffer buffer, BufferManager bufferManager) {
+        this.operation = operation;
+        this.record = ndbRecordImpl;
+        this.bufferManager = bufferManager;
+        this.buffer = buffer;
+    }
+
+    public boolean next() {
+        // NdbOperation has exactly zero or one result. ScanResultDataImpl handles scans...
+        // if the ndbOperation reports an error there is no result
+        int errorCode = operation.errorCode();
+        if (errorCode != 0) {
+            setNoResult();
+        }
+        if (nextDone) {
+            return false;
+        } else {
+            nextDone = true;
+            return true;
+        }
+    }
+
+    public Blob getBlob(int columnId) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+                "NdbRecordResultDataImpl.getBlob(int)"));
+    }
+
+    public Blob getBlob(Column storeColumn) {
+        return operation.getBlobHandle(storeColumn);
+    }
+
+    public boolean getBoolean(int columnId) {
+        return record.getBoolean(buffer, columnId);
+    }
+
+    public boolean getBoolean(Column storeColumn) {
+        return record.getBoolean(buffer, storeColumn.getColumnId());
+    }
+
+    public boolean[] getBooleans(int column) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+                "NdbRecordResultDataImpl.getBooleans(int)"));
+    }
+
+    public boolean[] getBooleans(Column storeColumn) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+                "NdbRecordResultDataImpl.getBooleans(Column)"));
+    }
+
+    public byte getByte(int columnId) {
+        return record.getByte(buffer, columnId);
+    }
+
+    public byte getByte(Column storeColumn) {
+        return record.getByte(buffer, storeColumn.getColumnId());
+    }
+
+    public short getShort(int columnId) {
+        return record.getShort(buffer, columnId);
+    }
+
+    public short getShort(Column storeColumn) {
+        return record.getShort(buffer, storeColumn.getColumnId());
+    }
+
+    public int getInt(int columnId) {
+        return record.getInt(buffer, columnId);
+    }
+
+    public int getInt(Column storeColumn) {
+        return getInt(storeColumn.getColumnId());
+    }
+
+    public long getLong(int columnId) {
+        return record.getLong(buffer, columnId);
+    }
+
+    public long getLong(Column storeColumn) {
+        return getLong(storeColumn.getColumnId());
+    }
+
+    public float getFloat(int columnId) {
+        return record.getFloat(buffer, columnId);
+    }
+
+    public float getFloat(Column storeColumn) {
+        return getFloat(storeColumn.getColumnId());
+    }
+
+    public double getDouble(int columnId) {
+        return record.getDouble(buffer, columnId);
+    }
+
+    public double getDouble(Column storeColumn) {
+        return getDouble(storeColumn.getColumnId());
+    }
+
+    public String getString(int columnId) {
+        return record.getString(buffer, columnId, bufferManager);
+    }
+
+    public String getString(Column storeColumn) {
+        return record.getString(buffer, storeColumn.getColumnId(), bufferManager);
+    }
+
+    public byte[] getBytes(int column) {
+        return record.getBytes(buffer, column);
+    }
+
+    public byte[] getBytes(Column storeColumn) {
+        return record.getBytes(buffer, storeColumn);
+    }
+
+    public Object getObject(int column) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+        "NdbRecordResultDataImpl.getObject(int)"));
+    }
+
+    public Object getObject(Column storeColumn) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+        "NdbRecordResultDataImpl.getObject(Column)"));
+    }
+
+    public boolean wasNull(Column storeColumn) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+        "NdbRecordResultDataImpl.wasNull(Column)"));
+    }
+
+    public Boolean getObjectBoolean(int column) {
+        return record.getObjectBoolean(buffer, column);
+    }
+
+    public Boolean getObjectBoolean(Column storeColumn) {
+        return record.getObjectBoolean(buffer, storeColumn.getColumnId());
+    }
+
+    public Byte getObjectByte(int columnId) {
+        return record.getObjectByte(buffer, columnId);
+    }
+
+    public Byte getObjectByte(Column storeColumn) {
+        return record.getObjectByte(buffer, storeColumn.getColumnId());
+    }
+
+    public Float getObjectFloat(int column) {
+        return record.getObjectFloat(buffer, column);
+    }
+
+    public Float getObjectFloat(Column storeColumn) {
+        return record.getObjectFloat(buffer, storeColumn.getColumnId());
+    }
+
+    public Double getObjectDouble(int column) {
+        return record.getObjectDouble(buffer, column);
+    }
+
+    public Double getObjectDouble(Column storeColumn) {
+        return record.getObjectDouble(buffer, storeColumn.getColumnId());
+    }
+
+    public Integer getObjectInteger(int columnId) {
+        return record.getObjectInteger(buffer, columnId);
+    }
+
+    public Integer getObjectInteger(Column storeColumn) {
+        return record.getObjectInteger(buffer, storeColumn.getColumnId());
+    }
+
+    public Long getObjectLong(int column) {
+        return record.getObjectLong(buffer, column);
+    }
+
+    public Long getObjectLong(Column storeColumn) {
+        return record.getObjectLong(buffer, storeColumn.getColumnId());
+    }
+
+    public Short getObjectShort(int columnId) {
+        return record.getObjectShort(buffer, columnId);
+    }
+
+    public Short getObjectShort(Column storeColumn) {
+        return record.getObjectShort(buffer, storeColumn.getColumnId());
+    }
+
+    public BigInteger getBigInteger(int column) {
+        return record.getBigInteger(buffer, column);
+    }
+
+    public BigInteger getBigInteger(Column storeColumn) {
+        return record.getBigInteger(buffer, storeColumn);
+    }
+
+    public BigDecimal getDecimal(int column) {
+        return record.getDecimal(buffer, column);
+    }
+
+    public BigDecimal getDecimal(Column storeColumn) {
+        return record.getDecimal(buffer, storeColumn);
+    }
+
+    public void setNoResult() {
+        nextDone = true;
+    }
+
+    public Column[] getColumns() {
+        return null;
+    }
+
+}
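
Because a primary-key operation yields at most one row, next() acts as a
one-shot latch: it returns true exactly once unless the operation reported an
error, and false on every later call. A consumer reads it like a one-row
cursor; a sketch, with the ResultData and column handle assumed from the
earlier examples:

    import com.mysql.clusterj.core.store.Column;
    import com.mysql.clusterj.core.store.ResultData;

    class ResultSketch {
        static String fetchName(ResultData rs, Column nameColumn) {
            if (rs.next()) {                      // true exactly once for a key read
                return rs.getString(nameColumn);  // decoded from the value buffer
            }
            return null;                          // no row, or an operation error
        }
    }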

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/Utility.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/Utility.java	2012-01-23 00:44:39 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/Utility.java	2012-02-08 17:27:45 +0000
@@ -83,6 +83,7 @@ public class Utility {
     static final int ooooffff = 0x0000ffff;
     static final int ooooffoo = 0x0000ff00;
     static final int ooffoooo = 0x00ff0000;
+    static final int ooffffff = 0x00ffffff;
 
     static final char[] SPACE_PAD = new char[255];
     static {
@@ -234,6 +235,19 @@ public class Utility {
             }
         }
 
+        public boolean getBoolean(Column storeColumn, int value) {
+            switch (storeColumn.getType()) {
+                case Bit:
+                    return value == 1;
+                case Tinyint:
+                    // the value is stored in the top 8 bits
+                    return (value >>> 24) == 1;
+                default:
+                    throw new ClusterJUserException(
+                            local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "boolean"));
+            }
+        }
+
         public byte getByte(Column storeColumn, NdbRecAttr ndbRecAttr) {
             switch (storeColumn.getType()) {
                 case Bit:
@@ -246,6 +260,7 @@ public class Utility {
                             local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "byte"));
             }
         }
+
         public short getShort(Column storeColumn, NdbRecAttr ndbRecAttr) {
             switch (storeColumn.getType()) {
                 case Bit:
@@ -257,6 +272,7 @@ public class Utility {
                             local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "short"));
             }
         }
+
         public int getInt(Column storeColumn, NdbRecAttr ndbRecAttr) {
             switch (storeColumn.getType()) {
                 case Bit:
@@ -272,6 +288,25 @@ public class Utility {
                             local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "int"));
             }
         }
+
+        public int getInt(Column storeColumn, int value) {
+            switch (storeColumn.getType()) {
+                case Bit:
+                case Int:
+                case Timestamp:
+                    return value;
+                case Date:
+                    // the unsigned value is stored in the top 3 bytes
+                    return value >>> 8;
+                case Time:
+                    // the signed value is stored in the top 3 bytes
+                    return value >> 8;
+                default:
+                    throw new ClusterJUserException(
+                            local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "int"));
+            }
+        }
+
         public long getLong(Column storeColumn, NdbRecAttr ndbRecAttr) {
             switch (storeColumn.getType()) {
                 case Bit:
@@ -293,6 +328,31 @@ public class Utility {
                             local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "long"));
             }
         }
+
+        public long getLong(Column storeColumn, long value) {
+            switch (storeColumn.getType()) {
+                case Bit:
+                    // the data is stored as two int values
+                    return (value >>> 32) | (value << 32);
+                case Bigint:
+                case Bigunsigned:
+                    return value;
+                case Datetime:
+                    return unpackDatetime(value);
+                case Timestamp:
+                    return (value >> 32) * 1000L;
+                case Date:
+                    // the unsigned value is stored in the top 3 bytes
+                    return unpackDate((int)(value >>> 40));
+                case Time:
+                    // the signed value is stored in the top 3 bytes
+                    return unpackTime((int)(value >> 40));
+                default:
+                    throw new ClusterJUserException(
+                            local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "long"));
+            }
+        }
+
         /** Put the low order three bytes of the input value into the ByteBuffer as a medium_value.
          * The format for medium value is always little-endian even on big-endian architectures.
          * Do not flip the buffer, as the caller will do that if needed.
@@ -365,6 +425,17 @@ public class Utility {
             return result;
         }
 
+        public int convertIntValueForStorage(Column storeColumn, int value) {
+            switch (storeColumn.getType()) {
+                case Bit:
+                case Int:
+                    return value;
+                default:
+                    throw new ClusterJUserException(local.message(
+                            "ERR_Unsupported_Mapping", storeColumn.getType(), "int"));
+            }
+        }
+
         public ByteBuffer convertValue(Column storeColumn, long value) {
             ByteBuffer result = ByteBuffer.allocateDirect(8);
             return convertValue(storeColumn, value, result);
@@ -479,6 +550,12 @@ public class Utility {
             }
         }
 
+        public long convertLongValueFromStorage(Column storeColumn,
+                long fromStorage) {
+            // decode from the storage format; the conversions are the
+            // same as getLong(Column, long) above, so delegate to it
+            return getLong(storeColumn, fromStorage);
+        }
+
     }:
         /*
          * Little Endian algorithms to convert NdbRecAttr buffer into primitive types
@@ -497,6 +574,17 @@ public class Utility {
             }
         }
 
+        public boolean getBoolean(Column storeColumn, int value) {
+            switch (storeColumn.getType()) {
+                case Bit:
+                case Tinyint:
+                    return value == 1;
+                default:
+                    throw new ClusterJUserException(
+                            local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "boolean"));
+            }
+        }
+
         public byte getByte(Column storeColumn, NdbRecAttr ndbRecAttr) {
             switch (storeColumn.getType()) {
                 case Bit:
@@ -508,6 +596,7 @@ public class Utility {
                             local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "byte"));
             }
         }
+
         public short getShort(Column storeColumn, NdbRecAttr ndbRecAttr) {
             switch (storeColumn.getType()) {
                 case Bit:
@@ -518,6 +607,7 @@ public class Utility {
                             local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "short"));
             }
         }
+
         public int getInt(Column storeColumn, NdbRecAttr ndbRecAttr) {
             switch (storeColumn.getType()) {
                 case Bit:
@@ -533,6 +623,24 @@ public class Utility {
                             local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "int"));
             }
         }
+
+        public int getInt(Column storeColumn, int value) {
+            switch (storeColumn.getType()) {
+                case Bit:
+                case Int:
+                case Timestamp:
+                    return value;
+                case Date:
+                    return value & ooffffff;
+                case Time:
+                    // propagate the sign bit from 3 byte medium_int
+                    return (value << 8) >> 8;
+                default:
+                    throw new ClusterJUserException(
+                            local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "int"));
+            }
+        }
+
         public long getLong(Column storeColumn, NdbRecAttr ndbRecAttr) {
             switch (storeColumn.getType()) {
                 case Bigint:
@@ -553,6 +661,26 @@ public class Utility {
             }
         }
 
+        public long getLong(Column storeColumn, long value) {
+            switch (storeColumn.getType()) {
+                case Bigint:
+                case Bigunsigned:
+                case Bit:
+                    return value;
+                case Datetime:
+                    return unpackDatetime(value);
+                case Timestamp:
+                    return value * 1000L;
+                case Date:
+                    return unpackDate((int)value);
+                case Time:
+                    return unpackTime((int)value);
+                default:
+                    throw new ClusterJUserException(
+                            local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "long"));
+            }
+        }
+
         /** Put the low order three bytes of the input value into the ByteBuffer as a medium_value.
          * The format for medium value is always little-endian even on big-endian architectures.
          * Do not flip the buffer, as the caller will do that if needed.
@@ -613,7 +741,18 @@ public class Utility {
                     return result;
                 default:
                     throw new ClusterJUserException(local.message(
-                            "ERR_Unsupported_Mapping", storeColumn.getType(), "short"));
+                            "ERR_Unsupported_Mapping", storeColumn.getType(), "int"));
+            }
+        }
+
+        public int convertIntValueForStorage(Column storeColumn, int value) {
+            switch (storeColumn.getType()) {
+                case Bit:
+                case Int:
+                    return value;
+                default:
+                    throw new ClusterJUserException(local.message(
+                            "ERR_Unsupported_Mapping", storeColumn.getType(), "int"));
             }
         }
 
@@ -702,6 +841,26 @@ public class Utility {
             }
         }
 
+        public long convertLongValueFromStorage(Column storeColumn, long fromStorage) {
+            switch (storeColumn.getType()) {
+                case Bigint:
+                case Bigunsigned:
+                case Bit:
+                    return fromStorage;
+                case Datetime:
+                    return unpackDatetime(fromStorage);
+                case Timestamp:
+                    return fromStorage * 1000L;
+                case Date:
+                    return unpackDate((int)fromStorage);
+                case Time:
+                    return unpackTime((int)fromStorage);
+                default:
+                    throw new ClusterJUserException(
+                            local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "long"));
+            }
+        }
+
     };
 
     /* Error codes that are not severe, and simply reflect expected conditions */
@@ -715,16 +874,21 @@ public class Utility {
 
     protected static interface EndianManager {
         public void put3byteInt(ByteBuffer byteBuffer, int value);
+        public int getInt(Column storeColumn, int value);
         public int getInt(Column storeColumn, NdbRecAttr ndbRecAttr);
         public short getShort(Column storeColumn, NdbRecAttr ndbRecAttr);
         public long getLong(Column storeColumn, NdbRecAttr ndbRecAttr);
+        public long getLong(Column storeColumn, long value);
         public byte getByte(Column storeColumn, NdbRecAttr ndbRecAttr);
         public ByteBuffer convertValue(Column storeColumn, byte value);
         public ByteBuffer convertValue(Column storeColumn, short value);
         public ByteBuffer convertValue(Column storeColumn, int value);
         public ByteBuffer convertValue(Column storeColumn, long value);
         public boolean getBoolean(Column storeColumn, NdbRecAttr ndbRecAttr);
+        public boolean getBoolean(Column storeColumn, int value);
+        public int convertIntValueForStorage(Column storeColumn, int value);
         public long convertLongValueForStorage(Column storeColumn, long value);
+        public long convertLongValueFromStorage(Column storeColumn, long fromStorage);
         public int convertByteValueForStorage(Column storeColumn, byte value);
         public int convertShortValueForStorage(Column storeColumn, short value);
     }
@@ -1823,6 +1987,10 @@ public class Utility {
         return endianManager.getBoolean(storeColumn, ndbRecAttr);
     }
 
+    public static boolean getBoolean(Column storeColumn, int value) {
+        return endianManager.getBoolean(storeColumn, value);
+    }
+
     /** Get a byte from this ndbRecAttr. 
      * 
      * @param storeColumn the Column
@@ -1853,6 +2021,10 @@ public class Utility {
         return endianManager.getInt(storeColumn, ndbRecAttr);
     }
 
+    public static int getInt(Column storeColumn, int value) {
+        return endianManager.getInt(storeColumn, value);
+    }
+
     /** Get a long from this ndbRecAttr. 
      * 
      * @param storeColumn the Column
@@ -1863,6 +2035,10 @@ public class Utility {
         return endianManager.getLong(storeColumn, ndbRecAttr);
     }
 
+    public static long getLong(Column storeColumn, long value) {
+        return endianManager.getLong(storeColumn, value);
+    }
+
     /** Convert a long value into a long for storage. The value parameter
      * may be a date (milliseconds since the epoch), a bit array, or simply a long value.
      * The storage format depends on the type of the column and the endian-ness of 
@@ -1900,4 +2076,8 @@ public class Utility {
         return endianManager.convertShortValueForStorage(storeColumn, value);
     }
 
+    public static int convertIntValueForStorage(Column storeColumn, int value) {
+        return endianManager.convertIntValueForStorage(storeColumn, value);
+    }
+
 }
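
The new getInt/getLong overloads decode values that arrive as native ints and
longs, which is what the NdbRecord read path delivers, rather than NdbRecAttr
handles. The 3-byte medium-int handling is the subtle part: Date is unsigned,
so the little-endian variant masks with ooffffff, while Time is signed and
uses the shift pair (value << 8) >> 8 to propagate the sign bit. A
self-contained demonstration of those two conversions:

    class MediumIntSketch {
        public static void main(String[] args) {
            // Little-endian Time: a signed 3-byte value in the low bytes.
            // Shifting up 8 and arithmetically back down extends the sign.
            int time = 0x00FF7F85;                 // 24-bit two's complement
            System.out.println((time << 8) >> 8);  // -32891
            // Little-endian Date: unsigned 3 bytes, so the mask suffices.
            System.out.println(0x12345678 & 0x00ffffff);  // 3430008 (0x345678)
        }
    }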

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/StressTest.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/StressTest.java	2012-01-21 02:22:20 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/StressTest.java	2012-02-08 17:27:45 +0000
@@ -17,9 +17,6 @@
 
 package testsuite.clusterj.tie;
 
-import org.junit.Ignore;
-
-@Ignore
 public class StressTest extends testsuite.clusterj.StressTest {
 
 }

=== modified file 'storage/ndb/cmake/os/WindowsCache.cmake'
--- a/storage/ndb/cmake/os/WindowsCache.cmake	2011-05-24 08:45:38 +0000
+++ b/storage/ndb/cmake/os/WindowsCache.cmake	2012-02-07 15:41:33 +0000
@@ -43,7 +43,10 @@ SET(HAVE_PTHREAD_MUTEXATTR_SETTYPE CACHE
 SET(HAVE_PTHREAD_SETSCHEDPARAM CACHE INTERNAL "")
 SET(HAVE_SUN_PREFETCH_H CACHE INTERNAL "")
 SET(HAVE___BUILTIN_FFS CACHE INTERNAL "")
+SET(HAVE___BUILTIN_CTZ CACHE INTERNAL "")
+SET(HAVE___BUILTIN_CLZ CACHE INTERNAL "")
 SET(HAVE__BITSCANFORWARD 1 CACHE INTERNAL "")
+SET(HAVE__BITSCANREVERSE 1 CACHE INTERNAL "")
 SET(HAVE_LINUX_SCHEDULING CACHE INTERNAL "")
 SET(HAVE_SOLARIS_AFFINITY CACHE INTERNAL "")
 SET(HAVE_LINUX_FUTEX CACHE INTERNAL "")

=== modified file 'storage/ndb/include/kernel/kernel_types.h'
--- a/storage/ndb/include/kernel/kernel_types.h	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/include/kernel/kernel_types.h	2012-01-28 10:11:10 +0000
@@ -77,6 +77,13 @@ struct Local_key 
   static bool isInvalid(Uint32 lk1, Uint32 lk2) {
     return ref(lk1, lk2) == ~Uint32(0);
   }
+
+  /**
+   * Can the local key be stored in a single Uint32?
+   */
+  static bool isShort(Uint32 pageId) {
+    return pageId < (1 << (32 - MAX_TUPLES_BITS));
+  }
 };
 
 class NdbOut&
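
isShort asks whether a page id leaves room for the in-page tuple index when
both are packed into one 32-bit word. A hedged Java model of the test; the
tuple-index width is an assumption here, since the real value comes from
MAX_TUPLES_BITS in ndb_limits.h:

    class LocalKeySketch {
        static final int MAX_TUPLES_BITS = 13;  // assumed width, for illustration

        // Mirrors Local_key::isShort: an unsigned test that the page id
        // fits in the 32 - MAX_TUPLES_BITS bits left over for it.
        static boolean isShort(int pageId) {
            return (pageId & 0xFFFFFFFFL) < (1L << (32 - MAX_TUPLES_BITS));
        }

        public static void main(String[] args) {
            System.out.println(isShort(1 << 18));  // true: fits in 19 bits
            System.out.println(isShort(1 << 19));  // false: needs bit 19
        }
    }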

=== modified file 'storage/ndb/include/kernel/ndb_limits.h'
--- a/storage/ndb/include/kernel/ndb_limits.h	2012-01-18 09:47:02 +0000
+++ b/storage/ndb/include/kernel/ndb_limits.h	2012-02-02 11:43:58 +0000
@@ -199,20 +199,24 @@
 #define NDBMT_BLOCK_BITS 9
 #define NDBMT_BLOCK_MASK ((1 << NDBMT_BLOCK_BITS) - 1)
 #define NDBMT_BLOCK_INSTANCE_BITS 7
+#define NDBMT_MAX_BLOCK_INSTANCES (1 << NDBMT_BLOCK_INSTANCE_BITS)
 
 #define NDB_DEFAULT_LOG_PARTS 4
-#define NDB_MAX_LOG_PARTS     4
-#define MAX_NDBMT_LQH_WORKERS NDB_MAX_LOG_PARTS
-#define MAX_NDBMT_LQH_THREADS NDB_MAX_LOG_PARTS
 
 #if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
-#define MAX_NDBMT_TC_THREADS  2
+#define NDB_MAX_LOG_PARTS          4
+#define MAX_NDBMT_TC_THREADS       2
+#define MAX_NDBMT_RECEIVE_THREADS  1
+#define MAX_NDBMT_SEND_THREADS     0
 #else
-#define MAX_NDBMT_TC_THREADS  4
+#define NDB_MAX_LOG_PARTS         16
+#define MAX_NDBMT_TC_THREADS      16
+#define MAX_NDBMT_RECEIVE_THREADS  8
+#define MAX_NDBMT_SEND_THREADS     8
 #endif
 
-#define MAX_NDBMT_SEND_THREADS    0
-#define MAX_NDBMT_RECEIVE_THREADS 1
+#define MAX_NDBMT_LQH_WORKERS NDB_MAX_LOG_PARTS
+#define MAX_NDBMT_LQH_THREADS NDB_MAX_LOG_PARTS
 
 #define NDB_FILE_BUFFER_SIZE (256*1024)
 

=== modified file 'storage/ndb/include/mgmapi/mgmapi_config_parameters.h'
--- a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h	2011-12-01 13:47:41 +0000
+++ b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h	2012-01-30 15:12:41 +0000
@@ -206,6 +206,7 @@
 #define CFG_NODE_ARBIT_RANK           200
 #define CFG_NODE_ARBIT_DELAY          201
 #define CFG_RESERVED_SEND_BUFFER_MEMORY 202
+#define CFG_EXTRA_SEND_BUFFER_MEMORY  203
 
 #define CFG_MIN_LOGLEVEL          250
 #define CFG_LOGLEVEL_STARTUP      250

=== modified file 'storage/ndb/include/ndb_config.h.in'
--- a/storage/ndb/include/ndb_config.h.in	2011-09-06 12:43:05 +0000
+++ b/storage/ndb/include/ndb_config.h.in	2012-02-23 12:37:59 +0000
@@ -38,7 +38,10 @@
 #cmakedefine HAVE_MLOCK 1
 #cmakedefine HAVE_FFS 1
 #cmakedefine HAVE___BUILTIN_FFS 1
+#cmakedefine HAVE___BUILTIN_CTZ 1
+#cmakedefine HAVE___BUILTIN_CLZ 1
 #cmakedefine HAVE__BITSCANFORWARD 1
+#cmakedefine HAVE__BITSCANREVERSE 1
 #cmakedefine HAVE_PTHREAD_MUTEXATTR_INIT 1
 #cmakedefine HAVE_PTHREAD_MUTEXATTR_SETTYPE 1
 #cmakedefine HAVE_PTHREAD_SETSCHEDPARAM 1

=== modified file 'storage/ndb/include/ndb_version.h.in'
--- a/storage/ndb/include/ndb_version.h.in	2011-11-16 08:17:17 +0000
+++ b/storage/ndb/include/ndb_version.h.in	2012-02-03 13:37:34 +0000
@@ -714,4 +714,27 @@ ndb_configurable_log_parts(Uint32 x)
   }
   return x >= NDBD_CONFIGURABLE_LOG_PARTS_72;
 }
+
+#define NDBD_128_INSTANCES_ADDRESS_70 NDB_MAKE_VERSION(7,0,31)
+#define NDBD_128_INSTANCES_ADDRESS_71 NDB_MAKE_VERSION(7,1,20)
+#define NDBD_128_INSTANCES_ADDRESS_72 NDB_MAKE_VERSION(7,2,5)
+
+static
+inline
+int
+ndbd_128_instances_address(Uint32 x)
+{
+  const Uint32 major = (x >> 16) & 0xFF;
+  const Uint32 minor = (x >>  8) & 0xFF;
+
+  if (major == 7 && minor < 2)
+  {
+    if (minor == 0)
+      return x >= NDBD_128_INSTANCES_ADDRESS_70;
+    else if (minor == 1)
+      return x >= NDBD_128_INSTANCES_ADDRESS_71;
+  }
+  return x >= NDBD_128_INSTANCES_ADDRESS_72;
+}
+
 #endif
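
ndbd_128_instances_address follows the usual NDB version-gating pattern: a
version packs major, minor, and build as (major << 16) | (minor << 8) | build,
each maintained 7.x series has its own first-fixed build, and anything newer
falls through to the 7.2 threshold. A compact model of the same logic:

    class VersionGateSketch {
        // NDB_MAKE_VERSION layout: major/minor/build in one word.
        static int makeVersion(int major, int minor, int build) {
            return (major << 16) | (minor << 8) | build;
        }

        // Mirrors ndbd_128_instances_address above.
        static boolean supports128Instances(int v) {
            int major = (v >> 16) & 0xFF;
            int minor = (v >> 8) & 0xFF;
            if (major == 7 && minor == 0) return v >= makeVersion(7, 0, 31);
            if (major == 7 && minor == 1) return v >= makeVersion(7, 1, 20);
            return v >= makeVersion(7, 2, 5);
        }

        public static void main(String[] args) {
            System.out.println(supports128Instances(makeVersion(7, 1, 19))); // false
            System.out.println(supports128Instances(makeVersion(7, 2, 5)));  // true
        }
    }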

=== modified file 'storage/ndb/include/transporter/TransporterRegistry.hpp'
--- a/storage/ndb/include/transporter/TransporterRegistry.hpp	2012-01-19 12:28:47 +0000
+++ b/storage/ndb/include/transporter/TransporterRegistry.hpp	2012-01-30 15:12:41 +0000
@@ -270,8 +270,10 @@ public:
    *
    * Argument is the value of config parameter TotalSendBufferMemory. If 0,
    * a default will be used of sum(max send buffer) over all transporters.
+   * The second argument is the value of the config parameter ExtraSendBufferMemory.
    */
-  void allocate_send_buffers(Uint64 total_send_buffer);
+  void allocate_send_buffers(Uint64 total_send_buffer,
+                             Uint64 extra_send_buffer);
 
   /**
    * Get sum of max send buffer over all transporters, to be used as a default

=== modified file 'storage/ndb/include/util/BaseString.hpp'
--- a/storage/ndb/include/util/BaseString.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/include/util/BaseString.hpp	2012-02-20 21:01:57 +0000
@@ -133,9 +133,19 @@ public:
    * Returns the index of the first occurrence of the character c.
    *
    * @params c character to look for
+   * @params pos position to start searching from
    * @returns index of character, or -1 if no character found
    */
-  ssize_t indexOf(char c) const;
+  ssize_t indexOf(char c, size_t pos = 0) const;
+
+  /**
+   * Returns the index of the first occurrence of the string needle.
+   *
+   * @params needle string to search for
+   * @params pos position to start searching from
+   * @returns index of needle, or -1 if not found
+   */
+  ssize_t indexOf(const char * needle, size_t pos = 0) const;
 
   /**
    * Returns the index of the last occurrence of the character c.

=== modified file 'storage/ndb/include/util/Bitmask.hpp'
--- a/storage/ndb/include/util/Bitmask.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/include/util/Bitmask.hpp	2012-02-07 15:41:33 +0000
@@ -20,7 +20,7 @@
 
 #include <ndb_global.h>
 
-#ifdef HAVE__BITSCANFORWARD
+#if defined(HAVE__BITSCANFORWARD) || defined(HAVE__BITSCANREVERSE)
 #include <intrin.h>
 #endif
 
@@ -93,24 +93,54 @@ public:
   static unsigned count(unsigned size, const Uint32 data[]);
 
   /**
+   * return the count of trailing zero bits inside a word
+   * undefined behaviour if none set
+   */
+  static unsigned ctz(Uint32 x);
+
+  /**
+   * return the count of leading zero bits inside a word
+   * undefined behaviour if none set
+   */
+  static unsigned clz(Uint32 x);
+
+  /**
    * return index of first bit set inside a word
    * undefined behaviour if non set
    */
   static unsigned ffs(Uint32 x);
 
   /**
+   * return index of last bit set inside a word
+   * undefined behaviour if none set
+   */
+  static unsigned fls(Uint32 x);
+
+  /**
    * find - Find first set bit, starting from 0
    * Returns NotFound when not found.
    */
   static unsigned find_first(unsigned size, const Uint32 data[]);
 
   /**
+   * find - Find last set bit.
+   * Returns NotFound when not found.
+   */
+  static unsigned find_last(unsigned size, const Uint32 data[]);
+
+  /**
    * find - Find first set bit, starting at given position.
    * Returns NotFound when not found.
    */
   static unsigned find_next(unsigned size, const Uint32 data[], unsigned n);
 
   /**
+   * find - Find previous set bit, searching downwards from the given position.
+   * Returns NotFound when not found.
+   */
+  static unsigned find_prev(unsigned size, const Uint32 data[], unsigned n);
+
+  /**
    * find - Find first set bit, starting at given position.
    * Returns NotFound when not found.
    */
@@ -358,6 +388,68 @@ BitmaskImpl::count(unsigned size, const 
 }
 
 /**
+ * return the count of trailing zero bits inside a word
+ * undefined behaviour if none set
+ */
+inline
+Uint32
+BitmaskImpl::ctz(Uint32 x)
+{
+  return ffs(x);
+}
+
+/**
+ * return the count of leading zero bits inside a word
+ * undefined behaviour if none set
+ */
+inline
+Uint32
+BitmaskImpl::clz(Uint32 x)
+{
+#if defined HAVE___BUILTIN_CLZ
+  return __builtin_clz(x);
+#elif defined(__GNUC__) && (defined(__x86_64__) || defined (__i386__))
+  asm("bsr %1,%0"
+      : "=r" (x)
+      : "rm" (x));
+  return 31 - x;
+#elif defined HAVE__BITSCANREVERSE
+  unsigned long r;
+  unsigned char res = _BitScanReverse(&r, (unsigned long)x);
+  assert(res > 0);
+  return 31 - (Uint32)r;
+#else
+  int b = 0;
+  if (!(x & 0xffff0000))
+  {
+    x <<= 16;
+    b += 16;
+  }
+  if (!(x & 0xff000000))
+  {
+    x <<= 8;
+    b += 8;
+  }
+  if (!(x & 0xf0000000))
+  {
+    x <<= 4;
+    b += 4;
+  }
+  if (!(x & 0xc0000000))
+  {
+    x <<= 2;
+    b += 2;
+  }
+  if (!(x & 0x80000000))
+  {
+    x <<= 1;
+    b += 1;
+  }
+  return b;
+#endif
+}
+
+/**
  * return index of first bit set inside a word
  * undefined behaviour if none set
  */
@@ -365,7 +457,9 @@ inline
 Uint32
 BitmaskImpl::ffs(Uint32 x)
 {
-#if defined(__GNUC__) && (defined(__x86_64__) || defined (__i386__))
+#if defined HAVE___BUILTIN_CTZ
+  return __builtin_ctz(x);
+#elif defined(__GNUC__) && (defined(__x86_64__) || defined (__i386__))
   asm("bsf %1,%0"
       : "=r" (x)
       : "rm" (x));
@@ -413,6 +507,57 @@ BitmaskImpl::ffs(Uint32 x)
 #endif
 }
 
+/**
+ * return index of last bit set inside a word
+ * undefined behaviour if none set
+ */
+inline
+Uint32
+BitmaskImpl::fls(Uint32 x)
+{
+#if defined(__GNUC__) && (defined(__x86_64__) || defined (__i386__))
+  asm("bsr %1,%0"
+      : "=r" (x)
+      : "rm" (x));
+  return x;
+#elif defined HAVE___BUILTIN_CLZ
+  return 31 - __builtin_clz(x);
+#elif defined HAVE__BITSCANREVERSE
+  unsigned long r;
+  unsigned char res = _BitScanReverse(&r, (unsigned long)x);
+  assert(res > 0);
+  return (Uint32)r;
+#else
+  int b = 31;
+  if (!(x & 0xffff0000))
+  {
+    x <<= 16;
+    b -= 16;
+  }
+  if (!(x & 0xff000000))
+  {
+    x <<= 8;
+    b -= 8;
+  }
+  if (!(x & 0xf0000000))
+  {
+    x <<= 4;
+    b -= 4;
+  }
+  if (!(x & 0xc0000000))
+  {
+    x <<= 2;
+    b -= 2;
+  }
+  if (!(x & 0x80000000))
+  {
+    x <<= 1;
+    b -= 1;
+  }
+  return b;
+#endif
+}
+
 inline unsigned
 BitmaskImpl::find_first(unsigned size, const Uint32 data[])
 {
@@ -430,8 +575,29 @@ BitmaskImpl::find_first(unsigned size, c
 }
 
 inline unsigned
+BitmaskImpl::find_last(unsigned size, const Uint32 data[])
+{
+  if (size == 0)
+    return NotFound;
+  Uint32 n = (size << 5) - 1;
+  do
+  {
+    Uint32 val = data[n >> 5];
+    if (val)
+    {
+      return n - clz(val);
+    }
+    n -= 32;
+  } while (n != 0xffffffff);
+  return NotFound;
+}
+
+inline unsigned
 BitmaskImpl::find_next(unsigned size, const Uint32 data[], unsigned n)
 {
+  assert(n <= (size << 5));
+  if (n == (size << 5)) // allow one step outside for easier use
+    return NotFound;
   Uint32 val = data[n >> 5];
   Uint32 b = n & 31;
   if (b)
@@ -457,6 +623,35 @@ BitmaskImpl::find_next(unsigned size, co
 }
 
 inline unsigned
+BitmaskImpl::find_prev(unsigned size, const Uint32 data[], unsigned n)
+{
+  if (n >= (Uint32) 0xffffffff /* -1 */) // allow one bit outside array for easier use
+    return NotFound;
+  assert(n < (size << 5));
+  Uint32 val = data[n >> 5];
+  Uint32 b = n & 31;
+  if (b < 31)
+  {
+    val <<= 31 - b;
+    if (val)
+    {
+      return n - clz(val);
+    }
+    n -= b + 1;
+  }
+
+  while (n != NotFound) {
+    val = data[n >> 5];
+    if (val)
+    {
+      return n - clz(val);
+    }
+    n -= 32;
+  }
+  return NotFound;
+}
+
+inline unsigned
 BitmaskImpl::find(unsigned size, const Uint32 data[], unsigned n)
 {
   return find_next(size, data, n);
@@ -742,6 +937,20 @@ public:
   unsigned find_next(unsigned n) const;
 
   /**
+   * find - Find last set bit, searching backwards from the end.
+   * Returns NotFound when not found.
+   */
+  static unsigned find_last(const Uint32 data[]);
+  unsigned find_last() const;
+
+  /**
+   * find - Find previous set bit, starting at n
+   * Returns NotFound when not found.
+   */
+  static unsigned find_prev(const Uint32 data[], unsigned n);
+  unsigned find_prev(unsigned n) const;
+
+  /**
    * find - Find first set bit, starting at given position.
    * Returns NotFound when not found.
    */
@@ -1025,6 +1234,34 @@ BitmaskPOD<size>::find_next(unsigned n) 
 
 template <unsigned size>
 inline unsigned
+BitmaskPOD<size>::find_last(const Uint32 data[])
+{
+  return BitmaskImpl::find_last(size, data);
+}
+
+template <unsigned size>
+inline unsigned
+BitmaskPOD<size>::find_last() const
+{
+  return BitmaskPOD<size>::find_last(rep.data);
+}
+
+template <unsigned size>
+inline unsigned
+BitmaskPOD<size>::find_prev(const Uint32 data[], unsigned n)
+{
+  return BitmaskImpl::find_prev(size, data, n);
+}
+
+template <unsigned size>
+inline unsigned
+BitmaskPOD<size>::find_prev(unsigned n) const
+{
+  return BitmaskPOD<size>::find_prev(rep.data, n);
+}
+
+template <unsigned size>
+inline unsigned
 BitmaskPOD<size>::find(const Uint32 data[], unsigned n)
 {
   return find_next(data, n);

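A minimal standalone sketch of the identities the new bit-scan primitives rely
on (ctz is implemented as ffs above, and fls(x) == 31 - clz(x)), assuming GCC's
builtins and a hypothetical local Uint32 typedef:

    #include <cassert>
    typedef unsigned int Uint32;

    // x must be non-zero for both builtins, matching the
    // "undefined behaviour if no bit is set" contract above.
    static Uint32 ctz(Uint32 x) { return (Uint32)__builtin_ctz(x); }
    static Uint32 clz(Uint32 x) { return (Uint32)__builtin_clz(x); }

    int main()
    {
      Uint32 x = 0x00F00800;        // bits 11 and 20..23 set
      assert(ctz(x) == 11);         // index of first (lowest) set bit == ffs
      assert(31 - clz(x) == 23);    // index of last (highest) set bit == fls
      return 0;
    }

find_last() and find_prev() use exactly the fls form: within a word val found
at bit position n, the nearest set bit at or below n is n - clz(val).
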
=== modified file 'storage/ndb/memcache/src/QueryPlan.cc'
--- a/storage/ndb/memcache/src/QueryPlan.cc	2011-12-16 10:04:43 +0000
+++ b/storage/ndb/memcache/src/QueryPlan.cc	2012-02-22 00:19:45 +0000
@@ -78,7 +78,11 @@ QueryPlan::QueryPlan(Ndb *my_ndb, const 
   dup_numbers(false),
   is_scan(false),
   spec(my_spec),
+  extern_store(0),
   static_flags(spec->static_flags),
+  key_record(0), 
+  val_record(0),
+  row_record(0),
   db(my_ndb)
 {
   const NdbDictionary::Column *col;

=== modified file 'storage/ndb/memcache/src/ndb_engine.c'
--- a/storage/ndb/memcache/src/ndb_engine.c	2011-12-18 23:21:21 +0000
+++ b/storage/ndb/memcache/src/ndb_engine.c	2012-02-22 10:09:58 +0000
@@ -264,9 +264,8 @@ static void ndb_destroy(ENGINE_HANDLE* h
 
   struct ndb_engine* ndb_eng = ndb_handle(handle);
   struct default_engine *def_eng = default_handle(ndb_eng);
-  int nthreads = ndb_eng->server_options.nthreads;
 
-  for(int i = 0 ; i < nthreads; i ++) {
+  for(unsigned i = 0 ; i < ndb_eng->npipelines; i ++) {
     void *p = ndb_eng->schedulers[i];
     if(p) {
       shutdown_scheduler(p);

=== modified file 'storage/ndb/ndb_configure.cmake'
--- a/storage/ndb/ndb_configure.cmake	2011-10-17 13:30:56 +0000
+++ b/storage/ndb/ndb_configure.cmake	2012-02-23 12:37:59 +0000
@@ -77,6 +77,24 @@ int main()
 }"
 HAVE___BUILTIN_FFS)
 
+CHECK_CXX_SOURCE_COMPILES("
+unsigned A = 7;
+int main()
+{
+  unsigned a = __builtin_ctz(A);
+  return 0;
+}"
+HAVE___BUILTIN_CTZ)
+
+CHECK_CXX_SOURCE_COMPILES("
+unsigned A = 7;
+int main()
+{
+  unsigned a = __builtin_clz(A);
+  return 0;
+}"
+HAVE___BUILTIN_CLZ)
+
 CHECK_C_SOURCE_COMPILES("
 #include <intrin.h>
 unsigned long A = 7;
@@ -88,6 +106,17 @@ int main()
 }"
 HAVE__BITSCANFORWARD)
 
+CHECK_C_SOURCE_COMPILES("
+#include <intrin.h>
+unsigned long A = 7;
+int main()
+{
+  unsigned long a;
+  unsigned char res = _BitScanReverse(&a, A);
+  return (int)a;
+}"
+HAVE__BITSCANREVERSE)
+
 # Linux scheduling and locking support
 CHECK_C_SOURCE_COMPILES("
 #ifndef _GNU_SOURCE

=== modified file 'storage/ndb/src/common/transporter/TransporterRegistry.cpp'
--- a/storage/ndb/src/common/transporter/TransporterRegistry.cpp	2012-01-24 06:20:13 +0000
+++ b/storage/ndb/src/common/transporter/TransporterRegistry.cpp	2012-01-30 15:12:41 +0000
@@ -250,8 +250,11 @@ TransporterRegistry::TransporterRegistry
   DBUG_VOID_RETURN;
 }
 
+#define MIN_SEND_BUFFER_SIZE (4 * 1024 * 1024)
+
 void
-TransporterRegistry::allocate_send_buffers(Uint64 total_send_buffer)
+TransporterRegistry::allocate_send_buffers(Uint64 total_send_buffer,
+                                           Uint64 extra_send_buffer)
 {
   if (!m_use_default_send_buffer)
     return;
@@ -259,6 +262,22 @@ TransporterRegistry::allocate_send_buffe
   if (total_send_buffer == 0)
     total_send_buffer = get_total_max_send_buffer();
 
+  total_send_buffer += extra_send_buffer;
+
+  if (!extra_send_buffer)
+  {
+    /**
+     * If extra send buffer memory is 0 it means we can decide on an
+     * appropriate value ourselves. We choose to ensure that the total
+     * send buffer memory is at least 4M; beyond that we simply don't
+     * add any extra send buffer memory at all.
+     */
+    if (total_send_buffer < MIN_SEND_BUFFER_SIZE)
+    {
+      total_send_buffer = (Uint64)MIN_SEND_BUFFER_SIZE;
+    }
+  }
+
   if (m_send_buffers)
   {
     /* Send buffers already allocated -> resize the buffer pages */

=== modified file 'storage/ndb/src/common/util/BaseString.cpp'
--- a/storage/ndb/src/common/util/BaseString.cpp	2011-10-21 12:36:44 +0000
+++ b/storage/ndb/src/common/util/BaseString.cpp	2012-02-20 21:15:00 +0000
@@ -297,9 +297,24 @@ BaseString::split(Vector<BaseString> &v,
 }
 
 ssize_t
-BaseString::indexOf(char c) const {
-    char *p;
-    p = strchr(m_chr, c);
+BaseString::indexOf(char c, size_t pos) const {
+
+  if (pos >= m_len)
+    return -1;
+
+    char *p = strchr(m_chr + pos, c);
+    if(p == NULL)
+	return -1;
+    return (ssize_t)(p-m_chr);
+}
+
+ssize_t
+BaseString::indexOf(const char * needle, size_t pos) const {
+
+  if (pos >= m_len)
+    return -1;
+
+    char *p = strstr(m_chr + pos, needle);
     if(p == NULL)
 	return -1;
     return (ssize_t)(p-m_chr);

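A usage sketch of the extended indexOf() overloads; the return values follow
from the strchr()/strstr() semantics above:

    BaseString s("a,b,c");
    ssize_t p1 = s.indexOf(',', 0);       // == 1, first comma
    ssize_t p2 = s.indexOf(',', p1 + 1);  // == 3, next comma, searching from pos 2
    ssize_t p3 = s.indexOf(",x", 0);      // == -1, substring not present
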
=== modified file 'storage/ndb/src/common/util/Bitmask.cpp'
--- a/storage/ndb/src/common/util/Bitmask.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/common/util/Bitmask.cpp	2012-02-07 15:41:33 +0000
@@ -368,6 +368,28 @@ test_find_fast(Result& res, const Bitmas
 template<unsigned sz>
 inline
 void
+test_find_fast_reversed(Result& res, const Bitmask<sz> & mask, unsigned iter, FUNC func)
+{
+  Uint32 sum = 0;
+  Uint64 start = NdbTick_CurrentMillisecond();
+  for (Uint32 j = 0; j<iter; j++)
+  {
+
+    for (Uint32 n = BitmaskImpl::find_last(sz, mask.rep.data);
+         n != mask.NotFound;
+         n = BitmaskImpl::find_prev(sz, mask.rep.data, n - 1))
+    {
+      sum += (* func)(n);
+    }
+  }
+  Uint64 stop = NdbTick_CurrentMillisecond();
+  res.sum += sum;
+  res.elapsed += (stop - start);
+}
+
+template<unsigned sz>
+inline
+void
 test_toArray(Result& res, const Bitmask<sz> & mask, unsigned iter, FUNC func)
 {
   Uint32 sum = 0;
@@ -412,7 +434,7 @@ do_test(Uint32 len, FUNC func, const cha
   if (func == slow)
     iter = 3000;
 
-  Result res_find, res_fast, res_toArray, res_empty;
+  Result res_find, res_fast, res_fast_reversed, res_toArray, res_empty;
   for (Uint32 i = 0; i < (10000 / len); i++)
   {
     Bitmask<8> tmp;
@@ -437,6 +459,7 @@ do_test(Uint32 len, FUNC func, const cha
     }
     test_find(res_find, tmp, iter, func);
     test_find_fast(res_fast, tmp, iter, func);
+    test_find_fast_reversed(res_fast_reversed, tmp, iter, func);
     test_toArray(res_toArray, tmp, iter, func);
     test_empty(res_empty, len, iter, func);
   }
@@ -444,7 +467,8 @@ do_test(Uint32 len, FUNC func, const cha
   res_find.elapsed = sub0(res_find.elapsed, res_empty.elapsed);
   res_toArray.elapsed = sub0(res_toArray.elapsed, res_empty.elapsed);
   res_fast.elapsed = sub0(res_fast.elapsed, res_empty.elapsed);
-  Uint64 m = x_min(res_find.elapsed, res_toArray.elapsed, res_fast.elapsed);
+  res_fast_reversed.elapsed = sub0(res_fast_reversed.elapsed, res_empty.elapsed);
+  Uint64 m = x_min(res_find.elapsed, res_toArray.elapsed, res_fast_reversed.elapsed);
   if (m == 0)
     m = 1;
 
@@ -468,6 +492,16 @@ do_test(Uint32 len, FUNC func, const cha
          (1000000 * res_toArray.elapsed / div),
          Uint32((100 * res_toArray.elapsed) / m),
          res_toArray.sum);
+  printf("reversed(%s,%s, %u)    : %llu ns/iter (%.3u%%), (sum: %u)\n",
+         dist, name, len,
+         (1000000 * res_fast_reversed.elapsed / div),
+         Uint32((100 * res_fast_reversed.elapsed) / m),
+         res_fast_reversed.sum);
   printf("\n");
 }
 

=== modified file 'storage/ndb/src/kernel/blocks/backup/BackupInit.cpp'
--- a/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp	2012-01-30 12:31:48 +0000
@@ -206,7 +206,31 @@ Backup::execREAD_CONFIG_REQ(Signal* sign
   ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_LOG_BUFFER_MEM, &szLogBuf);
   ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_WRITE_SIZE, &szWrite);
   ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_MAX_WRITE_SIZE, &maxWriteSize);
-  
+
+  if (maxWriteSize < szWrite)
+  {
+    /**
+     * max can't be lower than min
+     */
+    maxWriteSize = szWrite;
+  }
+  if ((maxWriteSize % szWrite) != 0)
+  {
+    /**
+     * max needs to be a multiple of min
+     */
+    maxWriteSize = (maxWriteSize + szWrite - 1) / szWrite;
+    maxWriteSize *= szWrite;
+  }
+
+  /**
+   * Add the min write size to the buffer size...plus the alignment padding added here and there
+   */
+  Uint32 extra = szWrite + 4 * (/* align * 512b */ 128);
+
+  szDataBuf += extra;
+  szLogBuf += extra;
+
   c_defaults.m_logBufferSize = szLogBuf;
   c_defaults.m_dataBufferSize = szDataBuf;
   c_defaults.m_minWriteSize = szWrite;
@@ -215,8 +239,12 @@ Backup::execREAD_CONFIG_REQ(Signal* sign
 
   Uint32 szMem = 0;
   ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_MEM, &szMem);
-  Uint32 noPages = (szMem + c_defaults.m_lcp_buffer_size + sizeof(Page32) - 1) 
-    / sizeof(Page32);
+
+  szMem += 3 * extra; // (data+log+lcp);
+  Uint32 noPages =
+    (szMem + sizeof(Page32) - 1) / sizeof(Page32) +
+    (c_defaults.m_lcp_buffer_size + sizeof(Page32) - 1) / sizeof(Page32);
+
   // We need to allocate an additional of 2 pages. 1 page because of a bug in
   // ArrayPool and another one for DICTTAINFO.
   c_pagePool.setSize(noPages + NO_OF_PAGES_META_FILE + 2, true); 

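The max-write-size adjustment above is the usual round-up-to-a-multiple idiom;
a worked example with hypothetical configuration values:

    Uint32 szWrite      = 262144;   // 256K minimum write size
    Uint32 maxWriteSize = 1000000;  // configured max, not a multiple of szWrite
    maxWriteSize = (maxWriteSize + szWrite - 1) / szWrite;  // == 4
    maxWriteSize *= szWrite;                                // == 1048576 (1M)
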
=== modified file 'storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp	2012-01-11 18:28:28 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp	2012-02-03 13:37:34 +0000
@@ -1894,11 +1894,16 @@ private:
   Uint32 dihGetInstanceKey(FragmentstorePtr tFragPtr) {
     ndbrequire(!tFragPtr.isNull());
     Uint32 log_part_id = tFragPtr.p->m_log_part_id;
-    Uint32 instanceKey = 1 + log_part_id % MAX_NDBMT_LQH_WORKERS;
+    Uint32 instanceKey = 1 + (log_part_id % NDBMT_MAX_BLOCK_INSTANCES);
     return instanceKey;
   }
   Uint32 dihGetInstanceKey(Uint32 tabId, Uint32 fragId);
 
+  /**
+   * Get minimum version of nodes in alive-list
+   */
+  Uint32 getMinVersion() const;
+
   bool c_2pass_inr;
 };
 

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2012-01-26 14:32:08 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2012-02-23 12:37:59 +0000
@@ -7710,7 +7710,7 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ
         }
         const Uint32 max = NGPtr.p->nodeCount;
 	
-	fragments[count++] = (NGPtr.p->m_next_log_part++ / cnoReplicas); // Store logpart first
+	fragments[count++] = (NGPtr.p->m_next_log_part++ / cnoReplicas) % globalData.ndbLogParts; // Store logpart first
 	Uint32 tmp= next_replica_node[NGPtr.i];
         for(Uint32 replicaNo = 0; replicaNo < noOfReplicas; replicaNo++)
         {
@@ -7811,7 +7811,7 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ
                                        NDB_ARRAY_SIZE(fragments_per_node));
           NGPtr.i = getNodeGroup(node);
           ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
-          fragments[count++] = NGPtr.p->m_next_log_part++;
+          fragments[count++] = (NGPtr.p->m_next_log_part++) % globalData.ndbLogParts;
           fragments[count++] = node;
           fragments_per_node[node]++;
           for (Uint32 r = 0; r<noOfReplicas; r++)
@@ -16473,6 +16473,17 @@ void Dbdih::readFragment(RWFragment* rf,
   fragPtr.p->distributionKey = TdistKey;
 
   fragPtr.p->m_log_part_id = readPageWord(rf);
+  if (!ndbd_128_instances_address(getMinVersion()))
+  {
+    jam();
+    /**
+     * Limit log-part to 0-3 as older versions didn't handle
+     *   getting requests to instances > 4
+     *   (in reality 7, I think...but that is useless as a log-part divisor anyway)
+     */
+    fragPtr.p->m_log_part_id %= 4;
+  }
+
   inc_ng_refcount(getNodeGroup(fragPtr.p->preferredPrimary));
 }//Dbdih::readFragment()
 
@@ -19407,3 +19418,25 @@ error:
   sendSignal(req->senderRef, GSN_DROP_NODEGROUP_IMPL_REF, signal,
              DropNodegroupImplRef::SignalLength, JBB);
 }
+
+Uint32
+Dbdih::getMinVersion() const
+{
+  Uint32 ver = getNodeInfo(getOwnNodeId()).m_version;
+  NodeRecordPtr specNodePtr;
+  specNodePtr.i = cfirstAliveNode;
+  do
+  {
+    jam();
+    ptrCheckGuard(specNodePtr, MAX_NDB_NODES, nodeRecord);
+    Uint32 v = getNodeInfo(specNodePtr.i).m_version;
+    if (v < ver)
+    {
+      jam();
+      ver = v;
+    }
+    specNodePtr.i = specNodePtr.p->nextNode;
+  } while (specNodePtr.i != RNIL);
+
+  return ver;
+}

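The % globalData.ndbLogParts wrap in execCREATE_FRAGMENTATION_REQ is plain
modular arithmetic: with, say, 12 log parts (a hypothetical configuration),
successive m_next_log_part counters 10, 11, 12, 13 map to log parts
10, 11, 0, 1. getMinVersion() then lets readFragment() clamp the stored log
part to 0-3 whenever any alive node runs a version too old to address more
than four instances.
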
=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhCommon.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhCommon.cpp	2011-11-16 05:47:02 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhCommon.cpp	2012-02-02 21:00:28 +0000
@@ -50,16 +50,6 @@ NdbLogPartInfo::partNoOwner(Uint32 lpno)
   return partMask.get(lpno);
 }
 
-bool
-NdbLogPartInfo::partNoOwner(Uint32 tabId, Uint32 fragId)
-{
-  Uint32 instanceKey = SimulatedBlock::getInstanceKey(tabId, fragId);
-  assert(instanceKey != 0);
-  Uint32 lpid = instanceKey - 1;
-  Uint32 lpno = partNoFromId(lpid);
-  return partNoOwner(lpno);
-}
-
 Uint32
 NdbLogPartInfo::partNoIndex(Uint32 lpno) const
 {
@@ -74,10 +64,3 @@ NdbLogPartInfo::partNoIndex(Uint32 lpno)
   assert(partNo[i] == lpno);
   return i;
 }
-
-Uint32
-NdbLogPartInfo::instanceKey(Uint32 lpno) const
-{
-  assert(lpno < LogParts);
-  return 1 + lpno;
-}

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhCommon.hpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhCommon.hpp	2011-11-16 05:47:02 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhCommon.hpp	2012-02-02 21:00:28 +0000
@@ -30,10 +30,6 @@
  *
  *   log part number = log part id % 4
  *
- * Currently instance key (1-4) is
- *
- *   instance key = 1 + log part number
- *
  * This may change, and the code (except this file) must not assume
  * any connection between log part number and instance key.
  *
@@ -49,9 +45,7 @@ struct NdbLogPartInfo {
   Bitmask<(NDB_MAX_LOG_PARTS+31)/32> partMask;
   Uint32 partNoFromId(Uint32 lpid) const;
   bool partNoOwner(Uint32 lpno) const;
-  bool partNoOwner(Uint32 tabId, Uint32 fragId);
   Uint32 partNoIndex(Uint32 lpno) const;
-  Uint32 instanceKey(Uint32 lpno) const;
 };
 
 #endif

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2012-01-26 14:32:08 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2012-02-23 12:37:59 +0000
@@ -1244,12 +1244,13 @@ void Dblqh::execREAD_CONFIG_REQ(Signal* 
 
   if (globalData.ndbLogParts != 4 &&
       globalData.ndbLogParts != 8 &&
+      globalData.ndbLogParts != 12 &&
       globalData.ndbLogParts != 16)
   {
     char buf[255];
     BaseString::snprintf(buf, sizeof(buf),
       "Trying to start with %d log parts, number of log parts can"
-      " only be set to 4, 8 or 16.",
+      " only be set to 4, 8, 12 or 16.",
       globalData.ndbLogParts);
     progError(__LINE__, NDBD_EXIT_INVALID_CONFIG, buf);
   }
@@ -1912,7 +1913,7 @@ void Dblqh::execLQHFRAGREQ(Signal* signa
     ndbrequire(ptr.p->logPartNo == logPartNo);
 
     fragptr.p->m_log_part_ptr_i = ptr.i;
-    fragptr.p->lqhInstanceKey = lpinfo.instanceKey(logPartNo);
+    fragptr.p->lqhInstanceKey = getInstanceKey(tabptr.i, req->fragId);
   }
 
   if (DictTabInfo::isOrderedIndex(tabptr.p->tableType)) {

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2012-01-26 08:15:55 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2012-02-20 21:11:31 +0000
@@ -3155,7 +3155,8 @@ handle_reorg_trigger(DiGetNodesConf * co
   {
     conf->fragId = conf->nodes[MAX_REPLICAS];
     conf->reqinfo = conf->nodes[MAX_REPLICAS+1];
-    memcpy(conf->nodes, conf->nodes+MAX_REPLICAS+2, sizeof(conf->nodes));
+    memcpy(conf->nodes, conf->nodes+MAX_REPLICAS+2,
+           sizeof(Uint32)*MAX_REPLICAS);
   }
   else
   {
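
The memcpy fix above matters because source and destination overlap in the
same array: copying sizeof(conf->nodes) bytes from conf->nodes + MAX_REPLICAS + 2
reads past the end of nodes[], while only the MAX_REPLICAS node words actually
need to move.
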
@@ -6008,14 +6009,15 @@ Dbtc::sendRemoveMarker(Signal* signal, 
   Uint32 len = 3;
 
   // currently packed signals can not address specific instance
-  bool send_unpacked = getNodeInfo(hostPtr.i).m_lqh_workers > 1;
+  Uint32 cnt_workers = getNodeInfo(hostPtr.i).m_lqh_workers;
+  bool send_unpacked = cnt_workers > 1;
   if (send_unpacked) {
     jam();
     // first word omitted
     memcpy(&signal->theData[0], &Tdata[1], (len - 1) << 2);
     Uint32 Tnode = hostPtr.i;
     Uint32 i;
-    for (i = 0; i < MAX_NDBMT_LQH_WORKERS; i++) {
+    for (i = 0; i < cnt_workers; i++) {
       // wl4391_todo skip workers not part of tx
       Uint32 instanceKey = 1 + i;
       BlockReference ref = numberToRef(DBLQH, instanceKey, Tnode);

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2011-11-16 08:17:17 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2012-01-28 10:11:10 +0000
@@ -1832,6 +1832,7 @@ int Dbtup::handleInsertReq(Signal* signa
    */
   if(mem_insert)
   {
+    terrorCode = 0;
     if (!rowid)
     {
       if (ERROR_INSERTED(4018))
@@ -1938,7 +1939,13 @@ int Dbtup::handleInsertReq(Signal* signa
       terrorCode = 1601;
       goto disk_prealloc_error;
     }
-    
+
+    if (!Local_key::isShort(frag_page_id))
+    {
+      terrorCode = 1603;
+      goto disk_prealloc_error;
+    }
+
     int ret= disk_page_prealloc(signal, fragPtr, &tmp, size);
     if (unlikely(ret < 0))
     {
@@ -2018,7 +2025,10 @@ null_check_error:
 
 mem_error:
   jam();
-  terrorCode= ZMEM_NOMEM_ERROR;
+  if (terrorCode == 0)
+  {
+    terrorCode= ZMEM_NOMEM_ERROR;
+  }
   goto update_error;
 
 log_space_error:

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp	2012-01-28 10:11:10 +0000
@@ -230,6 +230,15 @@ Dbtup::allocFragPage(Uint32 * err, Fragr
   {
     jam();
     pageId = max;
+    if (!Local_key::isShort(pageId))
+    {
+      /**
+       * TODO: remove when ACC supports 48 bit references
+       */
+      jam();
+      * err = 889;
+      return RNIL;
+    }
     Uint32 * ptr = map.set(2 * pageId);
     if (unlikely(ptr == 0))
     {

=== modified file 'storage/ndb/src/kernel/blocks/dbtux/DbtuxProxy.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxProxy.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxProxy.cpp	2012-02-02 21:00:28 +0000
@@ -144,7 +144,6 @@ DbtuxProxy::sendINDEX_STAT_IMPL_REQ(Sign
   const Uint32 instance = workerInstance(ss.m_worker);
   NdbLogPartInfo lpinfo(instance);
 
-  //XXX remove unused
   switch (req->requestType) {
   case IndexStatReq::RT_START_MON:
     /*
@@ -152,11 +151,6 @@ DbtuxProxy::sendINDEX_STAT_IMPL_REQ(Sign
      * to turn off any possible old assignment.  In MT-LQH we also have
      * to check which worker owns the frag.
      */
-    if (req->fragId != ZNIL
-        && !lpinfo.partNoOwner(req->indexId, req->fragId)) {
-      jam();
-      req->fragId = ZNIL;
-    }
     break;
   case IndexStatReq::RT_STOP_MON:
     /*
@@ -165,34 +159,8 @@ DbtuxProxy::sendINDEX_STAT_IMPL_REQ(Sign
      */
     ndbrequire(req->fragId == ZNIL);
     break;
-  case IndexStatReq::RT_SCAN_FRAG:
-    ndbrequire(req->fragId != ZNIL);
-    if (!lpinfo.partNoOwner(req->indexId, req->fragId)) {
-      jam();
-      skipReq(ss);
-      return;
-    }
-    break;
-  case IndexStatReq::RT_CLEAN_NEW:
-  case IndexStatReq::RT_CLEAN_OLD:
-  case IndexStatReq::RT_CLEAN_ALL:
-    ndbrequire(req->fragId == ZNIL);
-    break;
-  case IndexStatReq::RT_DROP_HEAD:
-    /*
-     * Only one client can do the PK-delete of the head record.  We use
-     * of course the worker which owns the assigned fragment.
-     */
-    ndbrequire(req->fragId != ZNIL);
-    if (!lpinfo.partNoOwner(req->indexId, req->fragId)) {
-      jam();
-      skipReq(ss);
-      return;
-    }
-    break;
   default:
     ndbrequire(false);
-    break;
   }
 
   sendSignal(workerRef(ss.m_worker), GSN_INDEX_STAT_IMPL_REQ,
@@ -256,36 +224,10 @@ DbtuxProxy::execINDEX_STAT_REP(Signal* s
   jamEntry();
   const IndexStatRep* rep =
     (const IndexStatRep*)signal->getDataPtr();
-  Ss_INDEX_STAT_REP& ss = ssSeize<Ss_INDEX_STAT_REP>();
-  ss.m_rep = *rep;
-  ndbrequire(signal->getLength() == IndexStatRep::SignalLength);
-  sendREQ(signal, ss);
-  ssRelease<Ss_INDEX_STAT_REP>(ss);
-}
-
-void
-DbtuxProxy::sendINDEX_STAT_REP(Signal* signal, Uint32 ssId,
-                               SectionHandle*)
-{
-  Ss_INDEX_STAT_REP& ss = ssFind<Ss_INDEX_STAT_REP>(ssId);
-
-  IndexStatRep* rep = (IndexStatRep*)signal->getDataPtrSend();
-  *rep = ss.m_rep;
-  rep->senderData = reference();
-  rep->senderData = ssId;
-
-  const Uint32 instance = workerInstance(ss.m_worker);
-  NdbLogPartInfo lpinfo(instance);
-
-  ndbrequire(rep->fragId != ZNIL);
-  if (!lpinfo.partNoOwner(rep->indexId, rep->fragId)) {
-    jam();
-    skipReq(ss);
-    return;
-  }
 
-  sendSignal(workerRef(ss.m_worker), GSN_INDEX_STAT_REP,
-             signal, IndexStatRep::SignalLength, JBB);
+  Uint32 instance = getInstanceKey(rep->indexId, rep->fragId);
+  sendSignal(numberToRef(DBTUX, instance, getOwnNodeId()),
+             GSN_INDEX_STAT_REP, signal, signal->getLength(), JBB);
 }
 
 BLOCK_FUNCTIONS(DbtuxProxy)

=== modified file 'storage/ndb/src/kernel/blocks/dbtux/DbtuxProxy.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxProxy.hpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxProxy.hpp	2012-02-02 21:00:28 +0000
@@ -69,20 +69,7 @@ protected:
   void sendINDEX_STAT_IMPL_CONF(Signal*, Uint32 ssId);
 
   // GSN_INDEX_STAT_REP
-  struct Ss_INDEX_STAT_REP : SsParallel {
-    IndexStatRep m_rep;
-    Ss_INDEX_STAT_REP() {
-      m_sendREQ = (SsFUNCREQ)&DbtuxProxy::sendINDEX_STAT_REP;
-      m_sendCONF = 0;
-    }
-    enum { poolSize = 1 };
-    static SsPool<Ss_INDEX_STAT_REP>& pool(LocalProxy* proxy) {
-      return ((DbtuxProxy*)proxy)->c_ss_INDEX_STAT_REP;
-    }
-  };
-  SsPool<Ss_INDEX_STAT_REP> c_ss_INDEX_STAT_REP;
   void execINDEX_STAT_REP(Signal*);
-  void sendINDEX_STAT_REP(Signal*, Uint32 ssId, SectionHandle*);
 };
 
 #endif

=== modified file 'storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp	2011-11-11 13:31:19 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp	2012-02-02 21:00:28 +0000
@@ -498,23 +498,27 @@ Dbtux::statMonStart(Signal* signal, Stat
   Index& index = *c_indexPool.getPtr(req->indexId);
   D("statMonStart" << V(mon));
 
-  // RT_START_MON also sends ZNIL to all non-monitoring nodes
-  if (req->fragId == ZNIL)
+  FragPtr fragPtr;
+  fragPtr.setNull();
+
+  if (req->fragId != ZNIL)
   {
     jam();
-    index.m_statFragPtrI = RNIL;
-    D("non-monitoring node");
+    findFrag(index, req->fragId, fragPtr);
   }
-  else
+
+  if (fragPtr.i != RNIL)
   {
     jam();
-    FragPtr fragPtr;
-    findFrag(index, req->fragId, fragPtr);
-    ndbrequire(fragPtr.i != RNIL);
     index.m_statFragPtrI = fragPtr.i;
     fragPtr.p->m_entryOps = 0;
     D("monitoring node" << V(index));
   }
+  else
+  {
+    jam();
+    index.m_statFragPtrI = RNIL;
+  }
 
   statMonConf(signal, mon);
 }

=== modified file 'storage/ndb/src/kernel/ndbd.cpp'
--- a/storage/ndb/src/kernel/ndbd.cpp	2012-01-16 07:14:30 +0000
+++ b/storage/ndb/src/kernel/ndbd.cpp	2012-02-03 11:00:32 +0000
@@ -214,11 +214,40 @@ init_global_memory_manager(EmulatorData 
   Uint32 sbpages = 0;
   if (globalTransporterRegistry.get_using_default_send_buffer() == false)
   {
-    Uint64 mem = globalTransporterRegistry.get_total_max_send_buffer();
+    Uint64 mem;
+    {
+      Uint32 tot_mem = 0;
+      ndb_mgm_get_int_parameter(p, CFG_TOTAL_SEND_BUFFER_MEMORY, &tot_mem);
+      if (tot_mem)
+      {
+        mem = (Uint64)tot_mem;
+      }
+      else
+      {
+        mem = globalTransporterRegistry.get_total_max_send_buffer();
+      }
+    }
+
     sbpages = Uint32((mem + GLOBAL_PAGE_SIZE - 1) / GLOBAL_PAGE_SIZE);
+
+    /**
+     * Add extra send buffer pages for NDB multithreaded case
+     */
+    {
+      Uint64 extra_mem = 0;
+      ndb_mgm_get_int64_parameter(p, CFG_EXTRA_SEND_BUFFER_MEMORY, &extra_mem);
+      Uint32 extra_mem_pages = Uint32((extra_mem + GLOBAL_PAGE_SIZE - 1) /
+                                      GLOBAL_PAGE_SIZE);
+      sbpages += mt_get_extra_send_buffer_pages(sbpages, extra_mem_pages);
+    }
+
     Resource_limit rl;
     rl.m_min = sbpages;
-    rl.m_max = sbpages;
+    /**
+     * Allow over-allocation (from SharedGlobalMemory) of up to 25% of
+     *   the total allocated SendBuffer
+     */
+    rl.m_max = sbpages + (sbpages * 25) / 100;
     rl.m_resource_id = RG_TRANSPORTER_BUFFERS;
     ed.m_mem_manager->set_resource_limit(rl);
   }

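The page and head-room arithmetic above, worked through with hypothetical
numbers (GLOBAL_PAGE_SIZE is assumed to be 32K for the example):

    Uint64 mem     = 13107200;                        // 12.5M of send buffer
    Uint64 pgsz    = 32768;                           // assumed GLOBAL_PAGE_SIZE
    Uint32 sbpages = Uint32((mem + pgsz - 1) / pgsz); // == 400 pages
    Uint32 rl_max  = sbpages + (sbpages * 25) / 100;  // == 500 pages, 25% head-room
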
=== modified file 'storage/ndb/src/kernel/vm/Configuration.cpp'
--- a/storage/ndb/src/kernel/vm/Configuration.cpp	2012-01-17 08:33:59 +0000
+++ b/storage/ndb/src/kernel/vm/Configuration.cpp	2012-01-30 15:12:41 +0000
@@ -316,7 +316,10 @@ Configuration::setupConfiguration(){
 
   Uint32 total_send_buffer = 0;
   iter.get(CFG_TOTAL_SEND_BUFFER_MEMORY, &total_send_buffer);
-  globalTransporterRegistry.allocate_send_buffers(total_send_buffer);
+  Uint64 extra_send_buffer = 0;
+  iter.get(CFG_EXTRA_SEND_BUFFER_MEMORY, &extra_send_buffer);
+  globalTransporterRegistry.allocate_send_buffers(total_send_buffer,
+                                                  extra_send_buffer);
   
   if(iter.get(CFG_DB_NO_SAVE_MSGS, &_maxErrorLogs)){
     ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, "Invalid configuration fetched", 

=== modified file 'storage/ndb/src/kernel/vm/DynArr256.cpp'
--- a/storage/ndb/src/kernel/vm/DynArr256.cpp	2011-12-23 17:22:50 +0000
+++ b/storage/ndb/src/kernel/vm/DynArr256.cpp	2012-02-07 19:40:05 +0000
@@ -44,6 +44,8 @@ struct DA256Page
 {
   struct DA256CL m_header[2];
   struct DA256Node m_nodes[30];
+
+  bool get(Uint32 node, Uint32 idx, Uint32 type_id, Uint32*& val_ptr) const;
 };
 
 #undef require
@@ -52,18 +54,31 @@ struct DA256Page
 //#define DA256_USE_PREFETCH
 #define DA256_EXTRA_SAFE
 
+#ifdef TAP_TEST
+#define UNIT_TEST
+#include "NdbTap.hpp"
+#endif
 
 #ifdef UNIT_TEST
+#include "my_sys.h"
 #ifdef USE_CALLGRIND
 #include <valgrind/callgrind.h>
 #else
 #define CALLGRIND_TOGGLE_COLLECT()
 #endif
+Uint32 verbose = 0;
 Uint32 allocatedpages = 0;
 Uint32 allocatednodes = 0;
 Uint32 releasednodes = 0;
 #endif
 
+static
+inline
+Uint32 div15(Uint32 x)
+{
+  return ((x << 8) + (x << 4) + x + 255) >> 12;
+}
+
 inline
 void
 require_impl(bool x, int line)
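
div15() replaces division by 15 with a multiply-and-shift: (x << 8) + (x << 4) + x
is 273 * x, 273 == 4095 / 15, and the + 255 corrects the rounding so the result
is exact floor division over the full 8-bit input range used by DA256Page::get().
A standalone check of that claim:

    #include <cassert>

    static unsigned div15(unsigned x)
    {
      return ((x << 8) + (x << 4) + x + 255) >> 12;  // (273 * x + 255) / 4096
    }

    int main()
    {
      for (unsigned x = 0; x < 256; x++)
        assert(div15(x) == x / 15);
      return 0;
    }
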
@@ -97,6 +112,35 @@ DynArr256Pool::init(NdbMutex* m, Uint32 
   m_mutex = m;
 }
 
+inline
+bool
+DA256Page::get(Uint32 node, Uint32 idx, Uint32 type_id, Uint32*& val_ptr) const
+{
+  Uint32 *magic_ptr, p;
+  if (idx != 255)
+  {
+    Uint32 line = div15(idx);
+    Uint32* ptr = (Uint32*)(m_nodes + node);
+
+    p = 0;
+    val_ptr = (ptr + 1 + idx + line);
+    magic_ptr =(ptr + (idx & ~15));
+  }
+  else
+  {
+    Uint32 b = (node + 1) >> 4;
+    Uint32 * ptr = (Uint32*)(m_header+b);
+
+    p = node - (b << 4) + b;
+    val_ptr = (ptr + 1 + p);
+    magic_ptr = ptr;
+  }
+
+  Uint32 magic = *magic_ptr;
+
+  return ((magic & (1 << p)) && (magic >> 16) == type_id);
+}
+
 static const Uint32 g_max_sizes[5] = { 0, 256, 65536, 16777216, ~0 };
 
 /**
@@ -142,34 +186,13 @@ DynArr256::get(Uint32 pos) const
     Uint32 page_no = ptrI >> DA256_BITS;
     Uint32 page_idx = ptrI & DA256_MASK;
     DA256Page * page = memroot + page_no;
-    
-    Uint32 *magic_ptr, p;
-    if (p0 != 255)
-    {
-      Uint32 line = ((p0 << 8) + (p0 << 4) + p0 + 255) >> 12;
-      Uint32 * ptr = (Uint32*)(page->m_nodes + page_idx);
-      
-      p = 0;
-      retVal = (ptr + 1 + p0 + line);
-      magic_ptr =(ptr + (p0 & ~15));
-    }
-    else
-    {
-      Uint32 b = (page_idx + 1) >> 4;
-      Uint32 * ptr = (Uint32*)(page->m_header+b);
-      
-      p = page_idx - (b << 4) + b;
-      retVal = (ptr + 1 + p);
-      magic_ptr = ptr;
-    }
-    
-    ptrI = *retVal;
-    Uint32 magic = *magic_ptr;
-    
-    if (unlikely(! ((magic & (1 << p)) && (magic >> 16) == type_id)))
+
+    if (unlikely(! page->get(page_idx, p0, type_id, retVal)))
       goto err;
+
+    ptrI = *retVal;
   }
-  
+
   return retVal;
 err:
   require(false);
@@ -222,31 +245,10 @@ DynArr256::set(Uint32 pos)
     Uint32 page_idx = ptrI & DA256_MASK;
     DA256Page * page = memroot + page_no;
     
-    Uint32 *magic_ptr, p;
-    if (p0 != 255)
-    {
-      Uint32 line = ((p0 << 8) + (p0 << 4) + p0 + 255) >> 12;
-      Uint32 * ptr = (Uint32*)(page->m_nodes + page_idx);
+    if (unlikely(! page->get(page_idx, p0, type_id, retVal)))
+      goto err;
 
-      p = 0;
-      magic_ptr = (ptr + (p0 & ~15));
-      retVal = (ptr + 1 + p0 + line);
-    }
-    else
-    {
-      Uint32 b = (page_idx + 1) >> 4;
-      Uint32 * ptr = (Uint32*)(page->m_header+b);
-      
-      p = page_idx - (b << 4) + b;
-      magic_ptr = ptr;
-      retVal = (ptr + 1 + p);
-    }
-     
     ptrI = * retVal;
-    Uint32 magic = *magic_ptr;
-
-    if (unlikely(! ((magic & (1 << p)) && (magic >> 16) == type_id)))
-      goto err;
   } 
   
   return retVal;
@@ -355,8 +357,7 @@ void
 DynArr256::init(ReleaseIterator &iter)
 {
   iter.m_sz = 1;
-  iter.m_pos = 0;
-  iter.m_ptr_i[0] = RNIL;
+  iter.m_pos = ~(~0U << (8 * m_head.m_sz));
   iter.m_ptr_i[1] = m_head.m_ptr_i;
   iter.m_ptr_i[2] = RNIL;
   iter.m_ptr_i[3] = RNIL;
@@ -371,92 +372,83 @@ DynArr256::init(ReleaseIterator &iter)
  * 2 - no data
  */
 Uint32
-DynArr256::release(ReleaseIterator &iter, Uint32 * retptr)
+DynArr256::truncate(Uint32 keep_pos, ReleaseIterator& iter, Uint32* ptrVal)
 {
-  Uint32 sz = iter.m_sz;
-  Uint32 ptrI = iter.m_ptr_i[sz];
-  Uint32 page_no = ptrI >> DA256_BITS;
-  Uint32 page_idx = ptrI & DA256_MASK;
   Uint32 type_id = (~m_pool.m_type_id) & 0xFFFF;
   DA256Page * memroot = m_pool.m_memroot;
-  DA256Page * page = memroot + page_no;
 
-  if (ptrI != RNIL)
+  for (;;)
   {
-    Uint32 p0 = iter.m_pos & 255;
-    for (; p0<256; p0++)
+    if (iter.m_sz == 0 ||
+        iter.m_pos < keep_pos ||
+        m_head.m_sz == 0)
     {
-      Uint32 *retVal, *magic_ptr, p;
-      if (p0 != 255)
+      return 0;
+    }
+
+    Uint32* refPtr;
+    Uint32 ptrI = iter.m_ptr_i[iter.m_sz];
+    Uint32 page_no = ptrI >> DA256_BITS;
+    Uint32 page_idx = (ptrI & DA256_MASK) ;
+    DA256Page* page = memroot + page_no;
+    Uint32 node_index = (iter.m_pos >> (8 * (m_head.m_sz - iter.m_sz))) & 255;
+    bool is_value = (iter.m_sz == m_head.m_sz);
+
+    if (unlikely(! page->get(page_idx, node_index, type_id, refPtr)))
+    {
+      require(false);
+    }
+    assert(refPtr != NULL);
+    *ptrVal = *refPtr;
+
+    if (iter.m_sz == 1 &&
+        (iter.m_pos >> (8 * (m_head.m_sz - iter.m_sz))) == 0)
+    {
+      assert(iter.m_ptr_i[iter.m_sz] == m_head.m_ptr_i);
+      assert(iter.m_ptr_i[iter.m_sz + 1] == RNIL);
+      iter.m_ptr_i[iter.m_sz] = is_value ? RNIL : *refPtr;
+      m_pool.release(m_head.m_ptr_i);
+      m_head.m_sz --;
+      m_head.m_ptr_i = iter.m_ptr_i[iter.m_sz];
+      return is_value ? 1 : 2;
+    }
+
+    if (is_value || iter.m_ptr_i[iter.m_sz + 1] == *refPtr)
+    { // sz--
+      Uint32 ptrI = *refPtr;
+      if (!is_value)
       {
-	Uint32 line = ((p0 << 8) + (p0 << 4) + p0 + 255) >> 12;
-	Uint32 * ptr = (Uint32*)(page->m_nodes + page_idx);
-	
-	p = 0;
-	retVal = (ptr + 1 + p0 + line);
-	magic_ptr =(ptr + (p0 & ~15));
+        if (ptrI != RNIL)
+        {
+          m_pool.release(ptrI);
+          *refPtr = iter.m_ptr_i[iter.m_sz+1] = RNIL;
+        }
       }
-      else
+      if (node_index == 0)
       {
-	Uint32 b = (page_idx + 1) >> 4;
-	Uint32 * ptr = (Uint32*)(page->m_header+b);
-	
-	p = page_idx - (b << 4) + b;
-	retVal = (ptr + 1 + p);
-	magic_ptr = ptr;
+        iter.m_sz --;
       }
-      
-      Uint32 magic = *magic_ptr;
-      Uint32 val = *retVal;
-      if (unlikely(! ((magic & (1 << p)) && (magic >> 16) == type_id)))
-	goto err;
-      
-      if (sz == m_head.m_sz)
+      else if (!is_value && ptrI == RNIL)
       {
-	* retptr = val;
-	p0++;
-	if (p0 != 256)
-	{
-	  /**
-	   * Move next
-	   */
-	  iter.m_pos &= ~(Uint32)255;
-	  iter.m_pos |= p0;
-	}
-	else
-	{
-	  /**
-	   * Move up
-	   */
-	  m_pool.release(ptrI);
-	  iter.m_sz --;
-	  iter.m_pos >>= 8;
-	}
-	return 1;
+        assert((~iter.m_pos & ~(0xffffffff << (8 * (m_head.m_sz - iter.m_sz)))) == 0);
+        iter.m_pos -= 1U << (8 * (m_head.m_sz - iter.m_sz));
       }
-      else if (val != RNIL)
+      else
       {
-	iter.m_sz++;
-	iter.m_ptr_i[iter.m_sz] = val;
-	iter.m_pos = (p0 << 8);
-	* retVal = RNIL;
-	return 2;
+        assert((iter.m_pos & ~(0xffffffff << (8 * (m_head.m_sz - iter.m_sz)))) == 0);
+        iter.m_pos --;
       }
+      if (is_value)
+        return 1;
+    }
+    else
+    { // sz++
+      assert(iter.m_ptr_i[iter.m_sz + 1] == RNIL);
+      iter.m_sz ++;
+      iter.m_ptr_i[iter.m_sz] = *refPtr;
+      return 2;
     }
-    
-    assert(p0 == 256);
-    m_pool.release(ptrI);
-    iter.m_sz --;
-    iter.m_pos >>= 8;
-    return 2;
   }
-  
-  new (&m_head) Head();
-  return 0;
-  
-err:
-  require(false);
-  return false;
 }
 
 static
@@ -638,10 +630,10 @@ DynArr256Pool::release(Uint32 ptrI)
 #ifdef UNIT_TEST
 
 static
-void
+bool
 simple(DynArr256 & arr, int argc, char* argv[])
 {
-  ndbout_c("argc: %d", argc);
+  if (verbose) ndbout_c("argc: %d", argc);
   for (Uint32 i = 1; i<(Uint32)argc; i++)
   {
     Uint32 * s = arr.set(atoi(argv[i]));
@@ -661,12 +653,13 @@ simple(DynArr256 & arr, int argc, char* 
     
     Uint32 * g = arr.get(atoi(argv[i]));
     Uint32 v = g ? *g : ~0;
-    ndbout_c("p: %p %p %d", s, g, v);
+    if (verbose) ndbout_c("p: %p %p %d", s, g, v);
   }
+  return true;
 }
 
 static
-void
+bool
 basic(DynArr256& arr, int argc, char* argv[])
 {
 #define MAXLEN 65536
@@ -723,29 +716,19 @@ basic(DynArr256& arr, int argc, char* ar
     }
     }
   }
-}
-
-unsigned long long 
-micro()
-{
-  struct timeval tv;
-  gettimeofday(&tv, 0);
-  unsigned long long ret = tv.tv_sec;
-  ret *= 1000000;
-  ret += tv.tv_usec;
-  return ret;
+  return true;
 }
 
 static
-void
+bool
 read(DynArr256& arr, int argc, char ** argv)
 {
   Uint32 cnt = 100000;
   Uint64 mbytes = 16*1024;
-  Uint32 seed = time(0);
+  Uint32 seed = (Uint32) time(0);
   Uint32 seq = 0, seqmask = 0;
 
-  for (Uint32 i = 1; i<argc; i++)
+  for (int i = 1; i < argc; i++)
   {
     if (strncmp(argv[i], "--mbytes=", sizeof("--mbytes=")-1) == 0)
     {
@@ -767,10 +750,17 @@ read(DynArr256& arr, int argc, char ** a
   /**
    * Populate with 5Mb
    */
-  Uint32 maxidx = (1024*mbytes+31) / 32;
+
+  if (mbytes >= 134217720)
+  {
+    ndberr.println("--mbytes must be less than 134217720");
+    return false;
+  }
+  Uint32 maxidx = (Uint32)((1024*mbytes+31) / 32);
   Uint32 nodes = (maxidx+255) / 256;
   Uint32 pages = (nodes + 29)/ 30;
-  ndbout_c("%lldmb data -> %d entries (%dkb)",
+  if (verbose)
+    ndbout_c("%lldmb data -> %d entries (%dkb)",
 	   mbytes, maxidx, 32*pages);
   
   for (Uint32 i = 0; i<maxidx; i++)
@@ -788,13 +778,14 @@ read(DynArr256& arr, int argc, char ** a
     seqmask = ~(Uint32)0;
   }
 
-  ndbout_c("Timing %d %s reads (seed: %u)", cnt, 
+  if (verbose)
+    ndbout_c("Timing %d %s reads (seed: %u)", cnt,
 	   seq ? "sequential" : "random", seed);
 
   for (Uint32 i = 0; i<10; i++)
   {
     Uint32 sum0 = 0, sum1 = 0;
-    Uint64 start = micro();
+    Uint64 start = my_micro_time();
     for (Uint32 i = 0; i<cnt; i++)
     {
       Uint32 idx = ((rand() & (~seqmask)) + ((i + seq) & seqmask)) % maxidx;
@@ -802,22 +793,24 @@ read(DynArr256& arr, int argc, char ** a
       sum0 += idx;
       sum1 += *ptr;
     }
-    start = micro() - start;
-    float uspg = start; uspg /= cnt;
-    ndbout_c("Elapsed %lldus diff: %d -> %f us/get", start, sum0 - sum1, uspg);
+    start = my_micro_time() - start;
+    float uspg = (float)start; uspg /= cnt;
+    if (verbose)
+      ndbout_c("Elapsed %lldus diff: %d -> %f us/get", start, sum0 - sum1, uspg);
   }
+  return true;
 }
 
 static
-void
+bool
 write(DynArr256& arr, int argc, char ** argv)
 {
   Uint32 seq = 0, seqmask = 0;
   Uint32 cnt = 100000;
   Uint64 mbytes = 16*1024;
-  Uint32 seed = time(0);
+  Uint32 seed = (Uint32) time(0);
 
-  for (Uint32 i = 1; i<argc; i++)
+  for (int i = 1; i<argc; i++)
   {
     if (strncmp(argv[i], "--mbytes=", sizeof("--mbytes=")-1) == 0)
     {
@@ -839,10 +832,17 @@ write(DynArr256& arr, int argc, char ** 
   /**
    * Populate with 5Mb
    */
-  Uint32 maxidx = (1024*mbytes+31) / 32;
+
+  if (mbytes >= 134217720)
+  {
+    ndberr.println("--mbytes must be less than 134217720");
+    return false;
+  }
+  Uint32 maxidx = (Uint32)((1024*mbytes+31) / 32);
   Uint32 nodes = (maxidx+255) / 256;
   Uint32 pages = (nodes + 29)/ 30;
-  ndbout_c("%lldmb data -> %d entries (%dkb)",
+  if (verbose)
+    ndbout_c("%lldmb data -> %d entries (%dkb)",
 	   mbytes, maxidx, 32*pages);
 
   srand(seed);
@@ -853,25 +853,28 @@ write(DynArr256& arr, int argc, char ** 
     seqmask = ~(Uint32)0;
   }
 
-  ndbout_c("Timing %d %s writes (seed: %u)", cnt, 
+  if (verbose)
+    ndbout_c("Timing %d %s writes (seed: %u)", cnt,
 	   seq ? "sequential" : "random", seed);
   for (Uint32 i = 0; i<10; i++)
   {
-    Uint64 start = micro();
+    Uint64 start = my_micro_time();
     for (Uint32 i = 0; i<cnt; i++)
     {
       Uint32 idx = ((rand() & (~seqmask)) + ((i + seq) & seqmask)) % maxidx;
       Uint32 *ptr = arr.set(idx);
       *ptr = i;
     }
-    start = micro() - start;
-    float uspg = start; uspg /= cnt;
-    ndbout_c("Elapsed %lldus -> %f us/set", start, uspg);
+    start = my_micro_time() - start;
+    float uspg = (float)start; uspg /= cnt;
+    if (verbose)
+      ndbout_c("Elapsed %lldus -> %f us/set", start, uspg);
     DynArr256::ReleaseIterator iter;
     arr.init(iter);
     Uint32 val;
     while(arr.release(iter, &val));
   }
+  return true;
 }
 
 static
@@ -889,13 +892,37 @@ usage(FILE *f, int argc, char **argv)
 
 # include "test_context.hpp"
 
+#ifdef TAP_TEST
+static
+char* flatten(int argc, char** argv) /* NOT MT-SAFE */
+{
+  static char buf[10000];
+  size_t off = 0;
+  for (; argc > 0; argc--, argv++)
+  {
+    int i = 0;
+    if (off > 0 && (off + 1 < sizeof(buf)))
+      buf[off++] = ' ';
+    for (i = 0; (off + 1 < sizeof(buf)) && argv[0][i] != 0; i++, off++)
+      buf[off] = argv[0][i];
+    buf[off] = 0;
+  }
+  return buf;
+}
+#endif
+
 int
 main(int argc, char** argv)
 {
+#ifndef TAP_TEST
+  verbose = 1;
   if (argc == 1) {
     usage(stderr, argc, argv);
     exit(2);
   }
+#else
+  verbose = 0;
+#endif
 
   Pool_context pc = test_context(10000 /* pages */);
 
@@ -905,6 +932,42 @@ main(int argc, char** argv)
   DynArr256::Head head;
   DynArr256 arr(pool, head);
 
+#ifdef TAP_TEST
+  if (argc == 1)
+  {
+    char *argv[2] = { (char*)"dummy", NULL };
+    plan(5);
+    ok(simple(arr, 1, argv), "simple");
+    ok(basic(arr, 1, argv), "basic");
+    ok(read(arr, 1, argv), "read");
+    ok(write(arr, 1, argv), "write");
+  }
+  else if (strcmp(argv[1], "--simple") == 0)
+  {
+    plan(2);
+    ok(simple(arr, argc - 1, argv + 1), "simple %s", flatten(argc - 1, argv + 1));
+  }
+  else if (strcmp(argv[1], "--basic") == 0)
+  {
+    plan(2);
+    ok(basic(arr, argc - 1, argv + 1), "basic %s", flatten(argc - 1, argv + 1));
+  }
+  else if (strcmp(argv[1], "--read") == 0)
+  {
+    plan(2);
+    ok(read(arr, argc - 1, argv + 1), "read %s", flatten(argc - 1, argv + 1));
+  }
+  else if (strcmp(argv[1], "--write") == 0)
+  {
+    plan(2);
+    ok(write(arr, argc - 1, argv + 1), "write %s", flatten(argc - 1, argv + 1));
+  }
+  else
+  {
+    usage(stderr, argc, argv);
+    BAIL_OUT("Bad usage: %s %s", argv[0], flatten(argc - 1, argv + 1));
+  }
+#else
   if (strcmp(argv[1], "--simple") == 0)
     simple(arr, argc - 1, argv + 1);
   else if (strcmp(argv[1], "--basic") == 0)
@@ -918,34 +981,26 @@ main(int argc, char** argv)
     usage(stderr, argc, argv);
     exit(2);
   }
+#endif
 
   DynArr256::ReleaseIterator iter;
   arr.init(iter);
   Uint32 cnt = 0, val;
   while (arr.release(iter, &val)) cnt++;
   
-  ndbout_c("allocatedpages: %d allocatednodes: %d releasednodes: %d"
+  if (verbose)
+    ndbout_c("allocatedpages: %d allocatednodes: %d releasednodes: %d"
 	   " releasecnt: %d",
 	   allocatedpages, 
 	   allocatednodes,
 	   releasednodes,
 	   cnt);
-  
+#ifdef TAP_TEST
+  ok(allocatednodes == releasednodes, "release");
+  return exit_status();
+#else
   return 0;
-}
-
 #endif
-
-#ifdef TAP_TEST
-#include <NdbTap.hpp>
-#include "test_context.hpp"
-
-TAPTEST(DynArr256)
-{
-  Pool_context pc = test_context(100);
-
-  OK(true);
-
-  return 1;
 }
+
 #endif

=== modified file 'storage/ndb/src/kernel/vm/DynArr256.hpp'
--- a/storage/ndb/src/kernel/vm/DynArr256.hpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/kernel/vm/DynArr256.hpp	2012-02-07 19:40:05 +0000
@@ -79,6 +79,7 @@ public:
    *        2 - nodata
    */
   Uint32 release(ReleaseIterator&, Uint32* retptr);
+  Uint32 truncate(Uint32 keep_pos, ReleaseIterator&, Uint32* retptr);
 protected:
   Head & m_head;
   DynArr256Pool & m_pool;
@@ -87,4 +88,10 @@ protected:
   void handle_invalid_ptr(Uint32 pos, Uint32 ptrI, Uint32 p0);
 };
 
+inline
+Uint32 DynArr256::release(ReleaseIterator& iter, Uint32* retptr)
+{
+  return truncate(0, iter, retptr);
+}
+
 #endif

=== modified file 'storage/ndb/src/kernel/vm/Emulator.hpp'
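With release() now defined in terms of truncate(0, ...), the release loop used
by callers (and by the unit test above) is unchanged; a usage sketch with the
return codes documented in the header (0 - done, 1 - data, 2 - no data):

    DynArr256::ReleaseIterator iter;
    arr.init(iter);
    Uint32 val;
    while (arr.release(iter, &val))  // i.e. arr.truncate(0, iter, &val)
      ;
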
--- a/storage/ndb/src/kernel/vm/Emulator.hpp	2012-01-19 06:21:05 +0000
+++ b/storage/ndb/src/kernel/vm/Emulator.hpp	2012-01-30 14:28:55 +0000
@@ -78,6 +78,12 @@ struct EmulatorData {
 extern struct EmulatorData globalEmulatorData;
 
 /**
+ * Get number of extra send buffer pages to use
+ */
+Uint32 mt_get_extra_send_buffer_pages(Uint32 curr_num_pages,
+                                      Uint32 extra_mem_pages);
+
+/**
  * Compute no of pages to be used as job-buffer
  */
 Uint32 compute_jb_pages(struct EmulatorData* ed);

=== modified file 'storage/ndb/src/kernel/vm/SimulatedBlock.hpp'
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp	2012-01-04 14:25:32 +0000
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp	2012-02-02 11:43:58 +0000
@@ -635,10 +635,7 @@ private:
    * are real LQHs run by multiple threads.
    */
 protected:
-  enum { MaxInstances = 3 +
-         MAX_NDBMT_TC_THREADS +
-         MAX_NDBMT_LQH_WORKERS +
-         MAX_NDBMT_RECEIVE_THREADS };
+  enum { MaxInstances = NDBMT_MAX_BLOCK_INSTANCES };
 private:
   SimulatedBlock** theInstanceList; // set in main, indexed by instance
   SimulatedBlock* theMainInstance;  // set in all

=== modified file 'storage/ndb/src/kernel/vm/dummy_nonmt.cpp'
--- a/storage/ndb/src/kernel/vm/dummy_nonmt.cpp	2011-10-07 18:15:59 +0000
+++ b/storage/ndb/src/kernel/vm/dummy_nonmt.cpp	2012-01-30 15:12:41 +0000
@@ -45,6 +45,15 @@ mt_get_instance_count(Uint32 block)
 }
 
 Uint32
+mt_get_extra_send_buffer_pages(Uint32 curr_num_pages,
+                               Uint32 extra_mem_pages)
+{
+  (void)curr_num_pages;
+  (void)extra_mem_pages;
+  return 0;
+}
+
+Uint32
 compute_jb_pages(struct EmulatorData*)
 {
   return 0;

=== modified file 'storage/ndb/src/kernel/vm/mt.cpp'
--- a/storage/ndb/src/kernel/vm/mt.cpp	2012-01-25 18:06:21 +0000
+++ b/storage/ndb/src/kernel/vm/mt.cpp	2012-02-20 09:14:06 +0000
@@ -84,7 +84,6 @@ static const Uint32 MAX_SIGNALS_BEFORE_W
                            MAX_NDBMT_LQH_THREADS +  \
                            MAX_NDBMT_TC_THREADS +   \
                            MAX_NDBMT_RECEIVE_THREADS)
-#define MAX_BLOCK_INSTANCES (MAX_BLOCK_THREADS+1)
 
 /* If this is too small it crashes before first signal. */
 #define MAX_INSTANCES_PER_THREAD (16 + 8 * MAX_NDBMT_LQH_THREADS)
@@ -144,6 +143,7 @@ futex_wake(volatile unsigned * addr)
 struct thr_wait
 {
   volatile unsigned m_futex_state;
+  char padding[NDB_CL_PADSZ(sizeof(unsigned))];
   enum {
     FS_RUNNING = 0,
     FS_SLEEPING = 1
@@ -218,9 +218,10 @@ wakeup(struct thr_wait* wait)
 
 struct thr_wait
 {
-  bool m_need_wakeup;
   NdbMutex *m_mutex;
   NdbCondition *m_cond;
+  bool m_need_wakeup;
+  char padding[NDB_CL_PADSZ(sizeof(bool) + (2*sizeof(void*)))];
   thr_wait() : m_need_wakeup(false), m_mutex(0), m_cond(0) {}
 
   void init() {
@@ -436,8 +437,9 @@ struct thr_safe_pool
     return ret;
   }
 
-  T* seize_list(Ndbd_mem_manager *mm, Uint32 rg,
-                Uint32 requested, Uint32 * received) {
+  Uint32 seize_list(Ndbd_mem_manager *mm, Uint32 rg,
+                    Uint32 requested, T** head, T** tail)
+  {
     lock(&m_lock);
     if (m_cnt == 0)
     {
@@ -449,14 +451,13 @@ struct thr_safe_pool
 
       if (ret == 0)
       {
-        * received = 0;
         return 0;
       }
       else
       {
         ret->m_next = 0;
-        * received = 1;
-        return ret;
+        * head = * tail = ret;
+        return 1;
       }
     }
     else
@@ -474,8 +475,9 @@ struct thr_safe_pool
       m_free_list = last->m_next;
       unlock(&m_lock);
       last->m_next = 0;
-      * received = requested;
-      return first;
+      * head = first;
+      * tail = last;
+      return requested;
     }
   }
 
@@ -518,7 +520,8 @@ public:
     T *tmp = m_freelist;
     if (tmp == 0)
     {
-      tmp = m_global_pool->seize_list(mm, rg, m_alloc_size, &m_free);
+      T * tail;
+      m_free = m_global_pool->seize_list(mm, rg, m_alloc_size, &tmp, &tail);
     }
     if (tmp)
     {
@@ -632,6 +635,30 @@ public:
       release_all(mm, rg);
   }
 
+  /**
+   * prealloc up to <em>cnt</em> pages into this pool
+   */
+  bool fill(Ndbd_mem_manager *mm, Uint32 rg, Uint32 cnt)
+  {
+    if (m_free >= cnt)
+    {
+      return true;
+    }
+
+    T *head, *tail;
+    Uint32 allocated = m_global_pool->seize_list(mm, rg, m_alloc_size,
+                                                 &head, &tail);
+    if (allocated)
+    {
+      tail->m_next = m_freelist;
+      m_freelist = head;
+      m_free += allocated;
+      return m_free >= cnt;
+    }
+
+    return false;
+  }
+
   void set_pool(thr_safe_pool<T> * pool) { m_global_pool = pool; }
 
 private:
@@ -802,6 +829,28 @@ struct thr_tq
   Uint32 m_long_queue[LQ_SIZE];
 };
 
+/**
+ * THR_SEND_BUFFER_ALLOC_SIZE is the number of 32k pages allocated
+ * at a time when we move pages from the global pool of send buffers to
+ * the thread_local_pool (which is local to a thread).
+ *
+ * We allocate a batch at a time to decrease contention on the
+ * send-buffer pool mutex.
+ */
+#define THR_SEND_BUFFER_ALLOC_SIZE 32
+
+/**
+ * THR_SEND_BUFFER_PRE_ALLOC is the number of 32k pages that are
+ *   allocated before we start to run signals
+ */
+#define THR_SEND_BUFFER_PRE_ALLOC 32
+
+/**
+ * Maximum number of pages that are allowed to linger in a
+ * thread-local send-buffer pool
+ */
+#define THR_SEND_BUFFER_MAX_FREE \
+  (THR_SEND_BUFFER_ALLOC_SIZE + THR_SEND_BUFFER_PRE_ALLOC - 1)
+
 /*
  * Max number of thread-local job buffers to keep before releasing to
  * global pool.
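
With the values above a thread keeps at most THR_SEND_BUFFER_ALLOC_SIZE +
THR_SEND_BUFFER_PRE_ALLOC - 1 = 63 local pages; at 32k per page that is just
under 2M of send buffer that can linger per thread before release_global()
hands pages back to the global pool.
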
@@ -872,9 +921,48 @@ struct thr_send_queue
 struct thr_data
 {
   thr_data() : m_jba_write_lock("jbalock"),
-               m_send_buffer_pool(0, THR_FREE_BUF_MAX) {}
+               m_send_buffer_pool(0,
+                                  THR_SEND_BUFFER_MAX_FREE,
+                                  THR_SEND_BUFFER_ALLOC_SIZE) {}
+
+  /**
+   * We start with the data structures that are shared globally to
+   * ensure that they get the proper padding
+   */
+  thr_wait m_waiter; /* Cacheline aligned*/
+
+  /*
+   * Prio A signal incoming queue. This area is used from many threads
+   * protected by the spin lock. Thus it is also important to protect
+   * surrounding thread-local variables from CPU cache line sharing
+   * with this part.
+   */
+  struct thr_spin_lock<64> m_jba_write_lock;
+
+  struct thr_job_queue m_jba; /* aligned */
+  struct thr_job_queue_head m_jba_head;
+  char unused_protection1[NDB_CL_PADSZ(sizeof(struct thr_job_queue_head))];
+
+  /*
+   * These are the thread input queues, where other threads deliver signals
+   * into.
+   * Protect the m_in_queue_head by empty cache line to ensure that we don't
+   * get false CPU cacheline sharing. These cache lines are going to be
+   * updated by many different CPU's all the time whereas other neighbour
+   * variables are thread-local variables.
+   */
+  struct thr_job_queue_head m_in_queue_head[MAX_BLOCK_THREADS];
+  char unused_protection2[
+    NDB_CL_PADSZ(MAX_BLOCK_THREADS*sizeof(struct thr_job_queue_head))];
+  struct thr_job_queue m_in_queue[MAX_BLOCK_THREADS];
+
+  /**
+   * The remainder of the variables in thr_data are thread-local,
+   * meaning that they are always updated by the thread that owns those
+   * data structures and thus those variables aren't shared with other
+   * CPUs.
+   */
 
-  thr_wait m_waiter;
   unsigned m_thr_no;
 
   /**
@@ -890,19 +978,6 @@ struct thr_data
   Uint64 m_time;
   struct thr_tq m_tq;
 
-  /* Prio A signal incoming queue. */
-  struct thr_spin_lock<64> m_jba_write_lock;
-  struct thr_job_queue m_jba;
-
-  struct thr_job_queue_head m_jba_head;
-
-  /* Thread-local read state of prio A buffer. */
-  struct thr_jb_read_state m_jba_read_state;
-  /*
-   * There is no m_jba_write_state, as we have multiple writers to the prio A
-   * queue, so local state becomes invalid as soon as we release the lock.
-   */
-
   /*
    * In m_next_buffer we keep a free buffer at all times, so that when
    * we hold the lock and find we need a new buffer, we can use this and this
@@ -921,12 +996,15 @@ struct thr_data
   /* m_first_unused is the first unused entry in m_free_fifo. */
   Uint32 m_first_unused;
 
+
+  /* Thread-local read state of prio A buffer. */
+  struct thr_jb_read_state m_jba_read_state;
+
   /*
-   * These are the thread input queues, where other threads deliver signals
-   * into.
+   * There is no m_jba_write_state, as we have multiple writers to the prio A
+   * queue, so local state becomes invalid as soon as we release the lock.
    */
-  struct thr_job_queue_head m_in_queue_head[MAX_BLOCK_THREADS];
-  struct thr_job_queue m_in_queue[MAX_BLOCK_THREADS];
+
   /* These are the write states of m_in_queue[self] in each thread. */
   struct thr_jb_write_state m_write_states[MAX_BLOCK_THREADS];
   /* These are the read states of all of our own m_in_queue[]. */
@@ -977,6 +1055,13 @@ struct thr_data
   NdbThread* m_thread;
 };
 
+struct thr_data_aligned
+{
+  struct thr_data m_thr_data;
+  /* Ensure that the thr_data is aligned on a cacheline boundary */
+  char unused_protection[NDB_CL_PADSZ(sizeof(struct thr_data))];
+};
+
 struct mt_send_handle  : public TransporterSendBufferHandle
 {
   struct thr_data * m_selfptr;
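
The thr_data_aligned wrapper and the various padding members follow the same
false-sharing guard pattern used throughout this file; a minimal sketch of the
idea, assuming a 64-byte cache line (NDB_CL_PADSZ is taken here to expand to
the filler needed to round its argument up to a full cache line):

    struct padded_counter
    {
      volatile unsigned value;              // updated by many CPUs
      char padding[64 - sizeof(unsigned)];  // keep neighbours off this cache line
    };
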
@@ -1017,18 +1102,29 @@ struct thr_repository
       m_sb_pool("sendbufferpool")
     {}
 
+  /**
+   * m_receive_lock, m_section_lock, m_mem_manager_lock, m_jb_pool
+   * and m_sb_pool are all variables globally shared among the threads
+   * and also heavily updated.
+   */
   struct thr_spin_lock<64> m_receive_lock[MAX_NDBMT_RECEIVE_THREADS];
   struct thr_spin_lock<64> m_section_lock;
   struct thr_spin_lock<64> m_mem_manager_lock;
+  /* thr_safe_pool is aligned to be also 64 bytes in size */
   struct thr_safe_pool<thr_job_buffer> m_jb_pool;
   struct thr_safe_pool<thr_send_page> m_sb_pool;
+  /* m_mm and m_thread_count are globally shared and read only variables */
   Ndbd_mem_manager * m_mm;
   unsigned m_thread_count;
-  struct thr_data m_thread[MAX_BLOCK_THREADS];
-
   /**
-   * send buffer handling
+   * Protect m_mm and m_thread_count from CPU cache line sharing: the
+   * first part of m_thread (struct thr_data) consists of globally shared
+   * variables, so letting these read-only variables share a cache line
+   * with them isn't a good idea.
    */
+  char protection_unused[NDB_CL_PADSZ(sizeof(void*) + sizeof(unsigned))];
+
+  struct thr_data_aligned m_thread[MAX_BLOCK_THREADS];
 
   /* The buffers that are to be sent */
   struct send_buffer
@@ -1099,7 +1195,9 @@ struct thr_send_thread_instance
                m_awake(FALSE),
                m_thread(NULL),
                m_waiter_struct(),
-               m_send_buffer_pool(0, THR_FREE_BUF_MAX)
+               m_send_buffer_pool(0,
+                                  THR_SEND_BUFFER_MAX_FREE,
+                                  THR_SEND_BUFFER_ALLOC_SIZE)
   {}
   Uint32 m_instance_no;
   Uint32 m_watchdog_counter;
@@ -1583,7 +1681,7 @@ thr_job_buffer*
 seize_buffer(struct thr_repository* rep, int thr_no, bool prioa)
 {
   thr_job_buffer* jb;
-  thr_data* selfptr = rep->m_thread + thr_no;
+  struct thr_data* selfptr = &rep->m_thread[thr_no].m_thr_data;
   Uint32 first_free = selfptr->m_first_free;
   Uint32 first_unused = selfptr->m_first_unused;
 
@@ -1645,7 +1743,7 @@ static
 void
 release_buffer(struct thr_repository* rep, int thr_no, thr_job_buffer* jb)
 {
-  struct thr_data* selfptr = rep->m_thread + thr_no;
+  struct thr_data* selfptr = &rep->m_thread[thr_no].m_thr_data;
   Uint32 first_free = selfptr->m_first_free;
   Uint32 first_unused = selfptr->m_first_unused;
 
@@ -1881,7 +1979,7 @@ void
 senddelay(Uint32 thr_no, const SignalHeader* s, Uint32 delay)
 {
   struct thr_repository* rep = &g_thr_repository;
-  struct thr_data * selfptr = rep->m_thread + thr_no;
+  struct thr_data* selfptr = &rep->m_thread[thr_no].m_thr_data;
   assert(pthread_equal(selfptr->m_thr_id, pthread_self()));
   unsigned siglen = (sizeof(*s) >> 2) + s->theLength + s->m_noOfSections;
 
@@ -2033,11 +2131,12 @@ flush_jbb_write_state(thr_data *selfptr)
   Uint32 self = selfptr->m_thr_no;
 
   thr_jb_write_state *w = selfptr->m_write_states;
-  thr_data *thrptr = g_thr_repository.m_thread;
-  for (Uint32 thr_no = 0; thr_no < thr_count; thr_no++, thrptr++, w++)
+  thr_data_aligned *thr_align_ptr = g_thr_repository.m_thread;
+  for (Uint32 thr_no = 0; thr_no < thr_count; thr_no++, thr_align_ptr++, w++)
   {
     if (w->m_pending_signals || w->m_pending_signals_wakeup)
     {
+      struct thr_data *thrptr = &thr_align_ptr->m_thr_data;
       w->m_pending_signals_wakeup = MAX_SIGNALS_BEFORE_WAKEUP;
       thr_job_queue_head *q_head = thrptr->m_in_queue_head + self;
       flush_write_state(selfptr, thrptr, q_head, w);
@@ -2058,8 +2157,8 @@ check_job_buffers(struct thr_repository*
 {
   const Uint32 minfree = (1024 + MIN_SIGNALS_PER_PAGE - 1)/MIN_SIGNALS_PER_PAGE;
   unsigned thr_no = first_receiver_thread_no + recv_thread_id;
-  const thr_data *thrptr = rep->m_thread;
-  for (unsigned i = 0; i<num_threads; i++, thrptr++)
+  const thr_data_aligned *thr_align_ptr = rep->m_thread;
+  for (unsigned i = 0; i<num_threads; i++, thr_align_ptr++)
   {
     /**
      * NOTE: m_read_index is read wo/ lock (and updated by different thread)
@@ -2068,6 +2167,7 @@ check_job_buffers(struct thr_repository*
      *       function is always conservative (i.e it can be better than
      *       returned value, if read-index has moved but we didnt see it)
      */
+    const struct thr_data *thrptr = &thr_align_ptr->m_thr_data;
     const thr_job_queue_head *q_head = thrptr->m_in_queue_head + thr_no;
     unsigned ri = q_head->m_read_index;
     unsigned wi = q_head->m_write_index;
@@ -2103,9 +2203,9 @@ compute_max_signals_to_execute(Uint32 th
 {
   Uint32 minfree = thr_job_queue::SIZE;
   const struct thr_repository* rep = &g_thr_repository;
-  const thr_data *thrptr = rep->m_thread;
+  const struct thr_data_aligned *thr_align_ptr = rep->m_thread;
 
-  for (unsigned i = 0; i<num_threads; i++, thrptr++)
+  for (unsigned i = 0; i<num_threads; i++, thr_align_ptr++)
   {
     /**
      * NOTE: m_read_index is read wo/ lock (and updated by different thread)
@@ -2114,6 +2214,7 @@ compute_max_signals_to_execute(Uint32 th
      *       function is always conservative (i.e it can be better than
      *       returned value, if read-index has moved but we didnt see it)
      */
+    const struct thr_data *thrptr = &thr_align_ptr->m_thr_data;
     const thr_job_queue_head *q_head = thrptr->m_in_queue_head + thr_no;
     unsigned ri = q_head->m_read_index;
     unsigned wi = q_head->m_write_index;
@@ -2518,7 +2619,8 @@ trp_callback::bytes_sent(NodeId node, Ui
   assert(thr_no != NO_SEND_THREAD);
   if (!is_send_thread(thr_no))
   {
-    return ::bytes_sent(&g_thr_repository.m_thread[thr_no].m_send_buffer_pool,
+    thr_data * thrptr = &g_thr_repository.m_thread[thr_no].m_thr_data;
+    return ::bytes_sent(&thrptr->m_send_buffer_pool,
                         sb, bytes);
   }
   else
@@ -2588,6 +2690,53 @@ register_pending_send(thr_data *selfptr,
   }
 }
 
+static void try_send(thr_data *, Uint32); // prototype
+
+/**
+ * pack send buffers for a specific node
+ */
+void
+pack_send_buffer(thr_data *selfptr, Uint32 node)
+{
+  thr_repository* rep = &g_thr_repository;
+  thr_repository::send_buffer* sb = rep->m_send_buffers+node;
+  thread_local_pool<thr_send_page>* pool = &selfptr->m_send_buffer_pool;
+
+  lock(&sb->m_send_lock);
+  int bytes = link_thread_send_buffers(sb, node);
+  if (bytes)
+  {
+    pack_sb_pages(pool, sb);
+  }
+  unlock(&sb->m_send_lock);
+
+  /**
+   * release buffers prior to checking m_force_send
+   */
+  pool->release_global(rep->m_mm, RG_TRANSPORTER_BUFFERS);
+
+  /**
+   * After having locked/unlocked m_send_lock, the
+   *   "protocol" dictates that we must check m_force_send.
+   */
+  if (sb->m_force_send)
+  {
+    try_send(selfptr, node);
+  }
+}
+
+static
+void
+pack_send_buffers(thr_data* selfptr)
+{
+  for (Uint32 i = 1; i < NDB_ARRAY_SIZE(selfptr->m_send_buffers); i++)
+  {
+    if (globalTransporterRegistry.get_transporter(i))
+      pack_send_buffer(selfptr, i);
+  }
+}
+
+
 /**
  * publish thread-locally prepared send-buffer
  */
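
The m_force_send handling that pack_send_buffer() adds above, and that
forceSend()/try_send()/do_send() repeat below, follows one rule: whoever
holds m_send_lock must release its pooled pages and then re-check
m_force_send after unlocking. A simplified sketch of the loop shape
(perform_send() and the surrounding names are hypothetical stand-ins):

  do
  {
    sb->m_force_send = 0;          /* clear the request before sending */
    lock(&sb->m_send_lock);
    perform_send(sb);              /* drain whatever is linked */
    unlock(&sb->m_send_lock);
    /* hand pages back to the global pool *before* re-testing the flag,
     * so pages queued by a concurrent producer are never stranded */
    pool->release_global(mm, RG_TRANSPORTER_BUFFERS);
  } while (sb->m_force_send);      /* re-send if someone asked meanwhile */
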
@@ -2614,9 +2763,7 @@ flush_send_buffer(thr_data* selfptr, Uin
 
   if (unlikely(next == ri))
   {
-    lock(&sb->m_send_lock);
-    link_thread_send_buffers(sb, node);
-    unlock(&sb->m_send_lock);
+    pack_send_buffer(selfptr, node);
   }
 
   dst->m_buffers[wi] = src->m_first_page;
@@ -2646,9 +2793,13 @@ mt_send_handle::forceSend(NodeId nodeId)
     globalTransporterRegistry.performSend(nodeId);
     sb->m_send_thread = NO_SEND_THREAD;
     unlock(&sb->m_send_lock);
-  } while (sb->m_force_send);
 
-  selfptr->m_send_buffer_pool.release_global(rep->m_mm, RG_TRANSPORTER_BUFFERS);
+    /**
+     * release buffers prior to maybe looping on sb->m_force_send
+     */
+    selfptr->m_send_buffer_pool.release_global(rep->m_mm,
+                                               RG_TRANSPORTER_BUFFERS);
+  } while (sb->m_force_send);
 
   return true;
 }
@@ -2677,9 +2828,13 @@ try_send(thr_data * selfptr, Uint32 node
     globalTransporterRegistry.performSend(node);
     sb->m_send_thread = NO_SEND_THREAD;
     unlock(&sb->m_send_lock);
-  } while (sb->m_force_send);
 
-  selfptr->m_send_buffer_pool.release_global(rep->m_mm, RG_TRANSPORTER_BUFFERS);
+    /**
+     * release buffers prior to maybe looping on sb->m_force_send
+     */
+    selfptr->m_send_buffer_pool.release_global(rep->m_mm,
+                                               RG_TRANSPORTER_BUFFERS);
+  } while (sb->m_force_send);
 }
 
 /**
@@ -2811,6 +2966,14 @@ do_send(struct thr_data* selfptr, bool m
       {
         register_pending_send(selfptr, node);
       }
+      if (sb->m_force_send)
+      {
+        /**
+         * release buffers prior to looping on sb->m_force_send
+         */
+        selfptr->m_send_buffer_pool.release_global(rep->m_mm,
+                                                   RG_TRANSPORTER_BUFFERS);
+      }
     } while (sb->m_force_send);
   }
 
@@ -3156,14 +3319,14 @@ struct thr_map_entry {
   thr_map_entry() : thr_no(NULL_THR_NO) {}
 };
 
-static struct thr_map_entry thr_map[NO_OF_BLOCKS][MAX_BLOCK_INSTANCES];
+static struct thr_map_entry thr_map[NO_OF_BLOCKS][NDBMT_MAX_BLOCK_INSTANCES];
 
 static inline Uint32
 block2ThreadId(Uint32 block, Uint32 instance)
 {
   assert(block >= MIN_BLOCK_NO && block <= MAX_BLOCK_NO);
   Uint32 index = block - MIN_BLOCK_NO;
-  assert(instance < MAX_BLOCK_INSTANCES);
+  assert(instance < NDB_ARRAY_SIZE(thr_map[index]));
   const thr_map_entry& entry = thr_map[index][instance];
   assert(entry.thr_no < num_threads);
   return entry.thr_no;
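
NDB_ARRAY_SIZE, used here so that the bound checks track the actual
dimension of thr_map, is the usual compile-time element-count macro,
i.e. something like (sketch; the real definition lives elsewhere in the
tree):

  #define NDB_ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

This keeps the asserts correct even if the second dimension (now
NDBMT_MAX_BLOCK_INSTANCES) changes again.
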
@@ -3175,7 +3338,7 @@ add_thr_map(Uint32 main, Uint32 instance
   assert(main == blockToMain(main));
   Uint32 index = main - MIN_BLOCK_NO;
   assert(index < NO_OF_BLOCKS);
-  assert(instance < MAX_BLOCK_INSTANCES);
+  assert(instance < NDB_ARRAY_SIZE(thr_map[index]));
 
   SimulatedBlock* b = globalData.getBlock(main, instance);
   require(b != 0);
@@ -3185,7 +3348,7 @@ add_thr_map(Uint32 main, Uint32 instance
 
   require(thr_no < num_threads);
   struct thr_repository* rep = &g_thr_repository;
-  thr_data* thr_ptr = rep->m_thread + thr_no;
+  struct thr_data* thr_ptr = &rep->m_thread[thr_no].m_thr_data;
 
   /* Add to list. */
   {
@@ -3333,14 +3496,14 @@ mt_finalize_thr_map()
   {
     Uint32 bno = b + MIN_BLOCK_NO;
     Uint32 cnt = 0;
-    while (cnt < MAX_BLOCK_INSTANCES &&
+    while (cnt < NDB_ARRAY_SIZE(thr_map[b]) &&
            thr_map[b][cnt].thr_no != thr_map_entry::NULL_THR_NO)
       cnt++;
 
-    if (cnt != MAX_BLOCK_INSTANCES)
+    if (cnt != NDB_ARRAY_SIZE(thr_map[b]))
     {
       SimulatedBlock * main = globalData.getBlock(bno, 0);
-      for (Uint32 i = cnt; i < MAX_BLOCK_INSTANCES; i++)
+      for (Uint32 i = cnt; i < NDB_ARRAY_SIZE(thr_map[b]); i++)
       {
         Uint32 dup = (cnt == 1) ? 0 : 1 + ((i - 1) % (cnt - 1));
         if (thr_map[b][i].thr_no == thr_map_entry::NULL_THR_NO)
@@ -3666,6 +3829,20 @@ mt_job_thread_main(void *thr_arg)
   { 
     loops++;
 
+    /**
+     * Prefill our thread-local send buffers
+     *   up to THR_SEND_BUFFER_PRE_ALLOC (1Mb).
+     *
+     * If this doesn't work, pack buffers before starting to execute signals.
+     */
+    watchDogCounter = 11;
+    if (!selfptr->m_send_buffer_pool.fill(g_thr_repository.m_mm,
+                                          RG_TRANSPORTER_BUFFERS,
+                                          THR_SEND_BUFFER_PRE_ALLOC))
+    {
+      pack_send_buffers(selfptr);
+    }
+
     watchDogCounter = 2;
     scan_time_queues(selfptr, now);
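
The fill()-or-pack logic above relies on two pool operations. Their
rough contract, as far as these hunks show (types and signatures are
sketched, not authoritative):

  template <typename T>
  struct thread_local_pool
  {
    /* Top the local free list up to 'cnt' pages from the global
     * allocator; returns false if the global pool cannot supply them,
     * in which case the caller packs its send buffers to free pages. */
    bool fill(Ndbd_mem_manager *mm, Uint32 resource_group, Uint32 cnt);

    /* Hand pages above the max-free watermark back to the global pool
     * (the THR_SEND_BUFFER_MAX_FREE limit set in the constructor
     * change near the top of this diff). */
    void release_global(Ndbd_mem_manager *mm, Uint32 resource_group);
  };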
 
@@ -3783,9 +3960,9 @@ sendlocal(Uint32 self, const SignalHeade
 
   Uint32 dst = block2ThreadId(block, instance);
   struct thr_repository* rep = &g_thr_repository;
-  struct thr_data * selfptr = rep->m_thread + self;
+  struct thr_data *selfptr = &rep->m_thread[self].m_thr_data;
   assert(pthread_equal(selfptr->m_thr_id, pthread_self()));
-  struct thr_data * dstptr = rep->m_thread + dst;
+  struct thr_data *dstptr = &rep->m_thread[dst].m_thr_data;
 
   selfptr->m_stat.m_priob_count++;
   Uint32 siglen = (sizeof(*s) >> 2) + s->theLength + s->m_noOfSections;
@@ -3811,10 +3988,10 @@ sendprioa(Uint32 self, const SignalHeade
 
   Uint32 dst = block2ThreadId(block, instance);
   struct thr_repository* rep = &g_thr_repository;
-  struct thr_data *selfptr = rep->m_thread + self;
+  struct thr_data *selfptr = &rep->m_thread[self].m_thr_data;
   assert(s->theVerId_signalNumber == GSN_START_ORD ||
          pthread_equal(selfptr->m_thr_id, pthread_self()));
-  struct thr_data *dstptr = rep->m_thread + dst;
+  struct thr_data *dstptr = &rep->m_thread[dst].m_thr_data;
 
   selfptr->m_stat.m_prioa_count++;
   Uint32 siglen = (sizeof(*s) >> 2) + s->theLength + s->m_noOfSections;
@@ -3854,7 +4031,7 @@ mt_send_remote(Uint32 self, const Signal
                const LinearSectionPtr ptr[3])
 {
   thr_repository *rep = &g_thr_repository;
-  thr_data *selfptr = rep->m_thread + self;
+  struct thr_data *selfptr = &rep->m_thread[self].m_thr_data;
   SendStatus ss;
 
   mt_send_handle handle(selfptr);
@@ -3872,7 +4049,7 @@ mt_send_remote(Uint32 self, const Signal
                const SegmentedSectionPtr ptr[3])
 {
   thr_repository *rep = &g_thr_repository;
-  thr_data *selfptr = rep->m_thread + self;
+  struct thr_data *selfptr = &rep->m_thread[self].m_thr_data;
   SendStatus ss;
 
   mt_send_handle handle(selfptr);
@@ -3905,7 +4082,7 @@ sendprioa_STOP_FOR_CRASH(const struct th
   /**
    * Pick any instance running in this thread
    */
-  struct thr_data * dstptr = rep->m_thread + dst;
+  struct thr_data *dstptr = &rep->m_thread[dst].m_thr_data;
   Uint32 bno = dstptr->m_instance_list[0];
 
   memset(&signalT.header, 0, sizeof(SignalHeader));
@@ -4023,7 +4200,7 @@ thr_init2(struct thr_repository* rep, st
     selfptr->m_write_states[i].m_write_index = 0;
     selfptr->m_write_states[i].m_write_pos = 0;
     selfptr->m_write_states[i].m_write_buffer =
-      rep->m_thread[i].m_in_queue[thr_no].m_buffers[0];
+      rep->m_thread[i].m_thr_data.m_in_queue[thr_no].m_buffers[0];
     selfptr->m_write_states[i].m_pending_signals = 0;
     selfptr->m_write_states[i].m_pending_signals_wakeup = 0;
   }    
@@ -4062,11 +4239,11 @@ rep_init(struct thr_repository* rep, uns
   rep->m_thread_count = cnt;
   for (unsigned int i = 0; i<cnt; i++)
   {
-    thr_init(rep, rep->m_thread + i, cnt, i);
+    thr_init(rep, &rep->m_thread[i].m_thr_data, cnt, i);
   }
   for (unsigned int i = 0; i<cnt; i++)
   {
-    thr_init2(rep, rep->m_thread + i, cnt, i);
+    thr_init2(rep, &rep->m_thread[i].m_thr_data, cnt, i);
   }
 
   rep->stopped_threads = 0;
@@ -4093,6 +4270,83 @@ rep_init(struct thr_repository* rep, uns
 #include "ThreadConfig.hpp"
 #include <signaldata/StartOrd.hpp>
 
+static Uint32
+get_total_number_of_block_threads(void)
+{
+  return (NUM_MAIN_THREADS +
+          globalData.ndbMtLqhThreads + 
+          globalData.ndbMtTcThreads +
+          globalData.ndbMtReceiveThreads);
+}
+
+static Uint32
+get_num_nodes()
+{
+  Uint32 count = 0;
+  for (Uint32 nodeId = 1; nodeId < MAX_NODES; nodeId++)
+  {
+    if (globalTransporterRegistry.get_transporter(nodeId))
+    {
+      count++;
+    }
+  }
+  return count;
+}
+
+/**
+ * This function returns the number of extra send buffer pages
+ * to allocate in addition to the amount allocated for each
+ * node's send buffer.
+ */
+#define MIN_SEND_BUFFER_GENERAL (512) //16M
+#define MIN_SEND_BUFFER_PER_NODE (8) //256k
+#define MIN_SEND_BUFFER_PER_THREAD (64) //2M
+
+Uint32
+mt_get_extra_send_buffer_pages(Uint32 curr_num_pages,
+                               Uint32 extra_mem_pages)
+{
+  Uint32 num_threads = get_total_number_of_block_threads();
+  Uint32 num_nodes = get_num_nodes();
+
+  Uint32 extra_pages = extra_mem_pages;
+
+  /**
+   * Add 2M for each thread: we allocate in 1M chunks and also
+   * keep a minimum of 1M of send buffer per thread, so a thread
+   * can easily hold 2M of send buffer just to keep contention
+   * on the send buffer page spinlock low. We add this memory
+   * independently of the configuration settings, since the user
+   * cannot be expected to account for it and since we may
+   * change this behaviour at any time.
+   */
+  extra_pages += num_threads * THR_SEND_BUFFER_MAX_FREE;
+
+  if (extra_mem_pages == 0)
+  {
+    /**
+     * The user has set extra send buffer memory to 0, leaving it
+     * to us to decide how much extra memory is needed.
+     *
+     * We make sure there is at least a minimum of 16M +
+     * 2M per thread + 256k per node. If curr_num_pages plus our
+     * local additions already reaches this level we add nothing
+     * more; otherwise we add enough to reach this minimum.
+     */
+    Uint32 min_pages = MIN_SEND_BUFFER_GENERAL +
+      (MIN_SEND_BUFFER_PER_NODE * num_nodes) +
+      (MIN_SEND_BUFFER_PER_THREAD * num_threads);
+
+    if ((curr_num_pages + extra_pages) < min_pages)
+    {
+      extra_pages = min_pages - curr_num_pages;
+    }
+  }
+  return extra_pages;
+}
+
 Uint32
 compute_jb_pages(struct EmulatorData * ed)
 {
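
Taking the #define comments above at face value (32K pages: 512 = 16M,
8 = 256K, 64 = 2M), a worked example of the auto-sizing branch with
hypothetical numbers: 4 block threads, 2 nodes, curr_num_pages = 400,
extra_mem_pages = 0, and THR_SEND_BUFFER_MAX_FREE assumed to be the
64-page (2M) figure the earlier comment refers to:

  extra_pages = 0 + 4 * 64                  /* = 256 pages (  8M)  */
  min_pages   = 512 + (8 * 2) + (64 * 4)    /* = 784 pages (24.5M) */
  curr_num_pages + extra_pages = 656 < 784
    => extra_pages = 784 - 400 = 384        /* topped up to the minimum */
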
@@ -4244,7 +4498,7 @@ ThreadConfig::ipControlLoop(NdbThread* p
    */
   for (thr_no = 0; thr_no < num_threads; thr_no++)
   {
-    rep->m_thread[thr_no].m_time = NdbTick_CurrentMillisecond();
+    rep->m_thread[thr_no].m_thr_data.m_time = NdbTick_CurrentMillisecond();
 
     if (thr_no == first_receiver_thread_no)
       continue;                 // Will run in the main thread.
@@ -4256,30 +4510,30 @@ ThreadConfig::ipControlLoop(NdbThread* p
     if (thr_no < first_receiver_thread_no)
     {
       /* Start block threads */
-      rep->m_thread[thr_no].m_thread =
+      rep->m_thread[thr_no].m_thr_data.m_thread =
         NdbThread_Create(mt_job_thread_main,
                          (void **)(rep->m_thread + thr_no),
                          1024*1024,
                          "execute thread", //ToDo add number
                          NDB_THREAD_PRIO_MEAN);
-      require(rep->m_thread[thr_no].m_thread != NULL);
+      require(rep->m_thread[thr_no].m_thr_data.m_thread != NULL);
     }
     else
     {
       /* Start a receiver thread, also block thread for TRPMAN */
-      rep->m_thread[thr_no].m_thread =
+      rep->m_thread[thr_no].m_thr_data.m_thread =
         NdbThread_Create(mt_receiver_thread_main,
-                         (void **)(rep->m_thread + thr_no),
+                         (void **)(&rep->m_thread[thr_no].m_thr_data),
                          1024*1024,
                          "receive thread", //ToDo add number
                          NDB_THREAD_PRIO_MEAN);
-      require(rep->m_thread[thr_no].m_thread != NULL);
+      require(rep->m_thread[thr_no].m_thr_data.m_thread != NULL);
     }
   }
 
   /* Now run the main loop for first receiver thread directly. */
-  rep->m_thread[first_receiver_thread_no].m_thread = pThis;
-  mt_receiver_thread_main(&(rep->m_thread[first_receiver_thread_no]));
+  rep->m_thread[first_receiver_thread_no].m_thr_data.m_thread = pThis;
+  mt_receiver_thread_main(&(rep->m_thread[first_receiver_thread_no].m_thr_data));
 
   /* Wait for all threads to shutdown. */
   for (thr_no = 0; thr_no < num_threads; thr_no++)
@@ -4287,8 +4541,9 @@ ThreadConfig::ipControlLoop(NdbThread* p
     if (thr_no == first_receiver_thread_no)
       continue;
     void *dummy_return_status;
-    NdbThread_WaitFor(rep->m_thread[thr_no].m_thread, &dummy_return_status);
-    NdbThread_Destroy(&(rep->m_thread[thr_no].m_thread));
+    NdbThread_WaitFor(rep->m_thread[thr_no].m_thr_data.m_thread,
+                      &dummy_return_status);
+    NdbThread_Destroy(&(rep->m_thread[thr_no].m_thr_data.m_thread));
   }
 
   /* Delete send threads, includes waiting for threads to shutdown */
@@ -4365,7 +4620,8 @@ FastScheduler::traceDumpGetJam(Uint32 th
   thrdTheEmulatedJam = NULL;
   thrdTheEmulatedJamIndex = 0;
 #else
-  const EmulatedJamBuffer *jamBuffer = &g_thr_repository.m_thread[thr_no].m_jam;
+  const EmulatedJamBuffer *jamBuffer =
+    &g_thr_repository.m_thread[thr_no].m_thr_data.m_jam;
   thrdTheEmulatedJam = jamBuffer->theEmulatedJam;
   thrdTheEmulatedJamIndex = jamBuffer->theEmulatedJamIndex;
   jamBlockNumber = jamBuffer->theEmulatedJamBlockNumber;
@@ -4499,7 +4755,7 @@ FastScheduler::dumpSignalMemory(Uint32 t
   Uint32 seq_start = 0;
   Uint32 seq_end = 0;
 
-  const thr_data *thr_ptr = &rep->m_thread[thr_no];
+  const struct thr_data *thr_ptr = &rep->m_thread[thr_no].m_thr_data;
   if (watchDogCounter)
     *watchDogCounter = 4;
 
@@ -4772,7 +5028,7 @@ mt_get_thread_references_for_blocks(cons
      */
     assert(block == blockToMain(block));
     Uint32 index = block - MIN_BLOCK_NO;
-    for (Uint32 instance = 0; instance < MAX_BLOCK_INSTANCES; instance++)
+    for (Uint32 instance = 0; instance < NDB_ARRAY_SIZE(thr_map[index]); instance++)
     {
       Uint32 thr_no = thr_map[index][instance].thr_no;
       if (thr_no == thr_map_entry::NULL_THR_NO)
@@ -4793,7 +5049,7 @@ void
 mt_wakeup(class SimulatedBlock* block)
 {
   Uint32 thr_no = block->getThreadId();
-  thr_data *thrptr = g_thr_repository.m_thread + thr_no;
+  struct thr_data *thrptr = &g_thr_repository.m_thread[thr_no].m_thr_data;
   wakeup(&thrptr->m_waiter);
 }
 
@@ -4802,7 +5058,7 @@ void
 mt_assert_own_thread(SimulatedBlock* block)
 {
   Uint32 thr_no = block->getThreadId();
-  thr_data *thrptr = g_thr_repository.m_thread + thr_no;
+  struct thr_data *thrptr = &g_thr_repository.m_thread[thr_no].m_thr_data;
 
   if (unlikely(pthread_equal(thrptr->m_thr_id, pthread_self()) == 0))
   {
@@ -4818,7 +5074,7 @@ Uint32
 mt_get_blocklist(SimulatedBlock * block, Uint32 arr[], Uint32 len)
 {
   Uint32 thr_no = block->getThreadId();
-  thr_data *thr_ptr = g_thr_repository.m_thread + thr_no;
+  struct thr_data *thr_ptr = &g_thr_repository.m_thread[thr_no].m_thr_data;
 
   for (Uint32 i = 0; i < thr_ptr->m_instance_count; i++)
   {
@@ -4833,7 +5089,7 @@ mt_get_thr_stat(class SimulatedBlock * b
 {
   bzero(dst, sizeof(* dst));
   Uint32 thr_no = block->getThreadId();
-  thr_data *selfptr = g_thr_repository.m_thread + thr_no;
+  struct thr_data *selfptr = &g_thr_repository.m_thread[thr_no].m_thr_data;
 
   THRConfigApplier & conf = globalEmulatorData.theConfiguration->m_thr_config;
   dst->thr_no = thr_no;

=== modified file 'storage/ndb/src/kernel/vm/mt_thr_config.cpp'
--- a/storage/ndb/src/kernel/vm/mt_thr_config.cpp	2012-01-24 06:20:13 +0000
+++ b/storage/ndb/src/kernel/vm/mt_thr_config.cpp	2012-01-30 16:31:29 +0000
@@ -1101,7 +1101,7 @@ TAPTEST(mt_thr_config)
         "main={ keso=88, count=23},ldm,ldm",
         "main={ cpuset=1-3 }, ldm={cpuset=3-4}",
         "main={ cpuset=1-3 }, ldm={cpubind=2}",
-        "tc,tc,tc={count=5}",
+        "tc,tc,tc={count=25}",
         0
       };
 

=== modified file 'storage/ndb/src/mgmsrv/ConfigInfo.cpp'
--- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp	2012-01-25 10:39:40 +0000
+++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp	2012-02-02 21:00:28 +0000
@@ -1118,7 +1118,7 @@ const ConfigInfo::ParamInfo ConfigInfo::
     ConfigInfo::CI_USED,
     CI_RESTART_INITIAL,
     ConfigInfo::CI_INT,
-    "4",
+    STR_VALUE(NDB_DEFAULT_LOG_PARTS),
     "4",
     STR_VALUE(NDB_MAX_LOG_PARTS)
   },
@@ -1688,6 +1688,19 @@ const ConfigInfo::ParamInfo ConfigInfo::
     "true"},
 
   {
+    CFG_EXTRA_SEND_BUFFER_MEMORY,
+    "ExtraSendBufferMemory",
+    DB_TOKEN,
+    "Extra send buffer memory to use for send buffers in all transporters",
+    ConfigInfo::CI_USED,
+    false,
+    ConfigInfo::CI_INT64,
+    "0",
+    "0",
+    "32G"
+  },
+
+  {
     CFG_TOTAL_SEND_BUFFER_MEMORY,
     "TotalSendBufferMemory",
     DB_TOKEN,
@@ -1707,7 +1720,7 @@ const ConfigInfo::ParamInfo ConfigInfo::
     "Amount of bytes (out of TotalSendBufferMemory) to reserve for connection\n"
     "between data nodes. This memory will not be available for connections to\n"
     "management server or API nodes.",
-    ConfigInfo::CI_USED,
+    ConfigInfo::CI_DEPRECATED,
     false,
     ConfigInfo::CI_INT,
     "0",

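For reference, the new parameter sits next to the existing one in the
cluster configuration, e.g. (hypothetical config.ini fragment):

  [ndbd default]
  TotalSendBufferMemory = 64M
  ExtraSendBufferMemory = 0    # default: auto-size the extra memory

With the default 0, the kernel applies the minimums computed by
mt_get_extra_send_buffer_pages() shown earlier in this mail.
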
=== modified file 'storage/ndb/src/ndbapi/TransporterFacade.cpp'
--- a/storage/ndb/src/ndbapi/TransporterFacade.cpp	2012-01-17 08:33:59 +0000
+++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp	2012-01-30 15:12:41 +0000
@@ -643,7 +643,10 @@ TransporterFacade::configure(NodeId node
   // Configure send buffers
   Uint32 total_send_buffer = 0;
   iter.get(CFG_TOTAL_SEND_BUFFER_MEMORY, &total_send_buffer);
-  theTransporterRegistry->allocate_send_buffers(total_send_buffer);
+  Uint64 extra_send_buffer = 0;
+  iter.get(CFG_EXTRA_SEND_BUFFER_MEMORY, &extra_send_buffer);
+  theTransporterRegistry->allocate_send_buffers(total_send_buffer,
+                                                extra_send_buffer);
 
   Uint32 auto_reconnect=1;
   iter.get(CFG_AUTO_RECONNECT, &auto_reconnect);

=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2011-12-20 08:49:07 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2012-01-28 10:11:10 +0000
@@ -210,16 +210,21 @@ ErrorBundle ErrorCodes[] = {
   { 623,  HA_ERR_RECORD_FILE_FULL, IS, "623" },
   { 624,  HA_ERR_RECORD_FILE_FULL, IS, "624" },
   { 625,  HA_ERR_INDEX_FILE_FULL, IS, "Out of memory in Ndb Kernel, hash index part (increase IndexMemory)" },
-  { 633,  HA_ERR_INDEX_FILE_FULL, IS, "Table fragment hash index has reached maximum possible size" },
+  { 633,  HA_ERR_INDEX_FILE_FULL, IS,
+    "Table fragment hash index has reached maximum possible size" },
   { 640,  DMEC, IS, "Too many hash indexes (should not happen)" },
   { 826,  HA_ERR_RECORD_FILE_FULL, IS, "Too many tables and attributes (increase MaxNoOfAttributes or MaxNoOfTables)" },
   { 827,  HA_ERR_RECORD_FILE_FULL, IS, "Out of memory in Ndb Kernel, table data (increase DataMemory)" },
+  { 889,  HA_ERR_RECORD_FILE_FULL, IS,
+    "Table fragment fixed data reference has reached maximum possible value (specify MAXROWS or increase no of partitions)"},
   { 902,  HA_ERR_RECORD_FILE_FULL, IS, "Out of memory in Ndb Kernel, ordered index data (increase DataMemory)" },
   { 903,  HA_ERR_INDEX_FILE_FULL, IS, "Too many ordered indexes (increase MaxNoOfOrderedIndexes)" },
   { 904,  HA_ERR_INDEX_FILE_FULL, IS, "Out of fragment records (increase MaxNoOfOrderedIndexes)" },
   { 905,  DMEC, IS, "Out of attribute records (increase MaxNoOfAttributes)" },
   { 1601, HA_ERR_RECORD_FILE_FULL, IS, "Out extents, tablespace full" },
   { 1602, DMEC, IS,"No datafile in tablespace" },
+  { 1603, HA_ERR_RECORD_FILE_FULL, IS,
+    "Table fragment fixed data reference has reached maximum possible value (specify MAXROWS or increase no of partitions)"},
 
   /**
    * TimeoutExpired 

=== modified file 'storage/ndb/src/ndbjtie/NdbApiWrapper.hpp'
--- a/storage/ndb/src/ndbjtie/NdbApiWrapper.hpp	2012-01-19 18:16:31 +0000
+++ b/storage/ndb/src/ndbjtie/NdbApiWrapper.hpp	2012-01-26 20:57:36 +0000
@@ -3192,6 +3192,13 @@ struct NdbApiWrapper {
         return obj.nextResult(p0, p1);
     }
 
+    static int
+    NdbScanOperation__nextResultCopyOut
+    ( NdbScanOperation & obj, char * p0, bool p1, bool p2 )
+    {
+        return obj.nextResultCopyOut(p0, p1, p2);
+    }
+
     static void
     NdbScanOperation__close
     ( NdbScanOperation & obj, bool p0, bool p1 )
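
The new wrapper exposes NdbScanOperation::nextResultCopyOut(), the
copy-out variant of nextResult() that writes the current row into a
caller-supplied buffer instead of handing out a pointer into internal
result memory. A C++ usage sketch (buffer sizing and error handling are
assumptions):

  NdbScanOperation *scanOp = /* open scan, readTuples() already done */;
  char row[MAX_ROW_BYTES];     /* MAX_ROW_BYTES: assumed to fit a row */
  int ret;
  while ((ret = scanOp->nextResultCopyOut(row,
                                          true,     /* fetchAllowed */
                                          false))   /* forceSend    */
         == 0)
  {
    /* 'row' holds a private copy of the tuple and stays valid across
     * the next call, unlike the pointer-returning nextResult() */
  }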

=== modified file 'storage/ndb/src/ndbjtie/com/mysql/ndbjtie/ndbapi/NdbScanOperation.java'
--- a/storage/ndb/src/ndbjtie/com/mysql/ndbjtie/ndbapi/NdbScanOperation.java	2012-01-19 18:16:31 +0000
+++ b/storage/ndb/src/ndbjtie/com/mysql/ndbjtie/ndbapi/NdbScanOperation.java	2012-01-26 20:57:36 +0000
@@ -87,6 +87,7 @@ public class NdbScanOperation extends Nd
     public /*_virtual_*/ native int readTuples(int/*_LockMode_*/ lock_mode /*_= LM_Read_*/, int/*_Uint32_*/ scan_flags /*_= 0_*/, int/*_Uint32_*/ parallel /*_= 0_*/, int/*_Uint32_*/ batch /*_= 0_*/);
     public final native int nextResult(boolean fetchAllowed /*_= true_*/, boolean forceSend /*_= false_*/);
     // MMM! support <out:char *> or check if needed: public final native int nextResult(const char * * out_row_ptr, boolean fetchAllowed, boolean forceSend);
+    public final native int nextResultCopyOut(ByteBuffer/*_char *_*/ buffer, boolean fetchAllowed, boolean forceSend);
     public final native void close(boolean forceSend /*_= false_*/, boolean releaseOp /*_= false_*/);
     public final native NdbOperation/*_NdbOperation *_*/ lockCurrentTuple();
     public final native NdbOperation/*_NdbOperation *_*/ lockCurrentTuple(NdbTransaction/*_NdbTransaction *_*/ lockTrans);

=== modified file 'storage/ndb/src/ndbjtie/ndbapi_jtie.hpp'
--- a/storage/ndb/src/ndbjtie/ndbapi_jtie.hpp	2012-01-20 06:22:16 +0000
+++ b/storage/ndb/src/ndbjtie/ndbapi_jtie.hpp	2012-01-28 10:11:10 +0000
@@ -9114,6 +9114,22 @@ Java_com_mysql_ndbjtie_ndbapi_NdbScanOpe
 
 /*
  * Class:     com_mysql_ndbjtie_ndbapi_NdbScanOperation
+ * Method:    nextResultCopyOut
+ * Signature: (Ljava/nio/ByteBuffer;ZZ)I
+ */
+JNIEXPORT jint JNICALL
+Java_com_mysql_ndbjtie_ndbapi_NdbScanOperation_nextResultCopyOut(JNIEnv * env, jobject obj, jobject p0, jboolean p1, jboolean p2)
+{
+    TRACE("jint Java_com_mysql_ndbjtie_ndbapi_NdbScanOperation_nextResultCopyOut(JNIEnv *, jobject, jobject, jboolean, jboolean)");
+#ifndef NDBJTIE_USE_WRAPPED_VARIANT_FOR_FUNCTION
+    return gcall_mfr< ttrait_c_m_n_n_NdbScanOperation_t, ttrait_int, ttrait_char_1p_bb, ttrait_bool, ttrait_bool, &NdbScanOperation::nextResultCopyOut >(env, obj, p0, p1, p2);
+#else
+    return gcall_fr< ttrait_int, ttrait_c_m_n_n_NdbScanOperation_r, ttrait_char_1p_bb, ttrait_bool, ttrait_bool, &NdbApiWrapper::NdbScanOperation__nextResultCopyOut >(env, NULL, obj, p0, p1, p2);
+#endif // NDBJTIE_USE_WRAPPED_VARIANT_FOR_FUNCTION
+}
+
+/*
+ * Class:     com_mysql_ndbjtie_ndbapi_NdbScanOperation
  * Method:    close
  * Signature: (ZZ)V
  */

=== modified file 'storage/ndb/test/ndbapi/CMakeLists.txt'
--- a/storage/ndb/test/ndbapi/CMakeLists.txt	2011-12-09 12:44:18 +0000
+++ b/storage/ndb/test/ndbapi/CMakeLists.txt	2012-02-20 21:10:49 +0000
@@ -67,6 +67,8 @@ TARGET_LINK_LIBRARIES(testSRBank ndbbank
 ADD_EXECUTABLE(testLimits testLimits.cpp)
 ADD_EXECUTABLE(testSingleUserMode testSingleUserMode.cpp)
 ADD_EXECUTABLE(testIndexStat testIndexStat.cpp)
+ADD_EXECUTABLE(testUpgrade testUpgrade.cpp)
+ADD_EXECUTABLE(testAsynchMultiwait testAsynchMultiwait.cpp)
 
 SET(BINS create_all_tabs create_tab
   drop_all_tabs flexAsynch flexBench
@@ -81,7 +83,9 @@ SET(BINS create_all_tabs create_tab
   DbAsyncGenerator test_event_merge testNdbinfo
   testNativeDefault testLimits testSpj
   testSingleUserMode
-  testIndexStat)
+  testIndexStat
+  testUpgrade
+  testAsynchMultiwait)
 
 IF(MSVC)
   
@@ -95,4 +99,6 @@ FOREACH(B ${BINS})
   TARGET_LINK_LIBRARIES(${B} ndbclient_so)
 ENDFOREACH()
 
+TARGET_LINK_LIBRARIES(testUpgrade mysqlclient)
+
 INSTALL(TARGETS ${BINS} DESTINATION bin)

=== modified file 'storage/ndb/test/ndbapi/flexAsynch.cpp'
--- a/storage/ndb/test/ndbapi/flexAsynch.cpp	2012-01-23 20:25:28 +0000
+++ b/storage/ndb/test/ndbapi/flexAsynch.cpp	2012-02-14 08:16:48 +0000
@@ -786,6 +786,7 @@ executeTrans(ThreadNdb* pThread,
           ignore this record
         */
         aNdbObject->closeTransaction(tConArray[num_ops]);
+        tConArray[num_ops] = NULL;
         continue;
       }
       for (unsigned int k = 0; k < tNoOfOpsPerTrans; k++) {
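
Nulling the slot after the early closeTransaction() matters because the
array is typically swept again during cleanup; a sketch (loop bound and
sweep are assumptions) of the double-close the one-liner prevents:

  for (unsigned int i = 0; i < num_ops; i++)
  {
    if (tConArray[i] != NULL)        /* skip slots already closed */
      aNdbObject->closeTransaction(tConArray[i]);
    tConArray[i] = NULL;
  }
  /* without the added NULL assignment, the early-closed transaction
   * would reach closeTransaction() a second time here */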

=== modified file 'storage/ndb/test/run-test/atrt.hpp'
--- a/storage/ndb/test/run-test/atrt.hpp	2012-01-04 20:25:40 +0000
+++ b/storage/ndb/test/run-test/atrt.hpp	2012-02-23 12:37:59 +0000
@@ -205,6 +205,7 @@ extern const char * g_clusters;
  *   we keep full path to them here
  */
 char * find_bin_path(const char * basename);
+char * find_bin_path(const char * prefix, const char * basename);
 extern const char * g_ndb_mgmd_bin_path;
 extern const char * g_ndbd_bin_path;
 extern const char * g_ndbmtd_bin_path;

=== modified file 'storage/ndb/test/run-test/command.cpp'
--- a/storage/ndb/test/run-test/command.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/test/run-test/command.cpp	2012-02-20 21:15:00 +0000
@@ -102,6 +102,24 @@ set_env_var(const BaseString& existing,
   return newEnv;
 }
 
+static
+char *
+dirname(const char * path)
+{
+  char * s = strdup(path);
+  size_t len = strlen(s);
+  for (size_t i = 1; i<len; i++)
+  {
+    if (s[len - i] == '/')
+    {
+      s[len - i] = 0;
+      return s;
+    }
+  }
+  free(s);
+  return 0;
+}
+
 
 Vector<atrt_process> g_saved_procs;
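
Note that the local dirname() above shadows POSIX dirname(3), returns
heap memory the caller must free, and truncates at the last '/':

  char *dir = dirname("/usr/local/mysql/lib/libmysqlclient.so");
  /* dir == "/usr/local/mysql/lib" */
  free(dir);

  /* Edge cases: dirname("ndbd") returns NULL (no '/'), and so does
   * dirname("/ndbd"), since the loop never examines s[0]. */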
 
@@ -162,8 +180,15 @@ do_change_version(atrt_config& config, S
                                   BaseString("MYSQL_BASE_DIR"),
                                   BaseString(new_prefix));
   proc.m_proc.m_env.assign(newEnv);
-  BaseString suffix(proc.m_proc.m_path.substr(strlen(old_prefix)));
-  proc.m_proc.m_path.assign(new_prefix).append(suffix);
+
+  ssize_t pos = proc.m_proc.m_path.lastIndexOf('/') + 1;
+  BaseString exename(proc.m_proc.m_path.substr(pos));
+  char * exe = find_bin_path(new_prefix, exename.c_str());
+  proc.m_proc.m_path = exe;
+  if (exe)
+  {
+    free(exe);
+  }
   if (process_args && strlen(process_args))
   {
     /* Beware too long args */
@@ -171,6 +196,33 @@ do_change_version(atrt_config& config, S
     proc.m_proc.m_args.append(process_args);
   }
 
+  {
+    /**
+     * In 5.5...binaries aren't compiled with rpath
+     * So we need an explicit LD_LIBRARY_PATH
+     * So when upgrading..we need to change LD_LIBRARY_PATH
+     * So I hate 5.5...
+     */
+    ssize_t p0 = proc.m_proc.m_env.indexOf(" LD_LIBRARY_PATH=");
+    ssize_t p1 = proc.m_proc.m_env.indexOf(' ', p0 + 1);
+
+    BaseString part0 = proc.m_proc.m_env.substr(0, p0);
+    BaseString part1 = proc.m_proc.m_env.substr(p1);
+
+    proc.m_proc.m_env.assfmt("%s%s",
+                             part0.c_str(),
+                             part1.c_str());
+
+    BaseString lib(g_libmysqlclient_so_path);
+    ssize_t pos = lib.lastIndexOf('/') + 1;
+    BaseString libname(lib.substr(pos));
+    char * exe = find_bin_path(new_prefix, libname.c_str());
+    char * dir = dirname(exe);
+    proc.m_proc.m_env.appfmt(" LD_LIBRARY_PATH=%s", dir);
+    free(exe);
+    free(dir);
+  }
+
   ndbout << proc << endl;
 
   g_logger.info("starting process...");
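
The LD_LIBRARY_PATH surgery above is easier to follow with concrete
strings (hypothetical values):

  /* before:  "... MYSQL_BASE_DIR=/new LD_LIBRARY_PATH=/old/lib FOO=1"
   *   p0 = index of " LD_LIBRARY_PATH=",  p1 = index of the next ' '
   *   part0 = everything before p0,       part1 = " FOO=1" onward
   * after the assfmt() + appfmt():
   *          "... MYSQL_BASE_DIR=/new FOO=1 LD_LIBRARY_PATH=/new/lib"
   * where /new/lib is the directory holding libmysqlclient.so under
   * the new prefix (via find_bin_path() + dirname()). */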

=== modified file 'storage/ndb/test/run-test/conf-upgrade.cnf'
--- a/storage/ndb/test/run-test/conf-upgrade.cnf	2011-05-19 17:47:28 +0000
+++ b/storage/ndb/test/run-test/conf-upgrade.cnf	2012-02-21 08:27:55 +0000
@@ -4,6 +4,7 @@ baseport = 14000
 clusters = .4node
 mysqld = CHOOSE_host1
 fix-nodeid=1
+mt=2
 
 [ndb_mgmd]
 
@@ -12,6 +13,7 @@ skip-innodb
 loose-skip-bdb
 socket=mysql.sock
 skip-grant-tables
+default-storage-engine=myisam
 
 [client]
 protocol=tcp

=== modified file 'storage/ndb/test/run-test/setup.cpp'
--- a/storage/ndb/test/run-test/setup.cpp	2012-01-04 20:25:40 +0000
+++ b/storage/ndb/test/run-test/setup.cpp	2012-02-23 12:37:59 +0000
@@ -1075,6 +1075,12 @@ operator<<(NdbOut& out, const atrt_proce
 char *
 find_bin_path(const char * exe)
 {
+  return find_bin_path(g_prefix, exe);
+}
+
+char *
+find_bin_path(const char * prefix, const char * exe)
+{
   if (exe == 0)
     return 0;
 
@@ -1089,7 +1095,7 @@ find_bin_path(const char * exe)
   for (int i = 0; g_search_path[i] != 0; i++)
   {
     BaseString p;
-    p.assfmt("%s/%s/%s", g_prefix, g_search_path[i], exe);
+    p.assfmt("%s/%s/%s", prefix, g_search_path[i], exe);
     if (File_class::exists(p.c_str()))
     {
       return strdup(p.c_str());

No bundle (reason: useless for push emails).