List: Commits « Previous Message | Next Message »
From: magnus.blaudd  Date: October 5 2011 7:38am
Subject:bzr push into mysql-trunk-cluster branch (magnus.blaudd:3385 to 3388)
View as plain text  
 3388 magnus.blaudd@stripped	2011-10-05 [merge]
      Merge 5.5-cluster -> trunk-cluster

    modified:
      storage/ndb/clusterj/clusterj-test/CMakeLists.txt
      storage/ndb/config/type_JAVA.cmake
      storage/ndb/ndb_configure.cmake
 3387 magnus.blaudd@stripped	2011-10-05 [merge]
      Merge 5.5-cluster -> trunk-cluster

    added:
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/IsNotNullPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/IsNullPredicateImpl.java
      storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/ValueHandlerBatching.java
      storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/ValueHandlerBindValuesImpl.java
      storage/ndb/src/common/portlib/NdbMutex_DeadlockDetector.cpp
      storage/ndb/src/common/portlib/NdbMutex_DeadlockDetector.h
    modified:
      VERSION
      cmake/configure.pl
      mysql-test/collections/default.experimental
      mysql-test/include/default_ndbd.cnf
      mysql-test/mysql-test-run.pl
      mysql-test/suite/ndb/my.cnf
      mysql-test/suite/ndb/r/ndb_config.result
      mysql-test/suite/ndb/r/ndb_join_pushdown.result
      mysql-test/suite/ndb/r/ndb_temporary.result
      mysql-test/suite/ndb/t/disabled.def
      mysql-test/suite/ndb/t/ndb_join_pushdown.test
      mysql-test/suite/ndb/t/ndb_temporary.test
      mysql-test/suite/ndb_binlog/my.cnf
      mysql-test/suite/ndb_binlog/t/ndb_binlog_log_transaction_id-master.opt
      mysql-test/suite/ndb_rpl/my.cnf
      mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_epoch_trans.cnf
      mysql-test/suite/ndb_team/my.cnf
      mysql-test/suite/ndb_team/t/ndb_autodiscover3.test
      mysql-test/suite/rpl_ndb/my.cnf
      mysql-test/suite/rpl_ndb/t/disabled.def
      mysql-test/suite/rpl_ndb/t/rpl_ndb_mixed_implicit_commit_binlog.test
      sql/abstract_query_plan.cc
      sql/abstract_query_plan.h
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster_binlog.cc
      sql/ha_ndbcluster_glue.h
      sql/ha_ndbcluster_push.cc
      sql/ha_ndbcluster_push.h
      sql/sql_partition.cc
      storage/ndb/CMakeLists.txt
      storage/ndb/VERSION
      storage/ndb/clusterj/clusterj-api/pom.xml
      storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/query/PredicateOperand.java
      storage/ndb/clusterj/clusterj-bindings/pom.xml
      storage/ndb/clusterj/clusterj-core/pom.xml
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/AbstractDomainFieldHandlerImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/ParameterImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PropertyImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/spi/DomainFieldHandler.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/ScanFilter.java
      storage/ndb/clusterj/clusterj-jdbc/pom.xml
      storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/InterceptorImpl.java
      storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/QueryExecutionContextJDBCImpl.java
      storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/SQLExecutor.java
      storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/ValueHandlerImpl.java
      storage/ndb/clusterj/clusterj-jdbc/src/test/java/jdbctest/BatchDeleteQueryAllPrimitivesTest.java
      storage/ndb/clusterj/clusterj-jdbc/src/test/java/jdbctest/BigIntegerTypesTest.java
      storage/ndb/clusterj/clusterj-jdbc/src/test/resources/clusterj.properties
      storage/ndb/clusterj/clusterj-jpatest/pom.xml
      storage/ndb/clusterj/clusterj-openjpa/pom.xml
      storage/ndb/clusterj/clusterj-test/pom.xml
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractQueryTest.java
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/QueryNotNullTest.java
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/QueryNullTest.java
      storage/ndb/clusterj/clusterj-tie/pom.xml
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ScanFilterImpl.java
      storage/ndb/clusterj/pom.xml
      storage/ndb/compile-cluster
      storage/ndb/include/CMakeLists.txt
      storage/ndb/include/portlib/NdbMutex.h
      storage/ndb/ndb_configure.cmake
      storage/ndb/src/common/debugger/signaldata/ScanTab.cpp
      storage/ndb/src/common/portlib/CMakeLists.txt
      storage/ndb/src/common/portlib/NdbCondition.c
      storage/ndb/src/common/portlib/NdbMutex.c
      storage/ndb/src/common/portlib/NdbThread.c
      storage/ndb/src/common/util/ndb_init.cpp
      storage/ndb/src/common/util/ndbzio.c
      storage/ndb/src/kernel/SimBlockList.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp
      storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/kernel/ndbd.cpp
      storage/ndb/src/kernel/vm/ArrayPool.hpp
      storage/ndb/src/kernel/vm/Configuration.cpp
      storage/ndb/src/kernel/vm/Emulator.cpp
      storage/ndb/src/kernel/vm/mt_thr_config.cpp
      storage/ndb/src/kernel/vm/mt_thr_config.hpp
      storage/ndb/src/mgmsrv/MgmtSrvr.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilder.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp
      storage/ndb/src/ndbapi/ndberror.c
      storage/ndb/src/ndbjtie/test/test/MySqlUtilsCharsetMapTest.java
      storage/ndb/test/ndbapi/testNdbApi.cpp
      storage/ndb/test/ndbapi/testRestartGci.cpp
      storage/ndb/test/ndbapi/test_event.cpp
      storage/ndb/test/run-test/daily-basic-tests.txt
      storage/ndb/test/src/NDBT_Find.cpp
      storage/ndb/test/tools/hugoJoin.cpp
      support-files/compiler_warnings.supp
 3386 magnus.blaudd@stripped	2011-10-05 [merge]
      Merge 5.5-cluster(3520) -> trunk-cluster

    modified:
      config.h.cmake
      storage/ndb/memcache/CMakeLists.txt
      storage/ndb/memcache/include/atomics.h
      storage/ndb/memcache/include/hash_item_util.h
      storage/ndb/memcache/include/ndb_engine.h
      storage/ndb/memcache/include/ndbmemcache_config.in
      storage/ndb/memcache/include/timing.h
      storage/ndb/memcache/src/ClusterConnectionPool.cc
      storage/ndb/memcache/src/QueryPlan.cc
      storage/ndb/memcache/src/TableSpec.cc
      storage/ndb/memcache/src/atomics.c
      storage/ndb/memcache/unit/all_tests.h
      storage/ndb/ndb_configure.cmake
 3385 magnus.blaudd@stripped	2011-10-05 [merge]
      Merge

    modified:
      mysql-test/suite/ndb_rpl/r/ndb_rpl_myisam2ndb.result
      mysql-test/suite/ndb_rpl/t/ndb_rpl_myisam2ndb.test
      mysql-test/suite/rpl_ndb/r/rpl_ndb_relayrotate.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_row_basic.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_sp006.result
      mysql-test/t/disabled.def
      mysql-test/t/mysql_plugin.test
=== modified file 'VERSION'
--- a/VERSION	2011-09-27 12:11:16 +0000
+++ b/VERSION	2011-10-05 07:24:39 +0000
@@ -1,4 +1,4 @@
 MYSQL_VERSION_MAJOR=5
 MYSQL_VERSION_MINOR=6
 MYSQL_VERSION_PATCH=4
-MYSQL_VERSION_EXTRA=-ndb-7.2.1-alpha
+MYSQL_VERSION_EXTRA=-ndb-7.2.2-alpha

=== modified file 'cmake/configure.pl'
--- a/cmake/configure.pl	2011-09-28 09:38:22 +0000
+++ b/cmake/configure.pl	2011-10-05 07:24:39 +0000
@@ -227,6 +227,11 @@ foreach my $option (@ARGV)
     $cmakeargs = $cmakeargs." \"-DWITH_NDB_CCFLAGS=".substr($option,17)."\"";
     next;
   }
+  if ($option =~ /cmake-args=/)
+  {
+    $cmakeargs = $cmakeargs." ".substr($option,11);
+    next;
+  }
 #endif
   if ($option =~ /with-gcov/)
   {

=== modified file 'config.h.cmake'
--- a/config.h.cmake	2011-09-21 11:01:41 +0000
+++ b/config.h.cmake	2011-10-05 06:42:12 +0000
@@ -461,7 +461,9 @@
 #cmakedefine _LARGE_FILES 1
 #cmakedefine _LARGEFILE_SOURCE 1
 #cmakedefine _LARGEFILE64_SOURCE 1
+#ifndef _FILE_OFFSET_BITS
 #cmakedefine _FILE_OFFSET_BITS @_FILE_OFFSET_BITS@
+#endif
 
 #cmakedefine TIME_WITH_SYS_TIME 1
 

=== modified file 'mysql-test/collections/default.experimental'
--- a/mysql-test/collections/default.experimental	2011-09-27 12:11:16 +0000
+++ b/mysql-test/collections/default.experimental	2011-10-05 07:24:39 +0000
@@ -34,13 +34,10 @@ rpl.rpl_show_slave_running              
 sys_vars.max_sp_recursion_depth_func @solaris # Bug#11753919 2010-01-20 alik Several test cases fail on Solaris with error Thread stack overrun
 sys_vars.wait_timeout_func                    # Bug#11750645 2010-04-26 alik wait_timeout_func fails
 
-# BUG #59055 : All ndb tests should be removed from the repository
-# Leaving the sys_vars tests for now. sys_vars.all_vars.test fails on removing ndb tests
-sys_vars.ndb_log_update_as_write_basic
-sys_vars.have_ndbcluster_basic
-sys_vars.ndb_log_updated_only_basic
 sys_vars.rpl_init_slave_func		 # Bug#12535301 2011-05-09 andrei sys_vars.rpl_init_slave_func mismatches in daily-5.5
 
 rpl.rpl_mixed_mts_rec_crash_safe @solaris          # Bug 12902514 2011-08-19 andrei mts recovery tests are slow
 rpl.rpl_mixed_mts_rec_crash_safe_checksum @solaris # same as rpl_mixed_mts_rec_crash_safe
 rpl.rpl_mixed_mts_crash_safe @solaris              # same as rpl_mixed_mts_rec_crash_safe
+
+rpl_ndb.rpl_ndb_mixed_implicit_commit_binlog # jonas: seems to fail randomly in PB2

=== modified file 'mysql-test/include/default_ndbd.cnf'
--- a/mysql-test/include/default_ndbd.cnf	2011-03-31 20:19:39 +0000
+++ b/mysql-test/include/default_ndbd.cnf	2011-09-29 17:47:44 +0000
@@ -4,7 +4,7 @@ MaxNoOfSavedMessages=          1000
 MaxNoOfConcurrentTransactions= 2048
 MaxNoOfConcurrentOperations=   10000
 DataMemory=                    20M
-IndexMemory=                   3M
+IndexMemory=                   5M
 Diskless=                      0
 TimeBetweenWatchDogCheck=      30000
 MaxNoOfOrderedIndexes=         128

=== modified file 'mysql-test/mysql-test-run.pl'
--- a/mysql-test/mysql-test-run.pl	2011-10-03 09:06:59 +0000
+++ b/mysql-test/mysql-test-run.pl	2011-10-05 07:24:39 +0000
@@ -390,6 +390,7 @@ sub main {
       }
     }
   }
+  mtr_report("opt_suites: $opt_suites");
 
   init_timers();
 

=== modified file 'mysql-test/suite/ndb/my.cnf'
--- a/mysql-test/suite/ndb/my.cnf	2011-09-05 09:32:41 +0000
+++ b/mysql-test/suite/ndb/my.cnf	2011-10-03 08:02:28 +0000
@@ -38,7 +38,7 @@ NodeId=255
 [mysqld]
 # Make all mysqlds use cluster
 ndbcluster
-ndb-wait-connected=20
+ndb-wait-connected=30
 ndb-wait-setup=120
 ndb-cluster-connection-pool=3
 ndb-extra-logging=99

=== modified file 'mysql-test/suite/ndb/r/ndb_config.result'
--- a/mysql-test/suite/ndb/r/ndb_config.result	2011-09-04 17:04:25 +0000
+++ b/mysql-test/suite/ndb/r/ndb_config.result	2011-09-29 19:16:31 +0000
@@ -1,10 +1,10 @@
 == 1 ==
 ndb_mgmd,3,localhost mysqld,49,localhost mysqld,16,localhost mysqld,32,localhost mysqld,48,localhost mysqld,63,localhost mysqld,127,localhost mysqld,192,localhost mysqld,228,localhost mysqld,229,localhost mysqld,230,localhost mysqld,231,localhost mysqld,232,localhost mysqld,233,localhost mysqld,255,localhost ndbd,1,localhost ndbd,2,localhost
 == 2 ==
-1,localhost,20971520,3145728 2,localhost,20971520,3145728
+1,localhost,20971520,5242880 2,localhost,20971520,5242880
 == 3 ==
-1 localhost 20971520 3145728
-2 localhost 20971520 3145728
+1 localhost 20971520 5242880
+2 localhost 20971520 5242880
 == 4 ==
 1 2
 == 5 ==

=== modified file 'mysql-test/suite/ndb/r/ndb_join_pushdown.result'
--- a/mysql-test/suite/ndb/r/ndb_join_pushdown.result	2011-10-03 08:05:59 +0000
+++ b/mysql-test/suite/ndb/r/ndb_join_pushdown.result	2011-10-05 07:24:39 +0000
@@ -2060,33 +2060,33 @@ drop table tx;
 alter table t1 partition by key(a);
 explain select count(*) from t1 
 join t1 as t2 on t2.a = t1.c 
-join t1 as t3 on t3.a = t1.c;
+join t1 as t3 on t3.a = t1.d;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	SIMPLE	t1	ALL	NULL	NULL	NULL	NULL	16	Parent of 3 pushed join@1
 1	SIMPLE	t2	ref	PRIMARY	PRIMARY	4	test.t1.c	1	Child of 't1' in pushed join@1
-1	SIMPLE	t3	ref	PRIMARY	PRIMARY	4	test.t1.c	1	Child of 't1' in pushed join@1
+1	SIMPLE	t3	ref	PRIMARY	PRIMARY	4	test.t1.d	1	Child of 't1' in pushed join@1
 select count(*) from t1 
 join t1 as t2 on t2.a = t1.c 
-join t1 as t3 on t3.a = t1.c;
+join t1 as t3 on t3.a = t1.d;
 count(*)
-208
+176
 CREATE TABLE tx (
 a int NOT NULL,
 PRIMARY KEY (`a`)
 );
 delete from t1;
 insert into tx values (0), (1), (2), (3), (4), (5), (6), (7), (8), (9);
-insert into t1 select 1, x1.a * 10+x2.a, 1, 0 from tx as x1 cross join tx as x2;
+insert into t1 select 1, x1.a * 10+x2.a, 1, 1 from tx as x1 cross join tx as x2;
 explain select count(*) from t1 as x1
 join t1 as x2 on x2.a = x1.c and x1.b < 2 
-join t1 as x3 on x3.a = x1.c;
+join t1 as x3 on x3.a = x1.d;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	SIMPLE	x1	ALL	NULL	NULL	NULL	NULL	100	Parent of 3 pushed join@1; Using where with pushed condition
 1	SIMPLE	x2	ref	PRIMARY	PRIMARY	4	test.x1.c	1	Child of 'x1' in pushed join@1
-1	SIMPLE	x3	ref	PRIMARY	PRIMARY	4	test.x1.c	1	Child of 'x1' in pushed join@1
+1	SIMPLE	x3	ref	PRIMARY	PRIMARY	4	test.x1.d	1	Child of 'x1' in pushed join@1
 select count(*) from t1 as x1
 join t1 as x2 on x2.a = x1.c and x1.b < 2 
-join t1 as x3 on x3.a = x1.c;
+join t1 as x3 on x3.a = x1.d;
 count(*)
 20000
 drop table t1;
@@ -2925,20 +2925,22 @@ a3	b3	c3	d3	a3	b3	c3	d3
 explain extended
 select straight_join * from
 t3 as x1
-join t3 as y1 on x1.b3 = y1.b3 and x1.d3 = y1.d3
-join t3 as x2 on x2.b3 = y1.b3
+join t3 as y1 on y1.b3 = x1.b3 and y1.d3 = x1.d3
+join t3 as x2 on x2.b3 = y1.b3+0
 join t3 as y2 on y2.b3 = x2.c3 and y2.d3 = x1.c3;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
-1	SIMPLE	x1	ALL	b3,c3,c3_2	NULL	NULL	NULL	7	100.00	Parent of 4 pushed join@1; Using where with pushed condition: ((`test`.`x1`.`d3` is not null) and (`test`.`x1`.`c3` is not null))
+1	SIMPLE	x1	ALL	b3,c3,c3_2	NULL	NULL	NULL	7	100.00	Parent of 2 pushed join@1; Using where with pushed condition: ((`test`.`x1`.`d3` is not null) and (`test`.`x1`.`c3` is not null))
 1	SIMPLE	y1	ref	b3	b3	9	test.x1.b3,test.x1.d3	1	100.00	Child of 'x1' in pushed join@1
-1	SIMPLE	x2	ref	b3,c3,c3_2	b3	4	test.x1.b3	1	100.00	Child of 'x1' in pushed join@1; Using where with pushed condition: (`test`.`x2`.`c3` is not null)
-1	SIMPLE	y2	ref	b3	b3	9	test.x2.c3,test.x1.c3	1	100.00	Child of 'x2' in pushed join@1
+1	SIMPLE	x2	ref	b3,c3,c3_2	b3	4	func	1	100.00	Parent of 2 pushed join@2; Using where with pushed condition: (`test`.`x2`.`c3` is not null)
+1	SIMPLE	y2	ref	b3	b3	9	test.x2.c3,test.x1.c3	1	100.00	Child of 'x2' in pushed join@2
 Warnings:
-Note	1003	/* select#1 */ select straight_join `test`.`x1`.`a3` AS `a3`,`test`.`x1`.`b3` AS `b3`,`test`.`x1`.`c3` AS `c3`,`test`.`x1`.`d3` AS `d3`,`test`.`y1`.`a3` AS `a3`,`test`.`y1`.`b3` AS `b3`,`test`.`y1`.`c3` AS `c3`,`test`.`y1`.`d3` AS `d3`,`test`.`x2`.`a3` AS `a3`,`test`.`x2`.`b3` AS `b3`,`test`.`x2`.`c3` AS `c3`,`test`.`x2`.`d3` AS `d3`,`test`.`y2`.`a3` AS `a3`,`test`.`y2`.`b3` AS `b3`,`test`.`y2`.`c3` AS `c3`,`test`.`y2`.`d3` AS `d3` from `test`.`t3` `x1` join `test`.`t3` `y1` join `test`.`t3` `x2` join `test`.`t3` `y2` where ((`test`.`y1`.`d3` = `test`.`x1`.`d3`) and (`test`.`y1`.`b3` = `test`.`x1`.`b3`) and (`test`.`x2`.`b3` = `test`.`x1`.`b3`) and (`test`.`y2`.`d3` = `test`.`x1`.`c3`) and (`test`.`y2`.`b3` = `test`.`x2`.`c3`))
+Note	9999	Can't push table 'x2' as child, column 'b3' does neither 'ref' a column nor a constant
+Note	9999	Can't push table 'y2' as child of 'x1', column 'x2.c3' is outside scope of pushable join
+Note	1003	/* select#1 */ select straight_join `test`.`x1`.`a3` AS `a3`,`test`.`x1`.`b3` AS `b3`,`test`.`x1`.`c3` AS `c3`,`test`.`x1`.`d3` AS `d3`,`test`.`y1`.`a3` AS `a3`,`test`.`y1`.`b3` AS `b3`,`test`.`y1`.`c3` AS `c3`,`test`.`y1`.`d3` AS `d3`,`test`.`x2`.`a3` AS `a3`,`test`.`x2`.`b3` AS `b3`,`test`.`x2`.`c3` AS `c3`,`test`.`x2`.`d3` AS `d3`,`test`.`y2`.`a3` AS `a3`,`test`.`y2`.`b3` AS `b3`,`test`.`y2`.`c3` AS `c3`,`test`.`y2`.`d3` AS `d3` from `test`.`t3` `x1` join `test`.`t3` `y1` join `test`.`t3` `x2` join `test`.`t3` `y2` where ((`test`.`y1`.`d3` = `test`.`x1`.`d3`) and (`test`.`y1`.`b3` = `test`.`x1`.`b3`) and (`test`.`y2`.`d3` = `test`.`x1`.`c3`) and (`test`.`y2`.`b3` = `test`.`x2`.`c3`) and (`test`.`x2`.`b3` = (`test`.`x1`.`b3` + 0)))
 select straight_join * from
 t3 as x1
-join t3 as y1 on x1.b3 = y1.b3 and x1.d3 = y1.d3
-join t3 as x2 on x2.b3 = y1.b3
+join t3 as y1 on y1.b3 = x1.b3 and y1.d3 = x1.d3
+join t3 as x2 on x2.b3 = y1.b3+0
 join t3 as y2 on y2.b3 = x2.c3 and y2.d3 = x1.c3;
 a3	b3	c3	d3	a3	b3	c3	d3	a3	b3	c3	d3	a3	b3	c3	d3
 prepare stmt1 from
@@ -4773,23 +4775,37 @@ PRIMARY KEY (`a`,`b`)
 insert into t2 values (0), (1), (2), (3), (4), (5), (6), (7), (8), (9);
 insert into t3 select 1, x1.a * 10+x2.a from t2 as x1 cross join t2 as x2;
 explain select straight_join count(*) from t1 as x0  
-join t3 as x1 on x0.c=x1.a  
-join t1 as x2 on x0.c=x2.a 
-join t3 as x3 on x2.c=x3.a  
-join t1 as x4 on x0.d=x4.a and x3.b=x4.b;
+join t3 as x1 on x1.a=x0.c
+join t1 as x2 on x2.a=x0.d
+join t3 as x3 on x3.a=x2.c
+join t1 as x4 on x4.a=x0.d and x4.b=x3.b;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	SIMPLE	x0	ALL	NULL	NULL	NULL	NULL	4	Parent of 5 pushed join@1
 1	SIMPLE	x1	ref	PRIMARY	PRIMARY	4	test.x0.c	1	Child of 'x0' in pushed join@1
-1	SIMPLE	x2	ref	PRIMARY	PRIMARY	4	test.x0.c	1	Child of 'x0' in pushed join@1
+1	SIMPLE	x2	ref	PRIMARY	PRIMARY	4	test.x0.d	1	Child of 'x0' in pushed join@1
 1	SIMPLE	x3	ref	PRIMARY	PRIMARY	4	test.x2.c	1	Child of 'x2' in pushed join@1
 1	SIMPLE	x4	eq_ref	PRIMARY	PRIMARY	8	test.x0.d,test.x3.b	1	Child of 'x3' in pushed join@1
 select straight_join count(*) from t1 as x0  
-join t3 as x1 on x0.c=x1.a  
-join t1 as x2 on x0.c=x2.a 
-join t3 as x3 on x2.c=x3.a  
-join t1 as x4 on x0.d=x4.a and x3.b=x4.b;
+join t3 as x1 on x1.a=x0.c
+join t1 as x2 on x2.a=x0.d
+join t3 as x3 on x3.a=x2.c
+join t1 as x4 on x4.a=x0.d and x4.b=x3.b;
 count(*)
 4800
+explain select straight_join count(*) from t1 as x1  
+join t1 as x2 on x1.c=x2.a and x2.d=2
+join t3 as x3 on x1.d=x3.a;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	x1	ALL	NULL	NULL	NULL	NULL	4	Parent of 3 pushed join@1
+1	SIMPLE	x2	ref	PRIMARY	PRIMARY	4	test.x1.c	1	Child of 'x1' in pushed join@1; Using where with pushed condition
+1	SIMPLE	x3	ref	PRIMARY	PRIMARY	4	test.x1.d	1	Child of 'x1' in pushed join@1
+select straight_join count(*) from t1 as x1  
+join t1 as x2 on x1.c=x2.a and x2.d=2
+join t3 as x3 on x1.d=x3.a;
+count(*)
+300
+Local_range_scans
+4
 drop table t1;
 drop table t2;
 drop table t3;
@@ -5469,11 +5485,11 @@ and spj_counts_at_end.counter_name <> 'R
        and spj_counts_at_end.counter_name <> 'SCAN_BATCHES_RETURNED';
 counter_name	spj_counts_at_end.val - spj_counts_at_startup.val
 CONST_PRUNED_RANGE_SCANS_RECEIVED	6
-LOCAL_TABLE_SCANS_SENT	248
+LOCAL_TABLE_SCANS_SENT	250
 PRUNED_RANGE_SCANS_RECEIVED	25
-RANGE_SCANS_RECEIVED	727
+RANGE_SCANS_RECEIVED	733
 READS_RECEIVED	58
-TABLE_SCANS_RECEIVED	248
+TABLE_SCANS_RECEIVED	250
 drop table spj_counts_at_startup;
 drop table spj_counts_at_end;
 scan_count_derived
@@ -5483,9 +5499,9 @@ pruned_scan_count
 sorted_scan_count
 40
 pushed_queries_defined
-397
+401
 pushed_queries_dropped
 12
 pushed_queries_executed
-548
+552
 set ndb_join_pushdown = @save_ndb_join_pushdown;

=== modified file 'mysql-test/suite/ndb/r/ndb_temporary.result'
--- a/mysql-test/suite/ndb/r/ndb_temporary.result	2011-01-25 09:45:42 +0000
+++ b/mysql-test/suite/ndb/r/ndb_temporary.result	2011-09-30 08:27:11 +0000
@@ -7,7 +7,7 @@ ERROR HY000: Table storage engine 'ndbcl
 drop table t1;
 CREATE TABLE bar ( id TINYINT NOT NULL AUTO_INCREMENT PRIMARY KEY ) ENGINE=NDBCluster ;
 CREATE TEMPORARY TABLE foo LIKE bar ;
-ERROR HY000: Can't create table 'test.foo' (errno: 1478)
+ERROR HY000: Cannot create temporary table with partitions
 DROP TABLE bar;
 SET SESSION storage_engine=NDBCLUSTER;
 create table t1 (a int key);

=== modified file 'mysql-test/suite/ndb/t/disabled.def'
--- a/mysql-test/suite/ndb/t/disabled.def	2011-07-08 09:34:07 +0000
+++ b/mysql-test/suite/ndb/t/disabled.def	2011-10-05 07:24:39 +0000
@@ -18,8 +18,6 @@ ndb_disconnect_ddl        : Bug#31853 fl
 
 ndb_sql_allow_batching   : SEAGULL WL3733 xxx_allow_batching
 
-ndb_temporary : SEAGULL patch to disallow temporary tables does not work
-
 ndb_dbug_lock : SEAGULL gsl locking order
 
 ndb_ddl_open_trans : SEAGULL mdl locking...

=== modified file 'mysql-test/suite/ndb/t/ndb_join_pushdown.test'
--- a/mysql-test/suite/ndb/t/ndb_join_pushdown.test	2011-09-09 13:21:05 +0000
+++ b/mysql-test/suite/ndb/t/ndb_join_pushdown.test	2011-09-29 13:11:52 +0000
@@ -1025,10 +1025,10 @@ connection spj;
 
 explain select count(*) from t1 
   join t1 as t2 on t2.a = t1.c 
-  join t1 as t3 on t3.a = t1.c;
+  join t1 as t3 on t3.a = t1.d;
 select count(*) from t1 
   join t1 as t2 on t2.a = t1.c 
-  join t1 as t3 on t3.a = t1.c;
+  join t1 as t3 on t3.a = t1.d;
 
 # Test bushy join with pruned scan and larger result set.
 
@@ -1043,14 +1043,14 @@ delete from t1;
 
 insert into tx values (0), (1), (2), (3), (4), (5), (6), (7), (8), (9);
 
-insert into t1 select 1, x1.a * 10+x2.a, 1, 0 from tx as x1 cross join tx as x2;
+insert into t1 select 1, x1.a * 10+x2.a, 1, 1 from tx as x1 cross join tx as x2;
 
 explain select count(*) from t1 as x1
   join t1 as x2 on x2.a = x1.c and x1.b < 2 
-  join t1 as x3 on x3.a = x1.c;
+  join t1 as x3 on x3.a = x1.d;
 select count(*) from t1 as x1
   join t1 as x2 on x2.a = x1.c and x1.b < 2 
-  join t1 as x3 on x3.a = x1.c;
+  join t1 as x3 on x3.a = x1.d;
 
 connection ddl;
 drop table t1;
@@ -1656,14 +1656,14 @@ select straight_join * 
 explain extended
 select straight_join * from
   t3 as x1
-  join t3 as y1 on x1.b3 = y1.b3 and x1.d3 = y1.d3
-  join t3 as x2 on x2.b3 = y1.b3
+  join t3 as y1 on y1.b3 = x1.b3 and y1.d3 = x1.d3
+  join t3 as x2 on x2.b3 = y1.b3+0
   join t3 as y2 on y2.b3 = x2.c3 and y2.d3 = x1.c3;
 --sorted_result
 select straight_join * from
   t3 as x1
-  join t3 as y1 on x1.b3 = y1.b3 and x1.d3 = y1.d3
-  join t3 as x2 on x2.b3 = y1.b3
+  join t3 as y1 on y1.b3 = x1.b3 and y1.d3 = x1.d3
+  join t3 as x2 on x2.b3 = y1.b3+0
   join t3 as y2 on y2.b3 = x2.c3 and y2.d3 = x1.c3;
 
 
@@ -3312,16 +3312,38 @@ connection spj;
 insert into t3 select 1, x1.a * 10+x2.a from t2 as x1 cross join t2 as x2;
 
 explain select straight_join count(*) from t1 as x0  
-   join t3 as x1 on x0.c=x1.a  
-   join t1 as x2 on x0.c=x2.a 
-   join t3 as x3 on x2.c=x3.a  
-   join t1 as x4 on x0.d=x4.a and x3.b=x4.b;
+   join t3 as x1 on x1.a=x0.c
+   join t1 as x2 on x2.a=x0.d
+   join t3 as x3 on x3.a=x2.c
+   join t1 as x4 on x4.a=x0.d and x4.b=x3.b;
 
 select straight_join count(*) from t1 as x0  
-   join t3 as x1 on x0.c=x1.a  
-   join t1 as x2 on x0.c=x2.a 
-   join t3 as x3 on x2.c=x3.a  
-   join t1 as x4 on x0.d=x4.a and x3.b=x4.b;
+   join t3 as x1 on x1.a=x0.c
+   join t1 as x2 on x2.a=x0.d
+   join t3 as x3 on x3.a=x2.c
+   join t1 as x4 on x4.a=x0.d and x4.b=x3.b;
+
+# If the first batch of an index scan has low parallelism and returns few rows,
+# there is a mechanism that will try to query the remaining fragments within
+# the same batch. This is done in order to avoid repeating other branches of 
+# a bushy scan whenever possible. This is a test of that mechanism. Scan
+# of x2 should return only one row. Therefore we should be able to fetch
+# x2 in one batch and scan x3 only once.
+
+let $scan_rows = query_get_value(select sum(val) as Value from ndbinfo.counters where block_name='DBSPJ' and counter_name='LOCAL_RANGE_SCANS_SENT', Value, 1);
+
+explain select straight_join count(*) from t1 as x1  
+   join t1 as x2 on x1.c=x2.a and x2.d=2
+   join t3 as x3 on x1.d=x3.a;  
+
+select straight_join count(*) from t1 as x1  
+   join t1 as x2 on x1.c=x2.a and x2.d=2
+   join t3 as x3 on x1.d=x3.a;
+
+--disable_query_log
+--eval select sum(val) - $scan_rows as Local_range_scans from ndbinfo.counters where block_name='DBSPJ' and counter_name='LOCAL_RANGE_SCANS_SENT';
+--enable_query_log
+
 
 connection ddl;
 drop table t1;

=== modified file 'mysql-test/suite/ndb/t/ndb_temporary.test'
--- a/mysql-test/suite/ndb/t/ndb_temporary.test	2011-01-25 09:50:48 +0000
+++ b/mysql-test/suite/ndb/t/ndb_temporary.test	2011-09-30 08:27:11 +0000
@@ -24,7 +24,7 @@ drop table t1;
 # create temporary like on an ndb table should give an error (bug#57437)
 #
 CREATE TABLE bar ( id TINYINT NOT NULL AUTO_INCREMENT PRIMARY KEY ) ENGINE=NDBCluster ;
---error ER_CANT_CREATE_TABLE
+--error ER_PARTITION_NO_TEMPORARY
 CREATE TEMPORARY TABLE foo LIKE bar ;
 DROP TABLE bar;
 

=== modified file 'mysql-test/suite/ndb_binlog/my.cnf'
--- a/mysql-test/suite/ndb_binlog/my.cnf	2011-09-06 10:10:36 +0000
+++ b/mysql-test/suite/ndb_binlog/my.cnf	2011-09-22 17:03:53 +0000
@@ -11,7 +11,7 @@ ndbapi=,,,,,,,,,,,
 [mysqld]
 # Make all mysqlds use cluster
 ndbcluster
-ndb-wait-connected=20
+ndb-wait-connected=30
 ndb-wait-setup=120
 ndb-cluster-connection-pool=3
 ndb-extra-logging=99

=== modified file 'mysql-test/suite/ndb_binlog/t/ndb_binlog_log_transaction_id-master.opt'
--- a/mysql-test/suite/ndb_binlog/t/ndb_binlog_log_transaction_id-master.opt	2011-09-07 22:50:01 +0000
+++ b/mysql-test/suite/ndb_binlog/t/ndb_binlog_log_transaction_id-master.opt	2011-09-28 09:40:14 +0000
@@ -1 +1 @@
---ndb-log-transaction-id
+--ndb-log-transaction-id --log-bin-use-v1-row-events=false

=== modified file 'mysql-test/suite/ndb_rpl/my.cnf'
--- a/mysql-test/suite/ndb_rpl/my.cnf	2011-09-06 10:10:36 +0000
+++ b/mysql-test/suite/ndb_rpl/my.cnf	2011-09-22 17:03:53 +0000
@@ -21,7 +21,7 @@ ndbapi=,,,,
 [mysqld]
 # Make all mysqlds use cluster
 ndbcluster
-ndb-wait-connected=20
+ndb-wait-connected=30
 ndb-wait-setup=120
 ndb-cluster-connection-pool=3
 slave-allow-batching

=== modified file 'mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_epoch_trans.cnf'
--- a/mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_epoch_trans.cnf	2011-09-09 09:30:43 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_epoch_trans.cnf	2011-09-28 10:46:30 +0000
@@ -11,10 +11,14 @@
 # Potential infinite loops are broken by both servers
 # on each cluster having the same server-id
 
+[cluster_config.slave]
+mysqld=,
+
 [mysqld]
 log-slave-updates
 ndb-log-apply-status
 ndb-log-transaction-id
+log-bin-use-v1-row-events=false
 
 [mysqld.1.1]
 server-id= 1

=== modified file 'mysql-test/suite/ndb_team/my.cnf'
--- a/mysql-test/suite/ndb_team/my.cnf	2011-09-06 10:10:36 +0000
+++ b/mysql-test/suite/ndb_team/my.cnf	2011-10-01 15:05:42 +0000
@@ -11,21 +11,12 @@ ndbapi=,,,,,,,,,
 [mysqld]
 # Make all mysqlds use cluster
 ndbcluster
-ndb-wait-connected=20
+ndb-wait-connected=30
 ndb-wait-setup=120
 ndb-cluster-connection-pool=3
 ndb-extra-logging=99
 ndb-index-stat-enable=0
 
-[mysqld.1.1]
-
-[mysqld.1.1]
-
-# Directory where slaves find the dumps generated by "load data"
-# on the server. The path need to have constant length otherwise
-# test results will vary, thus a relative path is used.
-slave-load-tmpdir=            ../../../tmp
-
 [ENV]
 NDB_CONNECTSTRING=            @mysql_cluster.1.ndb_connectstring
 MASTER_MYPORT=                @mysqld.1.1.port

=== modified file 'mysql-test/suite/ndb_team/t/ndb_autodiscover3.test'
--- a/mysql-test/suite/ndb_team/t/ndb_autodiscover3.test	2010-03-17 10:50:18 +0000
+++ b/mysql-test/suite/ndb_team/t/ndb_autodiscover3.test	2011-09-30 06:29:58 +0000
@@ -79,6 +79,14 @@ select * from t2 order by a limit 3;
 select * from t2;
 show tables like 't2';
 reset master;
+#
+# Add this in 5.5...I don't want to disable all test
+#   btw, we should add a new testcase for this
+--disable_query_log
+--disable_result_log
+drop table if exists t2;
+--enable_result_log
+--enable_query_log
 create table t2 (a int key) engine=ndbcluster;
 insert into t2 values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
 select * from t2 order by a limit 3;
@@ -109,6 +117,15 @@ reset master;
 select * from t2;
 show tables like 't2';
 reset master;
+#
+# TODO 5.5 failure...
+# TODO we should write more comprehensive testcase for this
+#
+--disable_query_log
+--disable_result_log
+drop table if exists t2;
+--enable_result_log
+--enable_query_log
 create table t2 (a int key) engine=ndbcluster;
 insert into t2 values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
 select * from t2 order by a limit 3;

=== modified file 'mysql-test/suite/rpl_ndb/my.cnf'
--- a/mysql-test/suite/rpl_ndb/my.cnf	2011-09-06 10:10:36 +0000
+++ b/mysql-test/suite/rpl_ndb/my.cnf	2011-09-22 17:03:53 +0000
@@ -21,7 +21,7 @@ ndbapi=,,,,
 [mysqld]
 # Make all mysqlds use cluster
 ndbcluster
-ndb-wait-connected=20
+ndb-wait-connected=30
 ndb-wait-setup=120
 ndb-cluster-connection-pool=3
 slave-allow-batching

=== modified file 'mysql-test/suite/rpl_ndb/t/disabled.def'
--- a/mysql-test/suite/rpl_ndb/t/disabled.def	2011-05-25 06:47:03 +0000
+++ b/mysql-test/suite/rpl_ndb/t/disabled.def	2011-09-30 08:49:22 +0000
@@ -12,5 +12,4 @@
 
 rpl_ndb_set_null       : bug #51100
 rpl_ndb_row_implicit_commit_binlog : BUG#12586131 - fails on SET PASSWORD
-rpl_ndb_mixed_implicit_commit_binlog : BUG#12586131 - fails on SET PASSWORD
 

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_mixed_implicit_commit_binlog.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_mixed_implicit_commit_binlog.test	2011-04-19 12:19:23 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_mixed_implicit_commit_binlog.test	2011-09-30 08:49:22 +0000
@@ -3,7 +3,7 @@
 ################################################################################
 --source include/have_binlog_format_mixed.inc
 --source include/have_ndb.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 --let $engine=NDB
 --source extra/rpl_tests/rpl_implicit_commit_binlog.test

=== modified file 'sql/abstract_query_plan.cc'
--- a/sql/abstract_query_plan.cc	2011-09-30 10:24:10 +0000
+++ b/sql/abstract_query_plan.cc	2011-10-05 07:24:39 +0000
@@ -257,6 +257,33 @@ namespace AQP
     return get_join_tab()->table;
   }
 
+  double Table_access::get_fanout() const
+  {
+    switch (get_access_type())
+    {
+      case AT_PRIMARY_KEY:
+      case AT_UNIQUE_KEY:
+        return 1.0;
+
+      case AT_ORDERED_INDEX_SCAN:
+        DBUG_ASSERT(get_join_tab()->join->best_positions[m_tab_no].records_read>0.0);
+        return get_join_tab()->join->best_positions[m_tab_no].records_read;
+
+      case AT_MULTI_PRIMARY_KEY:
+      case AT_MULTI_UNIQUE_KEY:
+      case AT_MULTI_MIXED:
+        DBUG_ASSERT(get_join_tab()->join->best_positions[m_tab_no].records_read>0.0);
+        return get_join_tab()->join->best_positions[m_tab_no].records_read;
+
+      case AT_TABLE_SCAN:
+        DBUG_ASSERT(get_join_tab()->table->file->stats.records>0.0);
+        return static_cast<double>(get_join_tab()->table->file->stats.records);
+
+      default:
+        return 99999999.0;
+    }
+  }
+
   /** Get the JOIN_TAB object that corresponds to this operation.*/
   const JOIN_TAB* Table_access::get_join_tab() const
   {

=== modified file 'sql/abstract_query_plan.h'
--- a/sql/abstract_query_plan.h	2011-09-14 13:56:17 +0000
+++ b/sql/abstract_query_plan.h	2011-09-28 10:55:58 +0000
@@ -205,6 +205,8 @@ namespace AQP
 
     TABLE* get_table() const;
 
+    double get_fanout() const;
+
     Item_equal* get_item_equal(const Item_field* field_item) const;
 
     void dbug_print() const;

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2011-10-03 12:34:35 +0000
+++ b/sql/ha_ndbcluster.cc	2011-10-05 07:24:39 +0000
@@ -12882,6 +12882,36 @@ static int ndbcluster_end(handlerton *ht
   }
   my_hash_free(&ndbcluster_open_tables);
 
+  {
+    pthread_mutex_lock(&ndbcluster_mutex);
+    uint save = ndbcluster_dropped_tables.records; (void)save;
+    while (ndbcluster_dropped_tables.records)
+    {
+      NDB_SHARE *share=
+        (NDB_SHARE*) my_hash_element(&ndbcluster_dropped_tables, 0);
+#ifndef DBUG_OFF
+      fprintf(stderr,
+              "NDB: table share %s with use_count %d state: %s(%u) not freed\n",
+              share->key, share->use_count,
+              get_share_state_string(share->state),
+              (uint)share->state);
+      /**
+       * For unknown reasons...the dist-priv tables linger here
+       * TODO investigate why
+       */
+      if (Ndb_dist_priv_util::is_distributed_priv_table(share->db,
+                                                        share->table_name))
+      {
+        save--;
+      }
+#endif
+      ndbcluster_real_free_share(&share);
+    }
+    pthread_mutex_unlock(&ndbcluster_mutex);
+    DBUG_ASSERT(save == 0);
+  }
+  my_hash_free(&ndbcluster_dropped_tables);
+
   ndb_index_stat_end();
   ndbcluster_disconnect();
 
@@ -17828,7 +17858,7 @@ static MYSQL_SYSVAR_ULONG(
   "to cluster management and data nodes.",
   NULL,                              /* check func. */
   NULL,                              /* update func. */
-  0,                                 /* default */
+  30,                                /* default */
   0,                                 /* min */
   ONE_YEAR_IN_SECONDS,               /* max */
   0                                  /* block */
@@ -17843,7 +17873,7 @@ static MYSQL_SYSVAR_ULONG(
   "complete (0 = no wait)",
   NULL,                              /* check func. */
   NULL,                              /* update func. */
-  15,                                /* default */
+  30,                                /* default */
   0,                                 /* min */
   ONE_YEAR_IN_SECONDS,               /* max */
   0                                  /* block */

=== modified file 'sql/ha_ndbcluster_binlog.cc'
--- a/sql/ha_ndbcluster_binlog.cc	2011-10-04 12:53:30 +0000
+++ b/sql/ha_ndbcluster_binlog.cc	2011-10-05 07:24:39 +0000
@@ -3234,7 +3234,7 @@ ndb_binlog_index_table__open(THD *thd,
   if (open_and_lock_tables(thd, &tables, derived, flags))
   {
     if (thd->killed)
-      sql_print_error("NDB Binlog: Opening ndb_binlog_index: killed");
+      DBUG_PRINT("error", ("NDB Binlog: Opening ndb_binlog_index: killed"));
     else
       sql_print_error("NDB Binlog: Opening ndb_binlog_index: %d, '%s'",
                       thd->get_stmt_da()->sql_errno(),
@@ -3274,7 +3274,10 @@ ndb_binlog_index_table__write_rows(THD *
 
   if (ndb_binlog_index_table__open(thd, &ndb_binlog_index))
   {
-    sql_print_error("NDB Binlog: Unable to open ndb_binlog_index table");
+    if (thd->killed)
+      DBUG_PRINT("error", ("NDB Binlog: Unable to lock table ndb_binlog_index, killed"));
+    else
+      sql_print_error("NDB Binlog: Unable to lock table ndb_binlog_index");
     error= -1;
     goto add_ndb_binlog_index_err;
   }
@@ -7220,6 +7223,7 @@ restart_cluster_failure:
               */
               if (thd->killed)
               {
+                DBUG_PRINT("error", ("Failed to write to ndb_binlog_index at shutdown, retrying"));
                 (void) mysql_mutex_lock(&LOCK_thread_count);
                 volatile THD::killed_state killed= thd->killed;
                 /* We are cleaning up, allow for flushing last epoch */

=== modified file 'sql/ha_ndbcluster_glue.h'
--- a/sql/ha_ndbcluster_glue.h	2011-09-30 15:38:44 +0000
+++ b/sql/ha_ndbcluster_glue.h	2011-10-05 07:32:06 +0000
@@ -77,6 +77,7 @@ bool close_cached_tables(THD *thd, TABLE
 
 #endif
 
+
 extern ulong opt_server_id_mask;
 
 static inline

=== modified file 'sql/ha_ndbcluster_push.cc'
--- a/sql/ha_ndbcluster_push.cc	2011-10-03 08:05:59 +0000
+++ b/sql/ha_ndbcluster_push.cc	2011-10-05 07:24:39 +0000
@@ -1052,12 +1052,20 @@ ndb_pushed_builder_ctx::optimize_query_p
   DBUG_ENTER("optimize_query_plan");
   const uint root_no= m_join_root->get_access_no();
 
+  for (uint tab_no= root_no; tab_no<m_plan.get_access_count(); tab_no++)
+  {
+    if (m_join_scope.contain(tab_no))
+    {
+      m_tables[tab_no].m_fanout = m_plan.get_table_access(tab_no)->get_fanout();
+      m_tables[tab_no].m_child_fanout = 1.0;
+    }
+  }
+
   // Find an optimal order for joining the tables
   for (uint tab_no= m_plan.get_access_count()-1;
        tab_no > root_no;
        tab_no--)
   {
-    struct pushed_tables &table= m_tables[tab_no];
     if (!m_join_scope.contain(tab_no))
       continue;
 
@@ -1067,6 +1075,7 @@ ndb_pushed_builder_ctx::optimize_query_p
      * don't skip any dependent parents from our ancestors
      * when selecting the actuall 'm_parent' to be used.
      */
+    pushed_tables &table= m_tables[tab_no];
     if (!table.m_depend_parents.is_clear_all())
     {
       ndb_table_access_map const &dependency= table.m_depend_parents;
@@ -1104,12 +1113,40 @@ ndb_pushed_builder_ctx::optimize_query_p
 
     /**
      * In order to take advantage of the parallelism in the SPJ block; 
-     * Choose the first possible parent candidate. Will result in the
-     * most 'bushy' query plan (aka: star-join)
+     * Initial parent candidate is the first possible among 'parents'.
+     * Will result in the most 'bushy' query plan (aka: star-join)
      */
     parent_no= parents.first_table(root_no);
+
+    if (table.m_fanout*table.m_child_fanout > 1.0 ||
+        !ndbcluster_is_lookup_operation(m_plan.get_table_access(tab_no)->get_access_type()))
+    {
+      /**
+       * This is an index-scan, or a lookup with scan children.
+       * Push optimization for index-scan execution:
+       *
+       * These are relatively expensive operations which we try to avoid
+       * executing whenever possible. By making them depend on parent
+       * operations with high selectivity, they will be eliminated when
+       * the parent returns no matching rows.
+       *
+       * -> Execute index-scan after any such parents
+       */
+      for (uint candidate= parent_no+1; candidate<parents.length(); candidate++)
+      {
+        if (parents.contain(candidate))
+        {
+          if (m_tables[candidate].m_fanout > 1.0)
+            break;
+
+          parent_no= candidate;     // Parent candidate is selective, eval after
+        }
+      }
+    }
+
     DBUG_ASSERT(parent_no < tab_no);
     table.m_parent= parent_no;
+    m_tables[parent_no].m_child_fanout*= table.m_fanout*table.m_child_fanout;
 
     ndb_table_access_map dependency(table.m_depend_parents);
     dependency.clear_bit(parent_no);
@@ -1123,7 +1160,7 @@ ndb_pushed_builder_ctx::optimize_query_p
   {
     if (m_join_scope.contain(tab_no))
     {
-      struct pushed_tables &table= m_tables[tab_no];
+      pushed_tables &table= m_tables[tab_no];
       const uint parent_no= table.m_parent;
       table.m_ancestors= m_tables[parent_no].m_ancestors;
       table.m_ancestors.add(parent_no);

=== modified file 'sql/ha_ndbcluster_push.h'
--- a/sql/ha_ndbcluster_push.h	2011-10-03 08:05:59 +0000
+++ b/sql/ha_ndbcluster_push.h	2011-10-05 07:24:39 +0000
@@ -279,6 +279,8 @@ private:
       m_depend_parents(), 
       m_parent(MAX_TABLES), 
       m_ancestors(), 
+      m_fanout(1.0),
+      m_child_fanout(1.0),
       m_op(NULL) 
     {}
 
@@ -313,6 +315,16 @@ private:
      */
     ndb_table_access_map m_ancestors;
 
+    /**
+     * The fanout of this table.
+     */
+    double m_fanout;
+
+    /**
+     * The (cross) product of all child fanouts.
+     */
+    double m_child_fanout;
+
     const NdbQueryOperationDef* m_op;
   } m_tables[MAX_TABLES];
 

=== modified file 'sql/sql_partition.cc'
--- a/sql/sql_partition.cc	2011-09-23 10:44:30 +0000
+++ b/sql/sql_partition.cc	2011-10-05 07:24:39 +0000
@@ -4728,9 +4728,9 @@ uint prep_alter_part_table(THD *thd, TAB
       /* 'ALTER TABLE t REORG PARTITION' only allowed with auto partition 
           if default partitioning is used */
       if (tab_part_info->part_type != HASH_PARTITION ||
-          (table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION &&
+          ((table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION) &&
            !tab_part_info->use_default_num_partitions) ||
-          ((!table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION) &&
+          ((!(table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION))&&
            tab_part_info->use_default_num_partitions))
 #else
       if (tab_part_info->part_type != HASH_PARTITION ||

=== modified file 'storage/ndb/CMakeLists.txt'
--- a/storage/ndb/CMakeLists.txt	2011-10-03 10:43:39 +0000
+++ b/storage/ndb/CMakeLists.txt	2011-10-05 07:24:39 +0000
@@ -59,6 +59,7 @@ IF(CMAKE_CXX_FLAGS)
   STRING(REPLACE "-Werror" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
 ENDIF()
 
+
 NDB_CHECK_MYSQL_CLUSTER(${VERSION})
 
 #
@@ -114,7 +115,11 @@ IF (NOT DEFINED WITH_NDBCLUSTER)
   RETURN()
 ENDIF()
 
-MESSAGE(STATUS "Building NDB")
+IF(CMAKE_SIZEOF_VOID_P EQUAL 4)
+  MESSAGE(STATUS "Building NDB 32-bit")
+ELSE()
+  MESSAGE(STATUS "Building NDB 64-bit")
+ENDIF()
 
 INCLUDE(${CMAKE_CURRENT_SOURCE_DIR}/ndb_configure.cmake)
 

=== modified file 'storage/ndb/VERSION'
--- a/storage/ndb/VERSION	2011-09-07 10:08:09 +0000
+++ b/storage/ndb/VERSION	2011-10-05 07:24:39 +0000
@@ -2,5 +2,5 @@
 # Should be updated when creating a new NDB version
 NDB_VERSION_MAJOR=7
 NDB_VERSION_MINOR=2
-NDB_VERSION_BUILD=1
+NDB_VERSION_BUILD=2
 NDB_VERSION_STATUS=""

=== modified file 'storage/ndb/clusterj/clusterj-api/pom.xml'
--- a/storage/ndb/clusterj/clusterj-api/pom.xml	2011-07-05 22:25:18 +0000
+++ b/storage/ndb/clusterj/clusterj-api/pom.xml	2011-09-26 13:51:16 +0000
@@ -20,13 +20,13 @@
   <parent>
     <groupId>com.mysql.clusterj</groupId>
     <artifactId>clusterj-aggregate</artifactId>
-    <version>7.1.16-SNAPSHOT</version>
+    <version>7.1.17-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>com.mysql.clusterj</groupId>
   <artifactId>clusterj-api</artifactId>
   <packaging>bundle</packaging>
-  <version>7.1.16-SNAPSHOT</version>
+  <version>7.1.17-SNAPSHOT</version>
   <name>ClusterJ API</name>
   <description>The API for ClusterJ</description>
   <build>

=== modified file 'storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/query/PredicateOperand.java'
--- a/storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/query/PredicateOperand.java	2011-05-26 21:04:45 +0000
+++ b/storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/query/PredicateOperand.java	2011-10-02 21:20:50 +0000
@@ -88,4 +88,16 @@ public interface PredicateOperand {
      */
     Predicate like(PredicateOperand other);
 
+    /** Return a Predicate representing comparing this to null.
+     *
+     * @return a new Predicate
+     */
+    Predicate isNull();
+
+    /** Return a Predicate representing comparing this to not null.
+     *
+     * @return a new Predicate
+     */
+    Predicate isNotNull();
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-bindings/pom.xml'
--- a/storage/ndb/clusterj/clusterj-bindings/pom.xml	2011-07-05 22:25:18 +0000
+++ b/storage/ndb/clusterj/clusterj-bindings/pom.xml	2011-09-26 13:51:16 +0000
@@ -20,13 +20,13 @@
   <parent>
     <groupId>com.mysql.clusterj</groupId>
     <artifactId>clusterj-aggregate</artifactId>
-    <version>7.1.16-SNAPSHOT</version>
+    <version>7.1.17-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>com.mysql.clusterj</groupId>
   <artifactId>clusterj-bindings</artifactId>
   <packaging>bundle</packaging>
-  <version>7.1.16-SNAPSHOT</version>
+  <version>7.1.17-SNAPSHOT</version>
   <name>ClusterJ Bindings</name>
   <description>The ndb-bindings implementation of ClusterJ storage spi</description>
   <build>

=== modified file 'storage/ndb/clusterj/clusterj-core/pom.xml'
--- a/storage/ndb/clusterj/clusterj-core/pom.xml	2011-07-05 22:25:18 +0000
+++ b/storage/ndb/clusterj/clusterj-core/pom.xml	2011-09-26 13:51:16 +0000
@@ -20,13 +20,13 @@
   <parent>
     <groupId>com.mysql.clusterj</groupId>
     <artifactId>clusterj-aggregate</artifactId>
-    <version>7.1.16-SNAPSHOT</version>
+    <version>7.1.17-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>com.mysql.clusterj</groupId>
   <artifactId>clusterj-core</artifactId>
   <packaging>bundle</packaging>
-  <version>7.1.16-SNAPSHOT</version>
+  <version>7.1.17-SNAPSHOT</version>
   <name>ClusterJ Core</name>
   <description>The core implementation of ClusterJ</description>
   <build>

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/AbstractDomainFieldHandlerImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/AbstractDomainFieldHandlerImpl.java	2011-06-24 20:58:44 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/AbstractDomainFieldHandlerImpl.java	2011-10-02 21:20:50 +0000
@@ -174,6 +174,14 @@ public abstract class AbstractDomainFiel
         }
     }
 
+    public void filterIsNull(ScanFilter filter) {
+        filter.isNull(storeColumn);
+    }
+
+    public void filterIsNotNull(ScanFilter filter) {
+        filter.isNotNull(storeColumn);
+    }
+
     public String getColumnName() {
         return columnName;
     }

=== added file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/IsNotNullPredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/IsNotNullPredicateImpl.java	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/IsNotNullPredicateImpl.java	2011-10-02 21:20:50 +0000
@@ -0,0 +1,49 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+package com.mysql.clusterj.core.query;
+
+import com.mysql.clusterj.core.spi.QueryExecutionContext;
+import com.mysql.clusterj.core.store.ScanFilter;
+import com.mysql.clusterj.core.store.ScanOperation;
+
+public class IsNotNullPredicateImpl extends PredicateImpl {
+
+    /** My property */
+    protected PropertyImpl property;
+
+    /** Construct a new IsNotNull predicate
+     * 
+     * @param dobj the query domain object that owns this predicate
+     * @param property the property
+     */
+    public IsNotNullPredicateImpl(QueryDomainTypeImpl<?> dobj, PropertyImpl property) {
+        super(dobj);
+        this.property = property;
+    }
+
+    /** Set the condition into the filter.
+     * @param context the query execution context with the parameter values (ignored for isNotNull)
+     * @param op the operation
+     * @param filter the filter
+     */
+    @Override
+    public void filterCmpValue(QueryExecutionContext context, ScanOperation op, ScanFilter filter) {
+        property.filterIsNotNull(filter);
+    }
+
+}

=== added file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/IsNullPredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/IsNullPredicateImpl.java	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/IsNullPredicateImpl.java	2011-10-02 21:20:50 +0000
@@ -0,0 +1,49 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+package com.mysql.clusterj.core.query;
+
+import com.mysql.clusterj.core.spi.QueryExecutionContext;
+import com.mysql.clusterj.core.store.ScanFilter;
+import com.mysql.clusterj.core.store.ScanOperation;
+
+public class IsNullPredicateImpl extends PredicateImpl {
+
+    /** My property */
+    protected PropertyImpl property;
+
+    /** Construct a new IsNull predicate
+     * 
+     * @param dobj the query domain object that owns this predicate
+     * @param property the property
+     */
+    public IsNullPredicateImpl(QueryDomainTypeImpl<?> dobj, PropertyImpl property) {
+        super(dobj);
+        this.property = property;
+    }
+
+    /** Set the condition into the filter.
+     * @param context the query execution context with the parameter values (ignored for isNull)
+     * @param op the operation
+     * @param filter the filter
+     */
+    @Override
+    public void filterCmpValue(QueryExecutionContext context, ScanOperation op, ScanFilter filter) {
+        property.filterIsNull(filter);
+    }
+
+}

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/ParameterImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/ParameterImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/ParameterImpl.java	2011-10-02 21:20:50 +0000
@@ -118,6 +118,16 @@ public class ParameterImpl implements Pr
                 local.message("ERR_NotImplemented"));
     }
 
+    public Predicate isNull() {
+        throw new UnsupportedOperationException(
+                local.message("ERR_NotImplemented"));
+    }
+
+    public Predicate isNotNull() {
+        throw new UnsupportedOperationException(
+                local.message("ERR_NotImplemented"));
+    }
+
     public void setProperty(PropertyImpl property) {
         if (this.property != null && this.property.fmd.getType() != property.fmd.getType()) {
             throw new ClusterJUserException(local.message("ERR_Multiple_Parameter_Usage", parameterName,

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PredicateImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PredicateImpl.java	2011-10-02 21:20:50 +0000
@@ -81,8 +81,7 @@ public abstract class PredicateImpl impl
     }
 
     void markBoundsForCandidateIndices(QueryExecutionContext context, CandidateIndexImpl[] candidateIndices) {
-        throw new ClusterJFatalInternalException(
-                local.message("ERR_Implementation_Should_Not_Occur"));
+        // default is nothing to do
     }
 
     public void operationSetBounds(QueryExecutionContext context,
@@ -161,10 +160,14 @@ public abstract class PredicateImpl impl
     }
 
     /** Mark all parameters as being required. */
-    public abstract void markParameters();
+    public void markParameters() {
+        // default is nothing to do
+    }
 
     /** Unmark all parameters as being required. */
-    public abstract void unmarkParameters();
+    public void unmarkParameters() {
+        // default is nothing to do
+    }
 
     private void assertPredicateImpl(Predicate other) {
         if (!(other instanceof PredicateImpl)) {

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PropertyImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PropertyImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PropertyImpl.java	2011-10-02 21:20:50 +0000
@@ -83,7 +83,7 @@ public class PropertyImpl implements Pre
             throw new ClusterJUserException(
                     local.message("ERR_Only_Parameters", "equal"));
         }
-        return (Predicate) new EqualPredicateImpl(dobj, this, (ParameterImpl)other);
+        return new EqualPredicateImpl(dobj, this, (ParameterImpl)other);
     }
 
     public Predicate between(PredicateOperand lower, PredicateOperand upper) {
@@ -91,7 +91,7 @@ public class PropertyImpl implements Pre
             throw new ClusterJUserException(
                     local.message("ERR_Only_Parameters", "between"));
         }
-        return (Predicate) new BetweenPredicateImpl(dobj, this, (ParameterImpl)lower, (ParameterImpl)upper);
+        return new BetweenPredicateImpl(dobj, this, (ParameterImpl)lower, (ParameterImpl)upper);
     }
 
     public Predicate greaterThan(PredicateOperand other) {
@@ -99,7 +99,7 @@ public class PropertyImpl implements Pre
             throw new ClusterJUserException(
                     local.message("ERR_Only_Parameters", "greaterThan"));
         }
-        return (Predicate) new GreaterThanPredicateImpl(dobj, this, (ParameterImpl)other);
+        return new GreaterThanPredicateImpl(dobj, this, (ParameterImpl)other);
     }
 
     public Predicate greaterEqual(PredicateOperand other) {
@@ -107,7 +107,7 @@ public class PropertyImpl implements Pre
             throw new ClusterJUserException(
                     local.message("ERR_Only_Parameters", "greaterEqual"));
         }
-        return (Predicate) new GreaterEqualPredicateImpl(dobj, this, (ParameterImpl)other);
+        return new GreaterEqualPredicateImpl(dobj, this, (ParameterImpl)other);
     }
 
     public Predicate lessThan(PredicateOperand other) {
@@ -115,7 +115,7 @@ public class PropertyImpl implements Pre
             throw new ClusterJUserException(
                     local.message("ERR_Only_Parameters", "lessThan"));
         }
-        return (Predicate) new LessThanPredicateImpl(dobj, this, (ParameterImpl)other);
+        return new LessThanPredicateImpl(dobj, this, (ParameterImpl)other);
     }
 
     public Predicate lessEqual(PredicateOperand other) {
@@ -123,7 +123,7 @@ public class PropertyImpl implements Pre
             throw new ClusterJUserException(
                     local.message("ERR_Only_Parameters", "lessEqual"));
         }
-        return (Predicate) new LessEqualPredicateImpl(dobj, this, (ParameterImpl)other);
+        return new LessEqualPredicateImpl(dobj, this, (ParameterImpl)other);
     }
 
     public Predicate in(PredicateOperand other) {
@@ -131,7 +131,7 @@ public class PropertyImpl implements Pre
             throw new ClusterJUserException(
                     local.message("ERR_Only_Parameters", "in"));
         }
-        return (Predicate) new InPredicateImpl(dobj, this, (ParameterImpl)other);
+        return new InPredicateImpl(dobj, this, (ParameterImpl)other);
     }
 
     public Predicate like(PredicateOperand other) {
@@ -139,7 +139,15 @@ public class PropertyImpl implements Pre
             throw new ClusterJUserException(
                     local.message("ERR_Only_Parameters", "like"));
         }
-        return (Predicate) new LikePredicateImpl(dobj, this, (ParameterImpl)other);
+        return new LikePredicateImpl(dobj, this, (ParameterImpl)other);
+    }
+
+    public Predicate isNull() {
+        return new IsNullPredicateImpl(dobj, this);
+    }
+
+    public Predicate isNotNull() {
+        return new IsNotNullPredicateImpl(dobj, this);
     }
 
     void markLowerBound(CandidateIndexImpl[] candidateIndices, PredicateImpl predicate, boolean strict) {
@@ -167,4 +175,12 @@ public class PropertyImpl implements Pre
         }
     }
 
+    public void filterIsNull(ScanFilter filter) {
+        fmd.filterIsNull(filter);
+    }
+
+    public void filterIsNotNull(ScanFilter filter) {
+        fmd.filterIsNotNull(filter);
+    }
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/spi/DomainFieldHandler.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/spi/DomainFieldHandler.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/spi/DomainFieldHandler.java	2011-10-02 21:20:50 +0000
@@ -78,4 +78,8 @@ public interface DomainFieldHandler {
 
     Object getValue(QueryExecutionContext context, String parameterName);
 
+    void filterIsNull(ScanFilter filter);
+
+    void filterIsNotNull(ScanFilter filter);
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/ScanFilter.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/ScanFilter.java	2011-05-26 21:04:45 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/ScanFilter.java	2011-10-02 21:20:50 +0000
@@ -59,6 +59,8 @@ public interface ScanFilter {
 
     public void isNull(Column storeColumn);
     
+    public void isNotNull(Column storeColumn);
+    
     public void delete();
 
 }

=== modified file 'storage/ndb/clusterj/clusterj-jdbc/pom.xml'
--- a/storage/ndb/clusterj/clusterj-jdbc/pom.xml	2011-07-05 22:25:18 +0000
+++ b/storage/ndb/clusterj/clusterj-jdbc/pom.xml	2011-09-26 13:51:16 +0000
@@ -19,13 +19,13 @@
   <parent>
     <groupId>com.mysql.clusterj</groupId>
     <artifactId>clusterj-aggregate</artifactId>
-    <version>7.1.16-SNAPSHOT</version>
+    <version>7.1.17-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>clusterj</groupId>
   <artifactId>clusterj-jdbc</artifactId>
   <name>ClusterJ JDBC Plugin</name>
-  <version>7.1.16-SNAPSHOT</version>
+  <version>7.1.17-SNAPSHOT</version>
   <dependencies>
     <dependency>
       <groupId>junit</groupId>

=== modified file 'storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/InterceptorImpl.java'
--- a/storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/InterceptorImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/InterceptorImpl.java	2011-09-26 13:57:18 +0000
@@ -17,8 +17,10 @@
 
 package com.mysql.clusterj.jdbc;
 
+import com.mysql.clusterj.ClusterJFatalInternalException;
 import com.mysql.clusterj.ClusterJHelper;
 import com.mysql.clusterj.ClusterJUserException;
+import com.mysql.clusterj.LockMode;
 import com.mysql.clusterj.SessionFactory;
 import com.mysql.clusterj.core.query.QueryDomainTypeImpl;
 import com.mysql.clusterj.core.spi.SessionSPI;
@@ -28,7 +30,10 @@ import com.mysql.clusterj.core.util.Logg
 import com.mysql.clusterj.core.util.LoggerFactoryService;
 import com.mysql.jdbc.Connection;
 import com.mysql.jdbc.ResultSetInternalMethods;
+import com.mysql.jdbc.ServerPreparedStatement;
 import com.mysql.jdbc.Statement;
+import com.mysql.jdbc.ServerPreparedStatement.BatchedBindValues;
+import com.mysql.jdbc.ServerPreparedStatement.BindValue;
 import com.mysql.clusterj.jdbc.antlr.ANTLRNoCaseStringStream;
 import com.mysql.clusterj.jdbc.antlr.MySQL51Parser;
 import com.mysql.clusterj.jdbc.antlr.MySQL51Lexer;
@@ -40,6 +45,8 @@ import com.mysql.clusterj.jdbc.antlr.nod
 import com.mysql.clusterj.query.Predicate;
 
 import com.mysql.clusterj.jdbc.SQLExecutor.Executor;
+
+import java.io.InputStream;
 import java.sql.SQLException;
 import java.sql.Savepoint;
 import java.util.ArrayList;
@@ -244,29 +251,45 @@ public class InterceptorImpl {
     public ResultSetInternalMethods preProcess(String sql, Statement statement,
             Connection connection) throws SQLException {
         assertReady();
+        if (logger.isDebugEnabled() && statement != null) 
+            logger.debug(statement.getClass().getName() + ": " + sql);
         if (statement instanceof com.mysql.jdbc.PreparedStatement) {
             com.mysql.jdbc.PreparedStatement preparedStatement =
                 (com.mysql.jdbc.PreparedStatement)statement;
             // key must be interned because we are using IdentityHashMap
-            String preparedSql = preparedStatement.getPreparedSql().intern();
+            // TODO: in case of DELETE, the SQL has already been rewritten at this point, 
+            // and the original SQL is gone
+            // so the key in the table is the rewritten DELETE SQL -- not what we want at all
+            String nonRewrittenSql = preparedStatement.getNonRewrittenSql();
+            String internedSql = nonRewrittenSql.intern();
+            
             // see if we have a parsed version of this query
             Executor sQLExecutor = null;
             synchronized(parsedSqlMap) {
-                sQLExecutor = parsedSqlMap.get(preparedSql);
+                sQLExecutor = parsedSqlMap.get(internedSql);
             }
             // if no cached SQLExecutor, create it, which might take some time
             if (sQLExecutor == null) {
-                sQLExecutor = createSQLExecutor(preparedSql);
+                sQLExecutor = createSQLExecutor(internedSql);
                 if (sQLExecutor != null) {
                     // multiple thread might have created a SQLExecutor but it's ok
                     synchronized(parsedSqlMap) {
-                        parsedSqlMap.put(preparedSql, sQLExecutor);
+                        parsedSqlMap.put(internedSql, sQLExecutor);
                     }
                 }
             }
-            return sQLExecutor.execute(this, preparedStatement.getParameterBindings());
+            try {
+                return sQLExecutor.execute(this, preparedStatement);
+            } catch (Throwable t) {
+                t.printStackTrace();
+                return null;
+            }
+        } else {
+            if (logger.isDebugEnabled() && statement != null) 
+                logger.debug(statement.getClass().getName() + " is not instanceof com.mysql.jdbc.PreparedStatement");
+            // not a prepared statement; won't execute this
+            return null;
         }
-        return null;
     }
 
     /**
@@ -323,6 +346,14 @@ public class InterceptorImpl {
                     result = new SQLExecutor.Noop();
                     break;
                 }
+                boolean forUpdate = null != (CommonTree)root.getFirstChildWithType(MySQL51Parser.FOR);
+                boolean lockShared = null != (CommonTree)root.getFirstChildWithType(MySQL51Parser.LOCK);
+                LockMode lockMode = LockMode.READ_COMMITTED;
+                if (forUpdate) {
+                    lockMode = LockMode.EXCLUSIVE;
+                } else if (lockShared) {
+                    lockMode = LockMode.SHARED;
+                }
                 getSession();
                 dictionary = session.getDictionary();
                 domainTypeHandler = getDomainTypeHandler(tableName, dictionary);
@@ -343,14 +374,15 @@ public class InterceptorImpl {
                 queryDomainType = (QueryDomainTypeImpl<?>) session.createQueryDomainType(domainTypeHandler);
                 if (whereNode == null) {
                     // no where clause (select all rows)
-                    result = new SQLExecutor.Select(domainTypeHandler, columnNames, queryDomainType);
+                    result = new SQLExecutor.Select(domainTypeHandler, columnNames, queryDomainType, lockMode);
                 } else {
                     // create a predicate from the tree
                     Predicate predicate = whereNode.getPredicate(queryDomainType);
                     if (predicate != null) {
                         // where clause that can be executed by clusterj
                         queryDomainType.where(predicate);
-                        result = new SQLExecutor.Select(domainTypeHandler, columnNames, queryDomainType);
+                        int numberOfParameters = whereNode.getNumberOfParameters();
+                        result = new SQLExecutor.Select(domainTypeHandler, columnNames, queryDomainType, lockMode, numberOfParameters);
                         whereType = "clusterj";
                     } else {
                         // where clause that cannot be executed by clusterj
@@ -461,7 +493,7 @@ public class InterceptorImpl {
         lexer.setErrorListener(new QueuingErrorListener(lexer));
         tokens.getTokens();
         if (lexer.getErrorListener().hasErrors()) {
-            logger.warn(local.message("ERR_Lexing_SQ",preparedSql));
+            logger.warn(local.message("ERR_Lexing_SQL",preparedSql));
             return result;
         }
         PlaceholderNode.resetId();

=== modified file 'storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/QueryExecutionContextJDBCImpl.java'
--- a/storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/QueryExecutionContextJDBCImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/QueryExecutionContextJDBCImpl.java	2011-09-26 13:57:18 +0000
@@ -20,17 +20,16 @@ package com.mysql.clusterj.jdbc;
 import java.math.BigDecimal;
 import java.math.BigInteger;
 import java.sql.Date;
-import java.sql.SQLException;
 import java.sql.Time;
 import java.sql.Timestamp;
 
-import com.mysql.clusterj.ClusterJUserException;
+import com.mysql.clusterj.ClusterJFatalInternalException;
 import com.mysql.clusterj.core.query.QueryExecutionContextImpl;
 import com.mysql.clusterj.core.spi.SessionSPI;
+import com.mysql.clusterj.core.spi.ValueHandler;
 import com.mysql.clusterj.core.util.I18NHelper;
 import com.mysql.clusterj.core.util.Logger;
 import com.mysql.clusterj.core.util.LoggerFactoryService;
-import com.mysql.jdbc.ParameterBindings;
 
 /** This class handles retrieving parameter values from the parameterBindings
  * associated with a PreparedStatement.
@@ -44,10 +43,7 @@ public class QueryExecutionContextJDBCIm
     static final Logger logger = LoggerFactoryService.getFactory().getInstance(QueryExecutionContextJDBCImpl.class);
 
     /** The wrapped ParameterBindings */
-    ParameterBindings parameterBindings;
-
-    /** The current offset */
-    int offset = 0;
+    ValueHandler parameterBindings;
 
     /** The number of parameters */
     int numberOfParameters;
@@ -58,176 +54,104 @@ public class QueryExecutionContextJDBCIm
      * @param numberOfParameters the number of parameters per statement
      */
     public QueryExecutionContextJDBCImpl(SessionSPI session,
-            ParameterBindings parameterBindings, int numberOfParameters) {
+            ValueHandler parameterBindings, int numberOfParameters) {
         super(session);
         this.parameterBindings = parameterBindings;
         this.numberOfParameters = numberOfParameters;
     }
 
-    /** Advance to the next statement (and next number of affected rows).
-     */
-    public void nextStatement() {
-        offset += numberOfParameters;
-    }
-
     public Byte getByte(String index) {
-        try {
-            int parameterIndex = Integer.valueOf(index) + offset;
-            Byte result = parameterBindings.getByte(parameterIndex);
-            return result;
-        } catch (SQLException ex) {
-                throw new ClusterJUserException(local.message("ERR_Getting_Parameter_Value", offset, index), ex);
-        }
+        int parameterIndex = Integer.valueOf(index);
+        Byte result = parameterBindings.getByte(parameterIndex);
+        return result;
     }
 
     public BigDecimal getBigDecimal(String index) {
-        try {
-            int parameterIndex = Integer.valueOf(index) + offset;
-            BigDecimal result = parameterBindings.getBigDecimal(parameterIndex);
-            return result;
-        } catch (SQLException ex) {
-                throw new ClusterJUserException(local.message("ERR_Getting_Parameter_Value", offset, index), ex);
-        }
+        int parameterIndex = Integer.valueOf(index);
+        BigDecimal result = parameterBindings.getBigDecimal(parameterIndex);
+        return result;
     }
 
     public BigInteger getBigInteger(String index) {
-        try {
-            int parameterIndex = Integer.valueOf(index) + offset;
-            BigInteger result = parameterBindings.getBigDecimal(parameterIndex).toBigInteger();
-            return result;
-        } catch (SQLException ex) {
-                throw new ClusterJUserException(local.message("ERR_Getting_Parameter_Value", offset, index), ex);
-        }
+        int parameterIndex = Integer.valueOf(index);
+        BigInteger result = parameterBindings.getBigDecimal(parameterIndex).toBigInteger();
+        return result;
     }
 
     public Boolean getBoolean(String index) {
-        try {
-            int parameterIndex = Integer.valueOf(index) + offset;
-            Boolean result = parameterBindings.getBoolean(parameterIndex);
-            return result;
-        } catch (SQLException ex) {
-                throw new ClusterJUserException(local.message("ERR_Getting_Parameter_Value", offset, index), ex);
-        }
+        int parameterIndex = Integer.valueOf(index);
+        Boolean result = parameterBindings.getBoolean(parameterIndex);
+        return result;
     }
 
     public byte[] getBytes(String index) {
-        try {
-            int parameterIndex = Integer.valueOf(index) + offset;
-            byte[] result = parameterBindings.getBytes(parameterIndex);
-            return result;
-        } catch (SQLException ex) {
-                throw new ClusterJUserException(local.message("ERR_Getting_Parameter_Value", offset, index), ex);
-        }
+        int parameterIndex = Integer.valueOf(index);
+        byte[] result = parameterBindings.getBytes(parameterIndex);
+        return result;
     }
 
     public Double getDouble(String index) {
-        try {
-            int parameterIndex = Integer.valueOf(index) + offset;
-            Double result = parameterBindings.getDouble(parameterIndex);
-            return result;
-        } catch (SQLException ex) {
-                throw new ClusterJUserException(local.message("ERR_Getting_Parameter_Value", offset, index), ex);
-        }
+        int parameterIndex = Integer.valueOf(index);
+        Double result = parameterBindings.getDouble(parameterIndex);
+        return result;
     }
 
     public Float getFloat(String index) {
-        try {
-            int parameterIndex = Integer.valueOf(index) + offset;
-            Float result = parameterBindings.getFloat(parameterIndex);
-            return result;
-        } catch (SQLException ex) {
-                throw new ClusterJUserException(local.message("ERR_Getting_Parameter_Value", offset, index), ex);
-        }
+        int parameterIndex = Integer.valueOf(index);
+        Float result = parameterBindings.getFloat(parameterIndex);
+        return result;
     }
 
     public Integer getInt(String index) {
-        try {
-            int parameterIndex = Integer.valueOf(index) + offset;
-            Integer result = parameterBindings.getInt(parameterIndex);
-            return result;
-        } catch (SQLException ex) {
-                throw new ClusterJUserException(local.message("ERR_Getting_Parameter_Value", offset, index), ex);
-        }
+        int parameterIndex = Integer.valueOf(index);
+        Integer result = parameterBindings.getInt(parameterIndex);
+        return result;
     }
 
     public Date getJavaSqlDate(String index) {
-        try {
-            int parameterIndex = Integer.valueOf(index) + offset;
-            java.sql.Date result = parameterBindings.getDate(parameterIndex);
-            return result;
-        } catch (SQLException ex) {
-                throw new ClusterJUserException(local.message("ERR_Getting_Parameter_Value", offset, index), ex);
-        }
+        int parameterIndex = Integer.valueOf(index);
+        java.sql.Date result = parameterBindings.getJavaSqlDate(parameterIndex);
+        return result;
     }
 
     public Time getJavaSqlTime(String index) {
-        try {
-            int parameterIndex = Integer.valueOf(index) + offset;
-            Time result = parameterBindings.getTime(parameterIndex);
-            return result;
-        } catch (SQLException ex) {
-                throw new ClusterJUserException(local.message("ERR_Getting_Parameter_Value", offset, index), ex);
-        }
+        int parameterIndex = Integer.valueOf(index);
+        Time result = parameterBindings.getJavaSqlTime(parameterIndex);
+        return result;
     }
 
     public Timestamp getJavaSqlTimestamp(String index) {
-        try {
-            int parameterIndex = Integer.valueOf(index) + offset;
-            java.sql.Timestamp result = parameterBindings.getTimestamp(parameterIndex);
-            return result;
-        } catch (SQLException ex) {
-                throw new ClusterJUserException(local.message("ERR_Getting_Parameter_Value", offset, index), ex);
-        }
+        int parameterIndex = Integer.valueOf(index);
+        java.sql.Timestamp result = parameterBindings.getJavaSqlTimestamp(parameterIndex);
+        return result;
     }
 
     public java.util.Date getJavaUtilDate(String index) {
-        try {
-            int parameterIndex = Integer.valueOf(index) + offset;
-            java.util.Date result = parameterBindings.getDate(parameterIndex);
-            return result;
-        } catch (SQLException ex) {
-                throw new ClusterJUserException(local.message("ERR_Getting_Parameter_Value", offset, index), ex);
-        }
+        int parameterIndex = Integer.valueOf(index);
+        java.util.Date result = parameterBindings.getJavaUtilDate(parameterIndex);
+        return result;
     }
 
     public Long getLong(String index) {
-        try {
-            int parameterIndex = Integer.valueOf(index) + offset;
-            Long result = parameterBindings.getLong(parameterIndex);
-            return result;
-        } catch (SQLException ex) {
-                throw new ClusterJUserException(local.message("ERR_Getting_Parameter_Value", offset, index), ex);
-        }
+        int parameterIndex = Integer.valueOf(index);
+        Long result = parameterBindings.getLong(parameterIndex);
+        return result;
     }
 
     public Short getShort(String index) {
-        try {
-            int parameterIndex = Integer.valueOf(index) + offset;
-            Short result = parameterBindings.getShort(parameterIndex);
-            return result;
-        } catch (SQLException ex) {
-                throw new ClusterJUserException(local.message("ERR_Getting_Parameter_Value", offset, index), ex);
-        }
+        int parameterIndex = Integer.valueOf(index);
+        Short result = parameterBindings.getShort(parameterIndex);
+        return result;
     }
 
     public String getString(String index) {
-        try {
-            int parameterIndex = Integer.valueOf(index) + offset;
-            String result = parameterBindings.getString(parameterIndex);
-            return result;
-        } catch (SQLException ex) {
-                throw new ClusterJUserException(local.message("ERR_Getting_Parameter_Value", offset, index), ex);
-        }
+        int parameterIndex = Integer.valueOf(index);
+        String result = parameterBindings.getString(parameterIndex);
+        return result;
     }
 
     public Object getObject(String index) {
-        try {
-            int parameterIndex = Integer.valueOf(index) + offset;
-            Object result = parameterBindings.getObject(parameterIndex);
-            return result;
-        } catch (SQLException ex) {
-                throw new ClusterJUserException(local.message("ERR_Getting_Parameter_Value", offset, index), ex);
-        }
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
     }
 
 }

=== modified file 'storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/SQLExecutor.java'
--- a/storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/SQLExecutor.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/SQLExecutor.java	2011-09-26 13:57:18 +0000
@@ -17,26 +17,28 @@
 
 package com.mysql.clusterj.jdbc;
 
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import com.mysql.clusterj.ClusterJDatastoreException;
 import com.mysql.clusterj.ClusterJFatalInternalException;
 import com.mysql.clusterj.ClusterJUserException;
+import com.mysql.clusterj.LockMode;
 import com.mysql.clusterj.core.query.QueryDomainTypeImpl;
-import com.mysql.clusterj.core.query.QueryExecutionContextImpl;
 import com.mysql.clusterj.core.spi.DomainTypeHandler;
-import com.mysql.clusterj.core.spi.QueryExecutionContext;
 import com.mysql.clusterj.core.spi.SessionSPI;
-import com.mysql.clusterj.core.spi.ValueHandler;
 import com.mysql.clusterj.core.store.ResultData;
 import com.mysql.clusterj.core.util.I18NHelper;
 import com.mysql.clusterj.core.util.Logger;
 import com.mysql.clusterj.core.util.LoggerFactoryService;
 import com.mysql.jdbc.ParameterBindings;
+import com.mysql.jdbc.PreparedStatement;
 import com.mysql.jdbc.ResultSetInternalMethods;
-
-import java.sql.SQLException;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
+import com.mysql.jdbc.ServerPreparedStatement;
+import com.mysql.jdbc.ServerPreparedStatement.BindValue;
 
 /** This class contains behavior to execute various SQL commands. There is one subclass for each
  * command to be executed. 
@@ -73,6 +75,18 @@ public class SQLExecutor {
     /** The query domain type for qualified SELECT and DELETE operations */
     protected QueryDomainTypeImpl<?> queryDomainType;
 
+    /** Does the jdbc driver support bind values (mysql 5.1.17 and later)? */
+    static boolean bindValueSupport = getBindValueSupport();
+
+    static boolean getBindValueSupport() {
+        try {
+            com.mysql.jdbc.ServerPreparedStatement.class.getMethod("getParameterBindValues", (Class<?>[])null);
+            return true;
+        } catch (Exception e) {
+            return false;
+        }
+    }
+
     public SQLExecutor(DomainTypeHandlerImpl<?> domainTypeHandler, List<String> columnNames, int numberOfParameters) {
         this(domainTypeHandler, columnNames);
         this.numberOfParameters = numberOfParameters;
@@ -110,12 +124,12 @@ public class SQLExecutor {
 
         /** Execute the SQL command
          * @param session the clusterj session which must not be null
-         * @param parameterBindings the parameter bindings from the prepared statement
+         * @param preparedStatement the prepared statement
          * @return the result of executing the statement, or null
          * @throws SQLException
          */
         ResultSetInternalMethods execute(InterceptorImpl interceptor,
-                ParameterBindings parameterBindings) throws SQLException;
+                PreparedStatement preparedStatement) throws SQLException;
     }
 
     /** This class implements the Executor contract but returns null, indicating that
@@ -124,7 +138,7 @@ public class SQLExecutor {
     public static class Noop implements Executor {
 
         public ResultSetInternalMethods execute(InterceptorImpl interceptor,
-                ParameterBindings parameterBindings) throws SQLException {
+                PreparedStatement preparedStatement) throws SQLException {
             return null;
         }
     }
@@ -133,23 +147,41 @@ public class SQLExecutor {
      */
     public static class Select extends SQLExecutor implements Executor {
 
-        public Select(DomainTypeHandlerImpl<?> domainTypeHandler, List<String> columnNames, QueryDomainTypeImpl<?> queryDomainType) {
+        private LockMode lockMode;
+
+        public Select(DomainTypeHandlerImpl<?> domainTypeHandler, List<String> columnNames,
+                QueryDomainTypeImpl<?> queryDomainType, LockMode lockMode, int numberOfParameters) {
             super(domainTypeHandler, columnNames, queryDomainType);
+            this.numberOfParameters = numberOfParameters;
+            this.lockMode = lockMode;
             if (queryDomainType == null) {
                 throw new ClusterJFatalInternalException("queryDomainType must not be null for Select.");
             }
         }
 
+        public Select(DomainTypeHandlerImpl<?> domainTypeHandler,
+                List<String> columnNames, QueryDomainTypeImpl<?> queryDomainType, LockMode lockMode) {
+            this(domainTypeHandler, columnNames, queryDomainType, lockMode, 0);
+        }
+
         public ResultSetInternalMethods execute(InterceptorImpl interceptor,
-                ParameterBindings parameterBindings) throws SQLException {
-            // create value handler to copy data from parameters to ndb
-            // count the parameters
-            int count = countParameters(parameterBindings);
+                PreparedStatement preparedStatement) throws SQLException {
             SessionSPI session = interceptor.getSession();
-            Map<String, Object> parameters = createParameterMap(queryDomainType, parameterBindings, 0, count);
-            QueryExecutionContext context = new QueryExecutionContextImpl(session, parameters);
+            session.setLockMode(lockMode);
+            // create value handler to copy data from parameters to ndb
+            ValueHandlerBatching valueHandlerBatching = getValueHandler(preparedStatement, null);
+            if (valueHandlerBatching == null) {
+                return null;
+            }
+            int numberOfStatements = valueHandlerBatching.getNumberOfStatements();
+            if (numberOfStatements != 1) {
+                return null;
+            }
+            QueryExecutionContextJDBCImpl context = 
+                new QueryExecutionContextJDBCImpl(session, valueHandlerBatching, numberOfParameters);
             session.startAutoTransaction();
             try {
+                valueHandlerBatching.next();
                 ResultData resultData = queryDomainType.getResultData(context);
                 // session.endAutoTransaction();
                 return new ResultSetInternalMethodsImpl(resultData, columnNumberToFieldNumberMap, 
@@ -176,30 +208,32 @@ public class SQLExecutor {
         }
 
         public ResultSetInternalMethods execute(InterceptorImpl interceptor,
-                ParameterBindings parameterBindings) throws SQLException {
+                PreparedStatement preparedStatement) throws SQLException {
             SessionSPI session = interceptor.getSession();
             if (queryDomainType == null) {
                 int rowsDeleted = session.deletePersistentAll(domainTypeHandler);
-                if (logger.isDebugEnabled()) logger.debug("deleteAll deleted: " + rowsDeleted);
+                if (logger.isDebugEnabled()) 
+                    logger.debug("deleteAll deleted: " + rowsDeleted);
                 return new ResultSetInternalMethodsUpdateCount(rowsDeleted);
             } else {
-                int numberOfBoundParameters = countParameters(parameterBindings);
-                int numberOfStatements = numberOfBoundParameters / numberOfParameters;
+                ValueHandlerBatching valueHandlerBatching = getValueHandler(preparedStatement, null);
+                if (valueHandlerBatching == null) {
+                    return null;
+                }
+                int numberOfStatements = valueHandlerBatching.getNumberOfStatements();
+                if (logger.isDebugEnabled()) 
+                    logger.debug("executing numberOfStatements: " + numberOfStatements 
+                            + " with numberOfParameters: " + numberOfParameters);
                 long[] deleteResults = new long[numberOfStatements];
-                if (logger.isDebugEnabled()) logger.debug(
-                        " numberOfParameters: " + numberOfParameters
-                        + " numberOfBoundParameters: " + numberOfBoundParameters
-                        + " numberOfStatements: " + numberOfStatements
-                        );
                 QueryExecutionContextJDBCImpl context = 
-                    new QueryExecutionContextJDBCImpl(session, parameterBindings, numberOfParameters);
-                for (int i = 0; i < numberOfStatements; ++i) {
+                    new QueryExecutionContextJDBCImpl(session, valueHandlerBatching, numberOfParameters);
+                int i = 0;
+                while (valueHandlerBatching.next()) {
                     // this will execute each statement in the batch using different parameters
                     int statementRowsDeleted = queryDomainType.deletePersistentAll(context);
-                    if (logger.isDebugEnabled()) logger.debug("statement " + i
-                            + " deleted " + statementRowsDeleted);
-                    deleteResults[i] = statementRowsDeleted;
-                    context.nextStatement();
+                    if (logger.isDebugEnabled())
+                        logger.debug("statement " + i + " deleted " + statementRowsDeleted);
+                    deleteResults[i++] = statementRowsDeleted;
                 }
                 return new ResultSetInternalMethodsUpdateCount(deleteResults);
             }
@@ -215,20 +249,26 @@ public class SQLExecutor {
         }
 
         public ResultSetInternalMethods execute(InterceptorImpl interceptor,
-                ParameterBindings parameterBindings) throws SQLException {
+                PreparedStatement preparedStatement) throws SQLException {
             SessionSPI session = interceptor.getSession();
-            int numberOfBoundParameters = countParameters(parameterBindings);
+            int numberOfBoundParameters = preparedStatement.getParameterMetaData().getParameterCount();
             int numberOfStatements = numberOfBoundParameters / numberOfParameters;
-            if (logger.isDebugEnabled()) logger.debug("SQLExecutor.Insert.execute"
-                    + " numberOfParameters: " + numberOfParameters
+            if (logger.isDebugEnabled())
+                logger.debug("numberOfParameters: " + numberOfParameters
                     + " numberOfBoundParameters: " + numberOfBoundParameters
                     + " numberOfFields: " + numberOfFields
                     + " numberOfStatements: " + numberOfStatements
                     );
             // interceptor.beforeClusterjStart();
             // session asks for values by field number which are converted to parameter number
-            for (int offset = 0; offset < numberOfBoundParameters; offset += numberOfParameters) {
-                ValueHandler valueHandler = getValueHandler(parameterBindings, fieldNumberToColumnNumberMap, offset);
+            ValueHandlerBatching valueHandler = getValueHandler(preparedStatement, fieldNumberToColumnNumberMap);
+            if (valueHandler == null) {
+                // we cannot handle this request
+                return null;
+            }
+            int count = 0;
+            while(valueHandler.next()) {
+                if (logger.isDetailEnabled()) logger.detail("inserting row " + count++);
                 session.insert(domainTypeHandler, valueHandler);
             }
             session.flush();
@@ -237,6 +277,43 @@ public class SQLExecutor {
         }
     }
 
+    protected ValueHandlerBatching getValueHandler(
+            PreparedStatement preparedStatement, int[] fieldNumberToColumnNumberMap) {
+        ValueHandlerBatching result = null;
+        try {
+            int numberOfBoundParameters = preparedStatement.getParameterMetaData().getParameterCount();
+            int numberOfStatements = numberOfParameters == 0 ? 1 : numberOfBoundParameters / numberOfParameters;
+            if (logger.isDebugEnabled()) logger.debug(
+                    " numberOfParameters: " + numberOfParameters
+                    + " numberOfBoundParameters: " + numberOfBoundParameters
+                    + " numberOfStatements: " + numberOfStatements
+                    + " fieldNumberToColumnNumberMap: " + Arrays.toString(fieldNumberToColumnNumberMap)
+                    );
+            if (preparedStatement instanceof ServerPreparedStatement) {
+                if (bindValueSupport) {
+                    ServerPreparedStatement serverPreparedStatement = (ServerPreparedStatement)preparedStatement;
+                    BindValue[] bindValues = serverPreparedStatement.getParameterBindValues();
+                    result = new ValueHandlerBindValuesImpl(bindValues, fieldNumberToColumnNumberMap,
+                            numberOfStatements, numberOfParameters);
+                } else {
+                    // Note: calling getParameterBindings() on a server prepared statement
+                    // causes an NPE inside the driver. So if this is a server prepared
+                    // statement without bind value support, e.g. a JDBC driver earlier
+                    // than 5.1.17, returning null lets the driver pursue its normal path.
+                }
+            } else {
+            // not a server prepared statement; treat as regular prepared statement
+            ParameterBindings parameterBindings = preparedStatement.getParameterBindings();
+            result = new ValueHandlerImpl(parameterBindings, fieldNumberToColumnNumberMap, 
+                    numberOfStatements, numberOfParameters);
+            }
+        } catch (SQLException ex) {
+            throw new ClusterJDatastoreException(ex);
+        } catch (Throwable t) {
+            t.printStackTrace();
+        }
+        return result;
+    }
+
     /** Create the parameter map assigning each bound parameter a number.
      * The result is a map in which the key is a String whose key is a cardinal number
      * starting with 1 (for JDBC which uses 1-origin for numbering)
@@ -316,17 +393,6 @@ public class SQLExecutor {
         }
     }
 
-    /** Create a value handler (part of the clusterj spi) to retrieve values from jdbc parameter bindings.
-     * @param parameterBindings the jdbc parameter bindings from prepared statements
-     * @param fieldNumberToParameterNumberMap map from field number to parameter number
-     * @param offset into the parameter bindings for this instance (used for batch execution)
-     * @return
-     */
-    protected ValueHandler getValueHandler(ParameterBindings parameterBindings,
-            int[] fieldNumberToParameterNumberMap, int offset) {
-        return new ValueHandlerImpl(parameterBindings, fieldNumberToParameterNumberMap, offset);
-    }
-
     /** If detailed logging is enabled write the parameter bindings to the log.
      * @param parameterBindings the jdbc parameter bindings
      */
@@ -346,30 +412,4 @@ public class SQLExecutor {
         }
     }
 
-    /** Count the number of bound parameters. If this is a batch execution, then the
-     * number of bound parameters is the number of statements in the batch times the
-     * number of parameters per statement.
-     * If detailed logging is enabled write the parameter bindings to the log.
-     * @param parameterBindings the jdbc parameter bindings
-     */
-    protected static int countParameters(ParameterBindings parameterBindings) {
-        int i = 0;
-        while (true) {
-            try {
-                ++i;
-                // parameters are 1-origin per jdbc specification
-                Object objectValue = parameterBindings.getObject(i);
-                if (logger.isDetailEnabled()) {
-                    logger.detail("parameterBinding: parameter " + i
-                            + " has value: " + objectValue
-                            + " of type " + objectValue.getClass());
-                }
-            } catch (Exception e) {
-                // we don't know how many parameters are bound...
-                break;
-            }
-        }
-        return i - 1;
-    }
-
 }

=== added file 'storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/ValueHandlerBatching.java'
--- a/storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/ValueHandlerBatching.java	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/ValueHandlerBatching.java	2011-09-26 13:57:18 +0000
@@ -0,0 +1,45 @@
+/*
+ *  Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+ */
+
+package com.mysql.clusterj.jdbc;
+
+import com.mysql.clusterj.core.spi.ValueHandler;
+import com.mysql.clusterj.core.util.I18NHelper;
+import com.mysql.clusterj.core.util.Logger;
+import com.mysql.clusterj.core.util.LoggerFactoryService;
+
+/** This interface handles retrieving parameter values from the parameterBindings
+ * associated with a PreparedStatement or batchedBindValues from a ServerPreparedStatement.
+ */
+public interface ValueHandlerBatching extends ValueHandler {
+
+    /** My message translator */
+    static final I18NHelper local = I18NHelper.getInstance(ValueHandlerBatching.class);
+
+    /** My logger */
+    static final Logger logger = LoggerFactoryService.getFactory().getInstance(ValueHandlerBatching.class);
+
+    /**
+     * Advance to the next parameter set. If successful, return true. If there are no more
+     * parameter sets, return false.
+     * @return true if positioned to a valid parameter set
+     */
+    public boolean next();
+
+    public int getNumberOfStatements();
+
+}

=== added file 'storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/ValueHandlerBindValuesImpl.java'
--- a/storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/ValueHandlerBindValuesImpl.java	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/ValueHandlerBindValuesImpl.java	2011-09-26 13:57:18 +0000
@@ -0,0 +1,477 @@
+/*
+ *  Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+ */
+
+package com.mysql.clusterj.jdbc;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.sql.Date;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.util.Iterator;
+import java.util.List;
+
+import com.mysql.clusterj.ClusterJFatalInternalException;
+import com.mysql.clusterj.ClusterJUserException;
+import com.mysql.clusterj.ColumnMetadata;
+import com.mysql.clusterj.core.spi.DomainTypeHandler;
+import com.mysql.clusterj.core.util.I18NHelper;
+import com.mysql.clusterj.core.util.Logger;
+import com.mysql.clusterj.core.util.LoggerFactoryService;
+import com.mysql.jdbc.ServerPreparedStatement.BatchedBindValues;
+import com.mysql.jdbc.ServerPreparedStatement.BindValue;
+
+/** This class handles retrieving parameter values from the parameterBindings
+ * associated with a PreparedStatement.
+ */
+public class ValueHandlerBindValuesImpl implements ValueHandlerBatching {
+
+    /** My message translator */
+    static final I18NHelper local = I18NHelper.getInstance(ValueHandlerBindValuesImpl.class);
+
+    /** My logger */
+    static final Logger logger = LoggerFactoryService.getFactory().getInstance(ValueHandlerBindValuesImpl.class);
+
+    private Iterator<?> parameterSetListIterator;
+    private BindValue[] bindValues;
+    private int[] fieldNumberToColumnNumberMap;
+    private int numberOfStatements;
+    private int currentStatement = 0;
+    private int offset;
+    private int numberOfParameters;
+
+    public ValueHandlerBindValuesImpl(List<?> parameterSetList, int[] fieldNumberToColumnNumberMap) {
+        this.parameterSetListIterator = parameterSetList.iterator();
+        this.numberOfStatements = parameterSetList.size();
+        this.fieldNumberToColumnNumberMap = fieldNumberToColumnNumberMap;
+    }
+
+    public ValueHandlerBindValuesImpl(BindValue[] bindValues, int[] fieldNumberToColumnNumberMap,
+            int numberOfStatements, int numberOfParameters) {
+        this.bindValues = bindValues;
+        this.fieldNumberToColumnNumberMap = fieldNumberToColumnNumberMap;
+        this.numberOfStatements = numberOfStatements;
+        this.numberOfParameters = numberOfParameters;
+        this.offset = -numberOfParameters;
+    }
+
+    /** Position to the next parameter set. If no more parameter sets, return false.
+     * @return true if positioned on a valid parameter set
+     */
+    @Override
+    public boolean next() {
+        if (parameterSetListIterator == null) {
+            offset += numberOfParameters;
+            return currentStatement++ < numberOfStatements;
+        }
+        if (parameterSetListIterator.hasNext()) {
+            Object parameterSet = parameterSetListIterator.next();
+            if (parameterSet instanceof BatchedBindValues) {
+                bindValues = ((BatchedBindValues)parameterSet).batchedParameterValues;
+            } else {
+                throw new ClusterJFatalInternalException(
+                        local.message("ERR_Mixed_Server_Prepared_Statement_Values", parameterSet.getClass().getName()));
+            }
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    @Override
+    public int getNumberOfStatements() {
+        return numberOfStatements;
+    }
+
+    /** Get the index into the BindValue array. The number needs to be decreased by 1
+     * because SQL is 1-origin while the java array is 0-origin.
+     * @param fieldNumber the field number for the requested field
+     * @return the 0-origin index into the BindValue array
+     */
+    private int getIndex(int fieldNumber) {
+        return fieldNumberToColumnNumberMap==null? fieldNumber + offset - 1:
+            fieldNumberToColumnNumberMap[fieldNumber] + offset - 1;
+    }
+
+    @Override
+    public BigDecimal getBigDecimal(int fieldNumber) {
+        BindValue bindValue = bindValues[getIndex(fieldNumber)];
+        Object value = bindValue.value;
+        if (value instanceof BigDecimal) {
+            return (BigDecimal) value;
+        } else if (value instanceof String) {
+            return new BigDecimal((String)value);
+        } else {
+            throw new ClusterJUserException(
+                    local.message("ERR_Parameter_Wrong_Type", "BigDecimal", value.getClass().getName()));
+        }
+    }
+
+    @Override
+    public BigInteger getBigInteger(int fieldNumber) {
+        BindValue bindValue = bindValues[getIndex(fieldNumber)];
+        Object value = bindValue.value;
+        if (value instanceof BigDecimal) {
+            return ((BigDecimal)value).toBigInteger();
+        } else if (value instanceof String) {
+            return new BigDecimal((String)value).toBigInteger();
+        } else {
+            throw new ClusterJUserException(
+                    local.message("ERR_Parameter_Wrong_Type", "BigInteger", value.getClass().getName()));
+        }
+    }
+
+    @Override
+    public boolean getBoolean(int fieldNumber) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public boolean[] getBooleans(int fieldNumber) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public byte getByte(int fieldNumber) {
+        return (byte)bindValues[getIndex(fieldNumber)].longBinding;
+    }
+
+    @Override
+    public byte[] getBytes(int fieldNumber) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public double getDouble(int fieldNumber) {
+        return bindValues[getIndex(fieldNumber)].doubleBinding;
+    }
+
+    @Override
+    public float getFloat(int fieldNumber) {
+        return bindValues[getIndex(fieldNumber)].floatBinding;
+    }
+
+    @Override
+    public int getInt(int fieldNumber) {
+        return (int)bindValues[getIndex(fieldNumber)].longBinding;
+    }
+
+    @Override
+    public Date getJavaSqlDate(int fieldNumber) {
+        BindValue bindValue = bindValues[getIndex(fieldNumber)];
+        Object value = bindValue.value;
+        if (value instanceof java.sql.Date) {
+            return (java.sql.Date) value;
+        } else {
+            throw new ClusterJUserException(
+                    local.message("ERR_Parameter_Wrong_Type", "java.sql.Date", value.getClass().getName()));
+        }
+    }
+
+    @Override
+    public Time getJavaSqlTime(int fieldNumber) {
+        BindValue bindValue = bindValues[getIndex(fieldNumber)];
+        Object value = bindValue.value;
+        if (value instanceof java.sql.Time) {
+            return (java.sql.Time) value;
+        } else {
+            throw new ClusterJUserException(
+                    local.message("ERR_Parameter_Wrong_Type", "java.sql.Time", value.getClass().getName()));
+        }
+    }
+
+    @Override
+    public Timestamp getJavaSqlTimestamp(int fieldNumber) {
+        BindValue bindValue = bindValues[getIndex(fieldNumber)];
+        Object value = bindValue.value;
+        if (value instanceof java.sql.Timestamp) {
+            return (java.sql.Timestamp) value;
+        } else {
+            throw new ClusterJUserException(
+                    local.message("ERR_Parameter_Wrong_Type", "java.sql.Timestamp", value.getClass().getName()));
+        }
+    }
+
+    @Override
+    public java.util.Date getJavaUtilDate(int fieldNumber) {
+        BindValue bindValue = bindValues[getIndex(fieldNumber)];
+        Object value = bindValue.value;
+        if (value instanceof java.util.Date) {
+            return (java.util.Date) value;
+        } else {
+            throw new ClusterJUserException(
+                    local.message("ERR_Parameter_Wrong_Type", "java.util.Date", value.getClass().getName()));
+        }
+    }
+
+    @Override
+    public long getLong(int fieldNumber) {
+        return bindValues[getIndex(fieldNumber)].longBinding;
+    }
+
+    @Override
+    public Boolean getObjectBoolean(int fieldNumber) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public Byte getObjectByte(int fieldNumber) {
+        BindValue bindValue = bindValues[getIndex(fieldNumber)];
+        if (bindValue.isNull) {
+            return null;
+        } else {
+            return (byte)bindValue.longBinding;
+        }
+    }
+
+    @Override
+    public Double getObjectDouble(int fieldNumber) {
+        BindValue bindValue = bindValues[getIndex(fieldNumber)];
+        if (bindValue.isNull) {
+            return null;
+        } else {
+            return bindValue.doubleBinding;
+        }
+    }
+
+    @Override
+    public Float getObjectFloat(int fieldNumber) {
+        BindValue bindValue = bindValues[getIndex(fieldNumber)];
+        if (bindValue.isNull) {
+            return null;
+        } else {
+            return bindValue.floatBinding;
+        }
+    }
+
+    @Override
+    public Integer getObjectInt(int fieldNumber) {
+        BindValue bindValue = bindValues[getIndex(fieldNumber)];
+        if (bindValue.isNull) {
+            return null;
+        } else {
+            return (int)bindValue.longBinding;
+        }
+    }
+
+    @Override
+    public Long getObjectLong(int fieldNumber) {
+        BindValue bindValue = bindValues[getIndex(fieldNumber)];
+        if (bindValue.isNull) {
+            return null;
+        } else {
+            return bindValue.longBinding;
+        }
+    }
+
+    @Override
+    public Short getObjectShort(int fieldNumber) {
+        BindValue bindValue = bindValues[getIndex(fieldNumber)];
+        if (bindValue.isNull) {
+            return null;
+        } else {
+            return (short)bindValue.longBinding;
+        }
+    }
+
+    @Override
+    public short getShort(int fieldNumber) {
+        return (short)bindValues[getIndex(fieldNumber)].longBinding;
+    }
+
+    @Override
+    public String getString(int fieldNumber) {
+        BindValue bindValue = bindValues[getIndex(fieldNumber)];
+        Object value = bindValue.value;
+        if (value instanceof String) {
+            return (String) value;
+        } else {
+            throw new ClusterJUserException(
+                    local.message("ERR_Parameter_Wrong_Type", "String", value.getClass().getName()));
+        }
+    }
+
+    @Override
+    public boolean isNull(int fieldNumber) {
+        return bindValues[getIndex(fieldNumber)].isNull;
+    }
+
+    @Override
+    public boolean isModified(int fieldNumber) {
+        return fieldNumberToColumnNumberMap[fieldNumber] != -1;
+    }
+
+    @Override
+    public void markModified(int fieldNumber) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public String pkToString(DomainTypeHandler<?> domainTypeHandler) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void resetModified() {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setBigDecimal(int fieldNumber, BigDecimal value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setBigInteger(int fieldNumber, BigInteger bigIntegerExact) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setBoolean(int fieldNumber, boolean b) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setBooleans(int fieldNumber, boolean[] b) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setByte(int fieldNumber, byte value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setBytes(int fieldNumber, byte[] value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setDouble(int fieldNumber, double value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setFloat(int fieldNumber, float value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    public void setInt(int fieldNumber, int value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setJavaSqlDate(int fieldNumber, Date value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setJavaSqlTime(int fieldNumber, Time value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setJavaSqlTimestamp(int fieldNumber, Timestamp value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setJavaUtilDate(int fieldNumber, java.util.Date value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setLong(int fieldNumber, long value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setObject(int fieldNumber, Object value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setObjectBoolean(int fieldNumber, Boolean value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setObjectByte(int fieldNumber, Byte value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setObjectDouble(int fieldNumber, Double value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setObjectFloat(int fieldNumber, Float value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setObjectInt(int fieldNumber, Integer value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setObjectLong(int fieldNumber, Long value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setObjectShort(int fieldNumber, Short value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setShort(int fieldNumber, short value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void setString(int fieldNumber, String value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public ColumnMetadata[] columnMetadata() {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public Boolean found() {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void found(Boolean found) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public Object get(int columnNumber) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+    @Override
+    public void set(int columnNumber, Object value) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Should_Not_Occur"));
+    }
+
+}

=== modified file 'storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/ValueHandlerImpl.java'
--- a/storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/ValueHandlerImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/ValueHandlerImpl.java	2011-09-26 13:57:18 +0000
@@ -28,17 +28,15 @@ import com.mysql.clusterj.ClusterJDatast
 import com.mysql.clusterj.ClusterJFatalInternalException;
 import com.mysql.clusterj.ColumnMetadata;
 import com.mysql.clusterj.core.spi.DomainTypeHandler;
-import com.mysql.clusterj.core.spi.ValueHandler;
 import com.mysql.clusterj.core.util.I18NHelper;
 import com.mysql.clusterj.core.util.Logger;
 import com.mysql.clusterj.core.util.LoggerFactoryService;
-
 import com.mysql.jdbc.ParameterBindings;
 
 /** This class handles retrieving parameter values from the parameterBindings
  * associated with a PreparedStatement.
  */
-public class ValueHandlerImpl implements ValueHandler {
+public class ValueHandlerImpl implements ValueHandlerBatching {
 
     /** My message translator */
     static final I18NHelper local = I18NHelper.getInstance(ValueHandlerImpl.class);
@@ -47,24 +45,48 @@ public class ValueHandlerImpl implements
     static final Logger logger = LoggerFactoryService.getFactory().getInstance(ValueHandlerImpl.class);
 
     private ParameterBindings parameterBindings;
-    private int[] fieldNumberMap;
-
+    private final int[] fieldNumberMap;
+    private final int numberOfBoundParameters;
+    private final int numberOfStatements;
+    private final int numberOfParameters;
     /** The offset into the parameter bindings, used for batch processing */
     private int offset;
 
-    public ValueHandlerImpl(ParameterBindings parameterBindings, int[] fieldNumberMap, int offset) {
+    public ValueHandlerImpl(ParameterBindings parameterBindings, int[] fieldNumberMap, 
+            int numberOfStatements, int numberOfParameters) {
         this.parameterBindings = parameterBindings;
         this.fieldNumberMap = fieldNumberMap;
-        this.offset = offset;
-    }
-
-    public ValueHandlerImpl(ParameterBindings parameterBindings, int[] fieldNumberMap) {
-        this(parameterBindings, fieldNumberMap, 0);
+        this.numberOfParameters = numberOfParameters;
+        this.offset = -numberOfParameters;
+        this.numberOfBoundParameters = numberOfStatements * numberOfParameters;
+        this.numberOfStatements = numberOfStatements;
+    }
+
+    @Override
+    public int getNumberOfStatements() {
+        return numberOfStatements;
+    }
+
+    @Override
+    public boolean next() {
+        offset += numberOfParameters;
+        return offset < numberOfBoundParameters;
+    }
+
+    /** Return the index into the parameterBindings for this offset and field number.
+     * Offset moves the "window" to the next set of parameters for multi-statement
+     * (batched) statements.
+     * @param fieldNumber the origin-0 number
+     * @return the index into the parameterBindings
+     */
+    private int getIndex(int fieldNumber) {
+        int result = fieldNumberMap == null ? offset + fieldNumber : offset + fieldNumberMap[fieldNumber];
+        return result;
     }
 
     public BigDecimal getBigDecimal(int fieldNumber) {
         try {
-            return parameterBindings.getBigDecimal(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.getBigDecimal(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -72,7 +94,7 @@ public class ValueHandlerImpl implements
 
     public BigInteger getBigInteger(int fieldNumber) {
         try {
-            return parameterBindings.getBigDecimal(offset + fieldNumberMap[fieldNumber]).toBigInteger();
+            return parameterBindings.getBigDecimal(getIndex(fieldNumber)).toBigInteger();
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -80,7 +102,7 @@ public class ValueHandlerImpl implements
 
     public boolean getBoolean(int fieldNumber) {
         try {
-            return parameterBindings.getBoolean(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.getBoolean(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -92,7 +114,7 @@ public class ValueHandlerImpl implements
 
     public byte getByte(int fieldNumber) {
         try {
-            return parameterBindings.getByte(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.getByte(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -104,7 +126,7 @@ public class ValueHandlerImpl implements
 
     public double getDouble(int fieldNumber) {
         try {
-            return parameterBindings.getDouble(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.getDouble(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -112,7 +134,7 @@ public class ValueHandlerImpl implements
 
     public float getFloat(int fieldNumber) {
         try {
-            return parameterBindings.getFloat(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.getFloat(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -120,7 +142,7 @@ public class ValueHandlerImpl implements
 
     public int getInt(int fieldNumber) {
         try {
-            return parameterBindings.getInt(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.getInt(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -128,7 +150,7 @@ public class ValueHandlerImpl implements
 
     public Date getJavaSqlDate(int fieldNumber) {
         try {
-            return parameterBindings.getDate(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.getDate(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -136,7 +158,7 @@ public class ValueHandlerImpl implements
 
     public Time getJavaSqlTime(int fieldNumber) {
         try {
-            return parameterBindings.getTime(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.getTime(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -144,7 +166,7 @@ public class ValueHandlerImpl implements
 
     public Timestamp getJavaSqlTimestamp(int fieldNumber) {
         try {
-            return parameterBindings.getTimestamp(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.getTimestamp(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -152,7 +174,7 @@ public class ValueHandlerImpl implements
 
     public java.util.Date getJavaUtilDate(int fieldNumber) {
         try {
-            return parameterBindings.getDate(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.getDate(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -160,7 +182,7 @@ public class ValueHandlerImpl implements
 
     public long getLong(int fieldNumber) {
         try {
-            return parameterBindings.getLong(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.getLong(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -172,7 +194,7 @@ public class ValueHandlerImpl implements
 
     public Byte getObjectByte(int fieldNumber) {
         try {
-            return parameterBindings.getByte(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.getByte(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -180,7 +202,7 @@ public class ValueHandlerImpl implements
 
     public Double getObjectDouble(int fieldNumber) {
         try {
-            return parameterBindings.getDouble(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.getDouble(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -188,7 +210,7 @@ public class ValueHandlerImpl implements
 
     public Float getObjectFloat(int fieldNumber) {
         try {
-            return parameterBindings.getFloat(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.getFloat(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -196,7 +218,7 @@ public class ValueHandlerImpl implements
 
     public Integer getObjectInt(int fieldNumber) {
         try {
-            return parameterBindings.getInt(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.getInt(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -204,7 +226,7 @@ public class ValueHandlerImpl implements
 
     public Long getObjectLong(int fieldNumber) {
         try {
-            return parameterBindings.getLong(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.getLong(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -212,7 +234,7 @@ public class ValueHandlerImpl implements
 
     public Short getObjectShort(int fieldNumber) {
         try {
-            return parameterBindings.getShort(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.getShort(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -220,7 +242,7 @@ public class ValueHandlerImpl implements
 
     public short getShort(int fieldNumber) {
         try {
-            return parameterBindings.getShort(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.getShort(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -228,7 +250,7 @@ public class ValueHandlerImpl implements
 
     public String getString(int fieldNumber) {
         try {
-            return parameterBindings.getString(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.getString(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }
@@ -240,7 +262,7 @@ public class ValueHandlerImpl implements
 
     public boolean isNull(int fieldNumber) {
         try {
-            return parameterBindings.isNull(offset + fieldNumberMap[fieldNumber]);
+            return parameterBindings.isNull(getIndex(fieldNumber));
         } catch (SQLException e) {
             throw new ClusterJDatastoreException(e);
         }

=== modified file 'storage/ndb/clusterj/clusterj-jdbc/src/test/java/jdbctest/BatchDeleteQueryAllPrimitivesTest.java'
--- a/storage/ndb/clusterj/clusterj-jdbc/src/test/java/jdbctest/BatchDeleteQueryAllPrimitivesTest.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-jdbc/src/test/java/jdbctest/BatchDeleteQueryAllPrimitivesTest.java	2011-09-26 13:57:18 +0000
@@ -81,7 +81,7 @@ create table allprimitives (
         deleteEqualQuery("id", "PRIMARY", 5, 0);
         try {
             connection.setAutoCommit(false);
-            PreparedStatement preparedStatement = connection.prepareStatement("DELETE FROM allprimitives where id = ?");
+            PreparedStatement preparedStatement = ((com.mysql.jdbc.Connection)connection).serverPrepareStatement("DELETE FROM allprimitives where id = ?");
             // delete 4 through 9 (excluding 5 which is already gone)
             for (int i = 4; i < 9; ++i) {
                 preparedStatement.setInt(1, i);

=== modified file 'storage/ndb/clusterj/clusterj-jdbc/src/test/java/jdbctest/BigIntegerTypesTest.java'
--- a/storage/ndb/clusterj/clusterj-jdbc/src/test/java/jdbctest/BigIntegerTypesTest.java	2011-02-21 11:53:51 +0000
+++ b/storage/ndb/clusterj/clusterj-jdbc/src/test/java/jdbctest/BigIntegerTypesTest.java	2011-09-26 13:57:18 +0000
@@ -36,6 +36,11 @@ create index idx_decimal_null_btree on b
 create unique index idx_decimal_null_both on bigintegertypes(decimal_null_both);
      */
 
+    @Override
+    protected boolean getCleanupAfterTest() {
+        return false;
+    }
+
     /** One of two tests in the superclass that we don't want to run */
     @Override
     public void testWriteJDBCReadNDB() {

=== modified file 'storage/ndb/clusterj/clusterj-jdbc/src/test/resources/clusterj.properties'
--- a/storage/ndb/clusterj/clusterj-jdbc/src/test/resources/clusterj.properties	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/clusterj/clusterj-jdbc/src/test/resources/clusterj.properties	2011-09-28 10:46:30 +0000
@@ -4,7 +4,7 @@ com.mysql.clusterj.connect.delay=5
 com.mysql.clusterj.connect.verbose=1
 com.mysql.clusterj.connect.timeout.before=30
 com.mysql.clusterj.connect.timeout.after=20
-com.mysql.clusterj.jdbc.url=jdbc:mysql://localhost:9306/test?statementInterceptors=com.mysql.clusterj.jdbc.StatementInterceptor&connectionLifecycleInterceptors=com.mysql.clusterj.jdbc.ConnectionLifecycleInterceptor&poop=doop&com.mysql.clusterj.connectstring=localhost:9311&rewriteBatchedStatements=true&cachePrepStmts=true
+com.mysql.clusterj.jdbc.url=jdbc:mysql://localhost:9306/test?statementInterceptors=com.mysql.clusterj.jdbc.StatementInterceptor&connectionLifecycleInterceptors=com.mysql.clusterj.jdbc.ConnectionLifecycleInterceptor&com.mysql.clusterj.connectstring=localhost:9311&cachePrepStmts=true&rewriteBatchedStatements=true
 com.mysql.clusterj.jdbc.driver=com.mysql.jdbc.Driver
 com.mysql.clusterj.jdbc.username=root
 com.mysql.clusterj.jdbc.password=

=== modified file 'storage/ndb/clusterj/clusterj-jpatest/pom.xml'
--- a/storage/ndb/clusterj/clusterj-jpatest/pom.xml	2011-07-05 22:25:18 +0000
+++ b/storage/ndb/clusterj/clusterj-jpatest/pom.xml	2011-09-26 13:51:16 +0000
@@ -28,12 +28,12 @@
   <parent>
     <groupId>com.mysql.clusterj</groupId>
     <artifactId>clusterj-aggregate</artifactId>
-    <version>7.1.16-SNAPSHOT</version>
+    <version>7.1.17-SNAPSHOT</version>
   </parent>
     <modelVersion>4.0.0</modelVersion>
     <groupId>com.mysql.clusterj</groupId>
     <artifactId>clusterj-jpatest</artifactId>
-    <version>7.1.16-SNAPSHOT</version>
+    <version>7.1.17-SNAPSHOT</version>
     <packaging>jar</packaging>
     <name>ClusterJ JPA Integration Tests</name>
 

=== modified file 'storage/ndb/clusterj/clusterj-openjpa/pom.xml'
--- a/storage/ndb/clusterj/clusterj-openjpa/pom.xml	2011-07-05 22:25:18 +0000
+++ b/storage/ndb/clusterj/clusterj-openjpa/pom.xml	2011-09-26 13:51:16 +0000
@@ -24,12 +24,12 @@
   <parent>
     <groupId>com.mysql.clusterj</groupId>
     <artifactId>clusterj-aggregate</artifactId>
-    <version>7.1.16-SNAPSHOT</version>
+    <version>7.1.17-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>com.mysql.clusterj</groupId>
   <artifactId>clusterj-openjpa</artifactId>
-  <version>7.1.16-SNAPSHOT</version>
+  <version>7.1.17-SNAPSHOT</version>
   <packaging>bundle</packaging>
   <name>ClusterJ OpenJPA Integration</name>
 

=== modified file 'storage/ndb/clusterj/clusterj-test/CMakeLists.txt'
--- a/storage/ndb/clusterj/clusterj-test/CMakeLists.txt	2011-09-16 12:58:08 +0000
+++ b/storage/ndb/clusterj/clusterj-test/CMakeLists.txt	2011-10-04 05:22:45 +0000
@@ -32,15 +32,15 @@ ENDFOREACH()
 SET ( CLASSPATH 
   target/classes
   ${WITH_CLASSPATH}
-  ${CMAKE_SOURCE_DIR}/storage/ndb/clusterj/clusterj-api/target/classes
-  ${CMAKE_SOURCE_DIR}/storage/ndb/clusterj/clusterj-core/target/classes)
+  ${CMAKE_BINARY_DIR}/storage/ndb/clusterj/clusterj-api/target/classes
+  ${CMAKE_BINARY_DIR}/storage/ndb/clusterj/clusterj-core/target/classes)
 
 SET(CLUSTERJ_TEST_EXPORTS regression,testsuite.clusterj,testsuite.clusterj.model,testsuite.clusterj.domaintypehandler)
 CREATE_MANIFEST(manifest.mf ${CLUSTERJ_TEST_EXPORTS} clusterj-test)
 
 CREATE_JAR(clusterj-test ${JAVA_SOURCES}
   CLASSPATH ${CLASSPATH}
-  MANIFEST manifest.mf
+  MANIFEST ${CMAKE_CURRENT_SOURCE_DIR}/manifest.mf
   DEPENDENCIES clusterj.jar
   EXTRA_FILES src/main/resources/META-INF
               src/main/resources/schema.sql

=== modified file 'storage/ndb/clusterj/clusterj-test/pom.xml'
--- a/storage/ndb/clusterj/clusterj-test/pom.xml	2011-07-05 22:25:18 +0000
+++ b/storage/ndb/clusterj/clusterj-test/pom.xml	2011-09-26 13:51:16 +0000
@@ -20,13 +20,13 @@
   <parent>
     <groupId>com.mysql.clusterj</groupId>
     <artifactId>clusterj-aggregate</artifactId>
-    <version>7.1.16-SNAPSHOT</version>
+    <version>7.1.17-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>com.mysql.clusterj</groupId>
   <artifactId>clusterj-test</artifactId>
   <packaging>jar</packaging>
-  <version>7.1.16-SNAPSHOT</version>
+  <version>7.1.17-SNAPSHOT</version>
   <name>ClusterJ Test Suite</name>
   <build>
     <plugins>

=== modified file 'storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractQueryTest.java'
--- a/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractQueryTest.java	2011-07-05 22:11:45 +0000
+++ b/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractQueryTest.java	2011-10-02 21:20:50 +0000
@@ -87,6 +87,8 @@ abstract public class AbstractQueryTest 
         public PredicateOperand paramUpperPredicate;
         public PredicateOperand paramInPredicate;
         public Predicate equal;
+        public Predicate isNull;
+        public Predicate isNotNull;
         public Predicate equalOrEqual;
         public Predicate greaterThan;
         public Predicate greaterEqual;
@@ -117,6 +119,8 @@ abstract public class AbstractQueryTest 
         public PredicateOperand extraParamInPredicate;
         public PredicateOperand extraProperty;
         public Predicate extraEqual;
+        public Predicate extraIsNull;
+        public Predicate extraIsNotNull;
         public Predicate extraGreaterThan;
         public Predicate extraGreaterEqual;
         public Predicate extraLessThan;
@@ -150,6 +154,8 @@ abstract public class AbstractQueryTest 
             propertyPredicate = dobj.get(propertyName);
             // comparison operations
             equal = propertyPredicate.equal(paramEqualPredicate);
+            isNull = propertyPredicate.isNull();
+            isNotNull = propertyPredicate.isNotNull();
             greaterThan = propertyPredicate.greaterThan(paramLowerPredicate);
             greaterEqual = propertyPredicate.greaterEqual(paramLowerPredicate);
             lessThan = propertyPredicate.lessThan(paramUpperPredicate);
@@ -186,6 +192,8 @@ abstract public class AbstractQueryTest 
             this.extraProperty = dobj.get(extraPropertyName);
             // comparison operations
             this.extraEqual = extraProperty.equal(extraParamEqualPredicate);
+            this.extraIsNull = extraProperty.isNull();
+            this.extraIsNotNull = extraProperty.isNotNull();
             this.extraGreaterThan = extraProperty.greaterThan(extraParamLowerPredicate);
             this.extraGreaterEqual = extraProperty.greaterEqual(extraParamLowerPredicate);
             this.extraLessThan = extraProperty.lessThan(extraParamUpperPredicate);
@@ -307,6 +315,26 @@ abstract public class AbstractQueryTest 
             }
             };
                     
+    PredicateProvider extraIsNullPredicateProvider = 
+        new PredicateProvider() {
+            public Predicate getPredicate(QueryHolder holder) {
+                return holder.extraIsNull;
+                }
+            public String toString() {
+                return " isNull";
+            }
+            };
+                            
+    PredicateProvider extraIsNotNullPredicateProvider = 
+        new PredicateProvider() {
+            public Predicate getPredicate(QueryHolder holder) {
+                return holder.extraIsNotNull;
+                }
+            public String toString() {
+                return " isNotNull";
+            }
+            };
+                                    
     /** Print the result instance. Override this in a subclass if needed.
      * 
      * @param instance the instance to print if needed
@@ -330,6 +358,32 @@ abstract public class AbstractQueryTest 
         tx.commit();
     }
 
+    public void isNullQuery(String propertyName, String expectedIndex, int... expected) {
+        tx.begin();
+        QueryHolder holder = new QueryHolder(getInstanceType(), propertyName, expectedIndex);
+        // specify the where clause
+        holder.dobj.where(holder.isNull);
+        // create the query
+        holder.createQuery(session);
+        // get the results
+        holder.setExpectedResultIds(expected);
+        holder.checkResults(propertyName + " isNull");
+        tx.commit();
+    }
+
+    public void isNotNullQuery(String propertyName, String expectedIndex, int... expected) {
+        tx.begin();
+        QueryHolder holder = new QueryHolder(getInstanceType(), propertyName, expectedIndex);
+        // specify the where clause
+        holder.dobj.where(holder.isNotNull);
+        // create the query
+        holder.createQuery(session);
+        // get the results
+        holder.setExpectedResultIds(expected);
+        holder.checkResults(propertyName + " isNotNull");
+        tx.commit();
+    }
+
     public void likeQuery(String propertyName, String expectedIndex,
             Object parameterValue, int... expected) {
         tx.begin();

=== modified file 'storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/QueryNotNullTest.java'
--- a/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/QueryNotNullTest.java	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/QueryNotNullTest.java	2011-10-03 07:22:29 +0000
@@ -1,6 +1,5 @@
 /*
-Copyright 2010 Sun Microsystems, Inc.
-All rights reserved. Use is subject to license terms.
+   Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -110,4 +109,36 @@ public class QueryNotNullTest extends Ab
         failOnError();        
     }
 
+    public void testExtraEqualIsNotNull() {
+        equalAnd1ExtraQuery("int_not_null_btree", 8, "int_null_none", extraIsNotNullPredicateProvider, "dummy unused value", "idx_int_not_null_btree", 8);
+        equalAnd1ExtraQuery("int_not_null_hash", 8, "int_null_none", extraIsNotNullPredicateProvider, "dummy unused value", "none", 8);
+        equalAnd1ExtraQuery("int_not_null_both", 8, "int_null_none", extraIsNotNullPredicateProvider, "dummy unused value", "idx_int_not_null_both", 8);
+        equalAnd1ExtraQuery("int_not_null_none", 8, "int_null_none", extraIsNotNullPredicateProvider, "dummy unused value", "none", 8);
+        failOnError();        
+    }
+
+    public void testBtreeIsNotNull() {
+        isNotNullQuery("int_not_null_btree", "none", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
+        isNotNullQuery("int_null_btree", "none", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
+        failOnError();        
+    }
+
+    public void testHashIsNotNull() {
+        isNotNullQuery("int_not_null_hash", "none", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
+        isNotNullQuery("int_null_hash", "none", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
+        failOnError();        
+    }
+
+    public void testBothIsNotNull() {
+        isNotNullQuery("int_not_null_both", "none", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
+        isNotNullQuery("int_null_both", "none", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
+        failOnError();        
+    }
+
+    public void testNoneIsNotNull() {
+        isNotNullQuery("int_not_null_none", "none", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
+        isNotNullQuery("int_null_none", "none", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
+        failOnError();        
+    }
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/QueryNullTest.java'
--- a/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/QueryNullTest.java	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/QueryNullTest.java	2011-10-03 07:22:29 +0000
@@ -1,6 +1,5 @@
 /*
-Copyright 2010 Sun Microsystems, Inc.
-All rights reserved. Use is subject to license terms.
+   Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -113,6 +112,38 @@ public class QueryNullTest extends Abstr
         failOnError();        
     }
 
+    public void testExtraIsNull() {
+        equalAnd1ExtraQuery("int_not_null_btree", 8, "int_null_none", extraIsNullPredicateProvider, "dummy unused value", "idx_int_not_null_btree");
+        equalAnd1ExtraQuery("int_not_null_hash", 8, "int_null_none", extraIsNullPredicateProvider, "dummy unused value", "none");
+        equalAnd1ExtraQuery("int_not_null_both", 8, "int_null_none", extraIsNullPredicateProvider, "dummy unused value", "idx_int_not_null_both");
+        equalAnd1ExtraQuery("int_not_null_none", 8, "int_null_none", extraIsNullPredicateProvider, "dummy unused value", "none");
+        failOnError();        
+    }
+
+    public void testBtreeIsNull() {
+        isNullQuery("int_not_null_btree", "none");
+        isNullQuery("int_null_btree", "none");
+        failOnError();        
+    }
+
+    public void testHashIsNull() {
+        isNullQuery("int_not_null_hash", "none");
+        isNullQuery("int_null_hash", "none");
+        failOnError();        
+    }
+
+    public void testBothIsNull() {
+        isNullQuery("int_not_null_both", "none");
+        isNullQuery("int_null_both", "none");
+        failOnError();        
+    }
+
+    public void testNoneIsNull() {
+        isNullQuery("int_not_null_none", "none");
+        isNullQuery("int_null_none", "none");
+        failOnError();        
+    }
+
     public void testGreaterThanNull() {
         try {
             greaterThanQuery("int_not_null_btree", "none", null);

=== modified file 'storage/ndb/clusterj/clusterj-tie/pom.xml'
--- a/storage/ndb/clusterj/clusterj-tie/pom.xml	2011-07-05 22:25:18 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/pom.xml	2011-09-26 13:51:16 +0000
@@ -20,13 +20,13 @@
   <parent>
     <groupId>com.mysql.clusterj</groupId>
     <artifactId>clusterj-aggregate</artifactId>
-    <version>7.1.16-SNAPSHOT</version>
+    <version>7.1.17-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>com.mysql.clusterj</groupId>
   <artifactId>clusterj-tie</artifactId>
   <packaging>bundle</packaging>
-  <version>7.1.16-SNAPSHOT</version>
+  <version>7.1.17-SNAPSHOT</version>
   <name>ClusterJ Tie</name>
   <description>The ndbj-tie implementation of ClusterJ storage spi</description>
   <build>

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ScanFilterImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ScanFilterImpl.java	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ScanFilterImpl.java	2011-10-03 07:22:29 +0000
@@ -160,6 +160,11 @@ class ScanFilterImpl implements ScanFilt
         handleError(returnCode, ndbScanFilter);
     }
 
+    public void isNotNull(Column storeColumn) {
+        int returnCode = ndbScanFilter.isnotnull(storeColumn.getColumnId());
+        handleError(returnCode, ndbScanFilter);
+    }
+
     public void end() {
         int returnCode = ndbScanFilter.end();
         handleError(returnCode, ndbScanFilter);

=== modified file 'storage/ndb/clusterj/pom.xml'
--- a/storage/ndb/clusterj/pom.xml	2011-09-05 13:55:32 +0000
+++ b/storage/ndb/clusterj/pom.xml	2011-09-26 13:51:16 +0000
@@ -24,7 +24,7 @@
   <groupId>com.mysql.clusterj</groupId>
   <artifactId>clusterj-aggregate</artifactId>
   <packaging>pom</packaging>
-  <version>7.1.16-SNAPSHOT</version>
+  <version>7.1.17-SNAPSHOT</version>
   <name>ClusterJ Aggregate</name>
   <description>The aggregate maven project of ClusterJ</description>
   <modules>
@@ -87,32 +87,32 @@
     <dependency>
       <groupId>com.mysql.clusterj</groupId>
       <artifactId>clusterj-api</artifactId>
-      <version>7.1.16-SNAPSHOT</version>
+      <version>7.1.17-SNAPSHOT</version>
     </dependency>
     <dependency>
       <groupId>com.mysql.clusterj</groupId>
       <artifactId>clusterj-core</artifactId>
-      <version>7.1.16-SNAPSHOT</version>
+      <version>7.1.17-SNAPSHOT</version>
     </dependency>
     <dependency>
       <groupId>com.mysql.clusterj</groupId>
       <artifactId>clusterj-test</artifactId>
-      <version>7.1.16-SNAPSHOT</version>
+      <version>7.1.17-SNAPSHOT</version>
     </dependency>
     <dependency>
       <groupId>com.mysql.clusterj</groupId>
       <artifactId>clusterj-tie</artifactId>
-      <version>7.1.16-SNAPSHOT</version>
+      <version>7.1.17-SNAPSHOT</version>
     </dependency>
     <dependency>
       <groupId>com.mysql.clusterj</groupId>
       <artifactId>clusterj-jpatest</artifactId>
-      <version>7.1.16-SNAPSHOT</version>
+      <version>7.1.17-SNAPSHOT</version>
     </dependency>
     <dependency>
       <groupId>ndbjtie</groupId>
       <artifactId>ndbjtie</artifactId>
-      <version>7.1.16-SNAPSHOT</version>
+      <version>7.1.17-SNAPSHOT</version>
     </dependency>
     </dependencies>
   </dependencyManagement>

=== modified file 'storage/ndb/compile-cluster'
--- a/storage/ndb/compile-cluster	2011-09-19 14:55:25 +0000
+++ b/storage/ndb/compile-cluster	2011-09-29 11:30:39 +0000
@@ -31,13 +31,15 @@ use Getopt::Long;
 # thus acting like a filter and passing all other arguments
 # straight through
 my $opt_debug;
+my $opt_build_type;
+
 Getopt::Long::Configure("pass_through");
 GetOptions(
 
   # Build MySQL Server and NDB with debug
   'debug' => \$opt_debug,
   'with-debug:s' => sub { $opt_debug = 1; },
-
+  'build-type=s' => \$opt_build_type,
 ) or exit(1);
 
 # Find source root directory, assume this script is
@@ -72,6 +74,7 @@ my $cmake_version_id;
 #
 {
   my @args;
+  my @opt_build_type_arg;
   push(@args, "$srcdir/cmake/configure.pl");
 
   # MySQL Server options
@@ -87,7 +90,12 @@ my $cmake_version_id;
   push(@args, "--with-plugin-ndbcluster");
   push(@args, "--with-ndb-test");
 
-  cmd($^X, @args, @ARGV);
+  if ($opt_build_type)
+  {
+    push(@opt_build_type_arg, "--cmake-args=\"-G " . $opt_build_type . "\"");
+  }
+
+  cmd($^X, @args, @ARGV, @opt_build_type_arg);
 }
 
 #

=== modified file 'storage/ndb/config/type_JAVA.cmake'
--- a/storage/ndb/config/type_JAVA.cmake	2011-09-16 07:51:29 +0000
+++ b/storage/ndb/config/type_JAVA.cmake	2011-10-04 05:22:45 +0000
@@ -131,7 +131,7 @@ MACRO(CREATE_JAR)
     IF(IS_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/${F})
       ADD_CUSTOM_COMMAND(
         OUTPUT ${MARKER}
-        COMMAND ${CMAKE_COMMAND} -E copy_directory ${F} ${CLASS_DIR}/${N}
+        COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/${F} ${CLASS_DIR}/${N}
         COMMAND ${CMAKE_COMMAND} -E touch ${MARKER}
         DEPENDS ${F} ${OLD_MARKER}
         COMMENT "Adding directory ${N} to ${TARGET}.jar"
@@ -139,7 +139,7 @@ MACRO(CREATE_JAR)
     ELSE()
       ADD_CUSTOM_COMMAND(
         OUTPUT ${MARKER}
-        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${F} ${CLASS_DIR}/${N}
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_SOURCE_DIR}/${F} ${CLASS_DIR}/${N}
         COMMAND ${CMAKE_COMMAND} -E touch ${MARKER}
         DEPENDS ${F} ${OLD_MARKER}
         COMMENT "Adding file ${N} to ${TARGET}.jar"

=== modified file 'storage/ndb/include/CMakeLists.txt'
--- a/storage/ndb/include/CMakeLists.txt	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/include/CMakeLists.txt	2011-10-03 11:34:40 +0000
@@ -24,6 +24,8 @@ CHECK_TYPE_SIZE("long long" NDB_SIZEOF_L
 CONFIGURE_FILE(ndb_types.h.in
                ${CMAKE_CURRENT_SOURCE_DIR}/ndb_types.h
                @ONLY)
+# Exclude ndb_types.h from "make dist"
+LIST(APPEND CPACK_SOURCE_IGNORE_FILES include/ndb_types\\\\.h$)
 
 #
 # Read a value for variable from ndb_configure.m4
@@ -86,4 +88,6 @@ ENDIF()
 CONFIGURE_FILE(ndb_version.h.in
                ${CMAKE_CURRENT_SOURCE_DIR}/ndb_version.h
                @ONLY)
+# Exclude ndb_version.h from "make dist"
+LIST(APPEND CPACK_SOURCE_IGNORE_FILES include/ndb_version\\\\.h$)
 

=== modified file 'storage/ndb/include/portlib/NdbMutex.h'
--- a/storage/ndb/include/portlib/NdbMutex.h	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/include/portlib/NdbMutex.h	2011-09-28 10:46:30 +0000
@@ -30,11 +30,12 @@ extern "C" {
 #else
 #include <pthread.h>
 #endif
-#ifndef NDB_MUTEX_STAT
+#if !defined NDB_MUTEX_STAT && !defined NDB_MUTEX_DEADLOCK_DETECTOR
 typedef pthread_mutex_t NdbMutex;
 #else
 typedef struct {
   pthread_mutex_t mutex;
+#ifdef NDB_MUTEX_STAT
   unsigned cnt_lock;
   unsigned cnt_lock_contention;
   unsigned cnt_trylock_ok;
@@ -47,6 +48,10 @@ typedef struct {
   unsigned long long max_hold_time_ns;
   unsigned long long lock_start_time_ns;
   char name[32];
+#endif
+#ifdef NDB_MUTEX_DEADLOCK_DETECTOR
+  struct ndb_mutex_state * m_mutex_state;
+#endif
 } NdbMutex;
 #endif
 

=== modified file 'storage/ndb/memcache/CMakeLists.txt'
--- a/storage/ndb/memcache/CMakeLists.txt	2011-09-21 08:30:46 +0000
+++ b/storage/ndb/memcache/CMakeLists.txt	2011-09-22 03:45:48 +0000
@@ -82,16 +82,13 @@ add_definitions(-DDEBUG_OUTPUT)
 # Set extra flags for the C compiler
 IF(${CMAKE_COMPILER_IS_GNUCC})
   SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --std=gnu99 -Wall -Wredundant-decls")
-ELSE()
-  message(STATUS "Not gnu c")
+ELSEIF(CMAKE_C_COMPILER_ID MATCHES "SunPro")
+  SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -xc99=all")
 ENDIF()
 
 # Set extra flags for the C++ compiler
 IF(${CMAKE_COMPILER_IS_GNUCXX})
   STRING(REPLACE "-fno-implicit-templates" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
-  SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-variadic-macros")
-ELSE()
-  message(STATUS "Not gnu c++")
 ENDIF()
 
 

=== modified file 'storage/ndb/memcache/include/atomics.h'
--- a/storage/ndb/memcache/include/atomics.h	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/include/atomics.h	2011-09-22 04:38:55 +0000
@@ -81,7 +81,6 @@ typedef volatile Uint32 ndbmc_atomic32_t
 
 DECLARE_FUNCTIONS_WITH_C_LINKAGE
 
-Int32 atomic_add_int(ndbmc_atomic32_t *loc, int amount);
 int atomic_cmp_swap_int(ndbmc_atomic32_t *loc, int oldvalue, int newvalue);
 int atomic_cmp_swap_ptr(void * volatile *loc, void *oldvalue, void *newvalue);
 

=== modified file 'storage/ndb/memcache/include/hash_item_util.h'
--- a/storage/ndb/memcache/include/hash_item_util.h	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/include/hash_item_util.h	2011-09-22 04:38:55 +0000
@@ -21,7 +21,6 @@
 #define NDBMEMCACHE_HASH_ITEM_UTIL_H
 
 #include <sys/types.h>
-#include <stdbool.h>
 #include <stdint.h>
 
 #include <memcached/engine.h> 

=== modified file 'storage/ndb/memcache/include/ndb_engine.h'
--- a/storage/ndb/memcache/include/ndb_engine.h	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/include/ndb_engine.h	2011-09-22 04:38:55 +0000
@@ -23,7 +23,10 @@
 #include "ndbmemcache_config.h"
 
 #include <pthread.h>
+
+#ifndef __cplusplus
 #include <stdbool.h>
+#endif
 
 #include <memcached/engine.h>
 #include <memcached/util.h>

=== modified file 'storage/ndb/memcache/include/ndbmemcache_config.in'
--- a/storage/ndb/memcache/include/ndbmemcache_config.in	2011-09-17 23:23:26 +0000
+++ b/storage/ndb/memcache/include/ndbmemcache_config.in	2011-09-22 03:35:26 +0000
@@ -25,7 +25,9 @@
 
 #cmakedefine HAVE_MACH_MACH_TIME_H
 #cmakedefine HAVE_SRANDOMDEV
+#ifndef HAVE_GETHRTIME
 #cmakedefine HAVE_GETHRTIME
+#endif
 #cmakedefine HAVE_GETHRVTIME
 #cmakedefine HAVE_MEMSET
 

=== modified file 'storage/ndb/memcache/include/timing.h'
--- a/storage/ndb/memcache/include/timing.h	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/include/timing.h	2011-09-22 04:59:29 +0000
@@ -51,9 +51,9 @@ typedef int time_point_t;
 
 /* On platforms without gethrvtime(), define get_thread_time() to 0. */
 #ifdef HAVE_GETHRVTIME
-#define get_thread_vtime(X) gethrvtime(X)
+#define get_thread_vtime() gethrvtime()
 #else
-#define get_thread_vtime(X) 0
+#define get_thread_vtime() 0
 #endif
 
 

=== modified file 'storage/ndb/memcache/src/ClusterConnectionPool.cc'
--- a/storage/ndb/memcache/src/ClusterConnectionPool.cc	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/src/ClusterConnectionPool.cc	2011-09-22 07:38:13 +0000
@@ -52,7 +52,7 @@ ClusterConnectionPool *get_connection_po
 }
 
 
-bool store_connection_pool_for_cluster(const char *name, 
+void store_connection_pool_for_cluster(const char *name, 
                                        const ClusterConnectionPool *p) {
   DEBUG_ENTER();
   if(name == 0) name = "[default]";

=== modified file 'storage/ndb/memcache/src/QueryPlan.cc'
--- a/storage/ndb/memcache/src/QueryPlan.cc	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/src/QueryPlan.cc	2011-09-22 02:10:17 +0000
@@ -20,6 +20,7 @@
 #include <stdio.h>
 #include <assert.h>
 #include <stddef.h>
+#include <strings.h>
 
 #include "NdbApi.hpp"
 #include <memcached/extension_loggers.h>

=== modified file 'storage/ndb/memcache/src/TableSpec.cc'
--- a/storage/ndb/memcache/src/TableSpec.cc	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/src/TableSpec.cc	2011-09-22 02:10:17 +0000
@@ -26,10 +26,37 @@
 #include "debug.h"
 #include "Record.h" 
 
-/* MAX_KEY_COLUMNS and MAX_VAL_COLUMNS are defined in Record.h */
+/* Reimplementation of strsep() which is not always available */
+char * tokenize_list(char **stringloc, const char *delim) {
+  char *token_start = *stringloc;
+  char *s = *stringloc;
+
+  if (s == 0) return 0;
+
+  while(1) {
+    char c = *s++;
+    char sepc;
+    const char * sep_char = delim;
+    do { /* iterate over the possible delimiter characters */
+      sepc = *sep_char++;
+      if (sepc == c) {  /* found a delim.  modify the string in-place: */
+        if(c) {
+          s[-1] = 0;  /* place a null before the delim. */
+          *stringloc = s;  /* advance pointer to next span */
+        }
+        else { 
+          *stringloc = 0;  /* signal end of tokenizing */
+        }
+        return token_start;
+      }
+    } while (sepc != 0);
+  }
+}
+
 
 /* Parse a comma-separated string like "column1, column2".
-   Makes a copy of "list".
+   Makes a copy of "list".  
+   MAX_KEY_COLUMNS and MAX_VAL_COLUMNS are defined in Record.h 
 */
 int TableSpec::build_column_list(const char ** const &col_array, 
                                  const char *list) {
@@ -37,7 +64,7 @@ int TableSpec::build_column_list(const c
   if(list == 0) return 0;
   char *next = strdup(list);  
   while(next && n < (MAX_KEY_COLUMNS + MAX_VAL_COLUMNS)) {
-    char *item = strsep(& next, ", ");
+    char *item = tokenize_list(& next, ", ");
     if(*item != '\0')
     {
       col_array[n++] = item;

=== modified file 'storage/ndb/memcache/src/atomics.c'
--- a/storage/ndb/memcache/src/atomics.c	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/src/atomics.c	2011-09-22 04:59:29 +0000
@@ -31,7 +31,7 @@ int atomic_cmp_swap_int(ndbmc_atomic32_t
   return (stored_old == old);  
 }
 
-int atomic_cmp_swap_ptr(volatile void **loc, void *old, void *new) {
+int atomic_cmp_swap_ptr(void * volatile *loc, void *old, void *new) {
   void * stored_old;
   
   membar_enter();
@@ -44,13 +44,4 @@ int atomic_cmp_swap_ptr_nobarrier(volati
   return (atomic_cas_ptr(loc, old, new) == old);
 }
 
-Int32 atomic_add_int(ndbmc_atomic32_t *loc, Int32 amount) {
-  Int 32 nv;
-
-  membar_enter();
-  nv = atomic_add_32_nv(loc, amount);
-  membar_exit();
-  return nv;
-}
-
 #endif

=== modified file 'storage/ndb/memcache/unit/all_tests.h'
--- a/storage/ndb/memcache/unit/all_tests.h	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/unit/all_tests.h	2011-09-22 04:59:29 +0000
@@ -27,7 +27,7 @@
 #define require(x) if(!(x)) return __LINE__;
 #define pass return 0;
 
-#define detail(v, fmt, ...) if(v) printf (fmt, ## __VA_ARGS__)
+#define detail(v, ...) if(v) printf (__VA_ARGS__)
 #define RESULT getNdbError().code
 
 #define REQ_NONE           0

=== modified file 'storage/ndb/ndb_configure.cmake'
--- a/storage/ndb/ndb_configure.cmake	2011-09-07 10:08:09 +0000
+++ b/storage/ndb/ndb_configure.cmake	2011-10-05 07:32:06 +0000
@@ -68,7 +68,7 @@ CHECK_FUNCTION_EXISTS(bzero HAVE_BZERO)
 
 CHECK_INCLUDE_FILES(sun_prefetch.h HAVE_SUN_PREFETCH_H)
 
-CHECK_CXX_SOURCE_COMPILES("
+CHECK_CXX_SOURCE_RUNS("
 unsigned A = 7;
 int main()
 {
@@ -187,6 +187,8 @@ ENDIF()
 
 CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/include/ndb_config.h.in
                ${CMAKE_CURRENT_BINARY_DIR}/include/ndb_config.h)
+# Exclude ndb_config.h from "make dist"
+LIST(APPEND CPACK_SOURCE_IGNORE_FILES include/ndb_config\\\\.h$)
 
 # Define HAVE_NDB_CONFIG_H to make ndb_global.h include the
 # generated ndb_config.h
@@ -206,6 +208,9 @@ IF(WITH_CLASSPATH)
   MESSAGE(STATUS "Using supplied classpath: ${WITH_CLASSPATH}")
 ELSE()
   SET(WITH_CLASSPATH "$ENV{CLASSPATH}")
+  IF(WIN32)
+    STRING(REPLACE "\\" "/" WITH_CLASSPATH "${WITH_CLASSPATH}")
+  ENDIF()
   IF(WITH_CLASSPATH)
     MESSAGE(STATUS "Using CLASSPATH from environment: ${WITH_CLASSPATH}")    
   ENDIF()

=== modified file 'storage/ndb/src/common/debugger/signaldata/ScanTab.cpp'
--- a/storage/ndb/src/common/debugger/signaldata/ScanTab.cpp	2011-09-02 09:16:56 +0000
+++ b/storage/ndb/src/common/debugger/signaldata/ScanTab.cpp	2011-10-03 08:02:28 +0000
@@ -78,9 +78,9 @@ printSCANTABCONF(FILE * output, const Ui
   size_t op_count= requestInfo & (~ScanTabConf::EndOfData);
   if (op_count)
   {
-    fprintf(output, " Operation(s) [api tc rows len]:\n");
     if (len == ScanTabConf::SignalLength + 4 * op_count)
     {
+      fprintf(output, " Operation(s) [api tc rows len]:\n");
       ScanTabConf::OpData * op = (ScanTabConf::OpData*)
         (theData + ScanTabConf::SignalLength);
       for(size_t i = 0; i<op_count; i++)
@@ -91,9 +91,9 @@ printSCANTABCONF(FILE * output, const Ui
         op++;
       }
     }
-    else
+    else if (len == ScanTabConf::SignalLength + 3 * op_count)
     {
-      assert(len == ScanTabConf::SignalLength + 3 * op_count);
+      fprintf(output, " Operation(s) [api tc rows len]:\n");      
       for(size_t i = 0; i<op_count; i++)
       {
         ScanTabConf::OpData * op = (ScanTabConf::OpData*)
@@ -104,6 +104,12 @@ printSCANTABCONF(FILE * output, const Ui
                 ScanTabConf::getLength(op->rows));
       }
     }
+    else
+    {
+      // ScanTabConf::OpData stored in section 0 of signal.
+      assert(len == ScanTabConf::SignalLength);
+      fprintf(output, " Long signal. Cannot print operations.");
+    }
     fprintf(output, "\n");
   }
   return false;

=== modified file 'storage/ndb/src/common/portlib/CMakeLists.txt'
--- a/storage/ndb/src/common/portlib/CMakeLists.txt	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/common/portlib/CMakeLists.txt	2011-09-28 10:46:30 +0000
@@ -27,7 +27,7 @@ ADD_CONVENIENCE_LIBRARY(ndbportlib
             NdbEnv.c NdbThread.c NdbHost.c NdbTCP.cpp
             NdbMem.c NdbConfig.c NdbTick.c NdbDir.cpp
             ndb_daemon.cc ${EXTRA_SRC}
-            NdbNuma.cpp)
+            NdbNuma.cpp NdbMutex_DeadlockDetector.cpp)
 TARGET_LINK_LIBRARIES(ndbportlib mysys ${LIBSOCKET})
 
 ADD_EXECUTABLE(NdbDir-t

=== modified file 'storage/ndb/src/common/portlib/NdbCondition.c'
--- a/storage/ndb/src/common/portlib/NdbCondition.c	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/common/portlib/NdbCondition.c	2011-09-28 10:46:30 +0000
@@ -27,6 +27,10 @@ static int init = 0;
 static int clock_id = CLOCK_REALTIME;
 #endif
 
+#if defined NDB_MUTEX_STAT || defined NDB_MUTEX_DEADLOCK_DETECTOR
+#define NDB_MUTEX_STRUCT
+#endif
+
 void
 NdbCondition_initialize(int need_monotonic)
 {
@@ -129,7 +133,7 @@ NdbCondition_Wait(struct NdbCondition* p
   if (p_cond == NULL || p_mutex == NULL)
     return 1;
   
-#ifdef NDB_MUTEX_STAT
+#ifdef NDB_MUTEX_STRUCT
   result = pthread_cond_wait(&p_cond->cond, &p_mutex->mutex);
 #else
   result = pthread_cond_wait(&p_cond->cond, p_mutex);
@@ -187,17 +191,23 @@ NdbCondition_WaitTimeoutAbs(struct NdbCo
                             const struct timespec * abstime)
 {
 #ifdef NDB_WIN
+  /**
+   * mysys windows wrapper of pthread_cond_timedwait
+   *   does not have a const argument for the timespec
+   */
   struct timespec tmp = *abstime;
-  abstime = &tmp;
+  struct timespec * waitarg = &tmp;
+#else
+  const struct timespec * waitarg = abstime;
 #endif
 
   if (p_cond == NULL || p_mutex == NULL)
     return 1;
 
-#ifdef NDB_MUTEX_STAT
-  return pthread_cond_timedwait(&p_cond->cond, &p_mutex->mutex, abstime);
+#ifdef NDB_MUTEX_STRUCT
+  return pthread_cond_timedwait(&p_cond->cond, &p_mutex->mutex, waitarg);
 #else
-  return pthread_cond_timedwait(&p_cond->cond, p_mutex, abstime);
+  return pthread_cond_timedwait(&p_cond->cond, p_mutex, waitarg);
 #endif
 }
 

=== modified file 'storage/ndb/src/common/portlib/NdbMutex.c'
--- a/storage/ndb/src/common/portlib/NdbMutex.c	2011-09-07 10:08:09 +0000
+++ b/storage/ndb/src/common/portlib/NdbMutex.c	2011-10-05 07:24:39 +0000
@@ -21,10 +21,34 @@
 #include <NdbMutex.h>
 #include <NdbMem.h>
 
+#ifdef NDB_MUTEX_DEADLOCK_DETECTOR
+#include "NdbMutex_DeadlockDetector.h"
+#endif
+
 #ifdef NDB_MUTEX_STAT
 static FILE * statout = 0;
 #endif
 
+#if defined NDB_MUTEX_STAT || defined NDB_MUTEX_DEADLOCK_DETECTOR
+#define NDB_MUTEX_STRUCT
+#endif
+
+void
+NdbMutex_SysInit()
+{
+#ifdef NDB_MUTEX_DEADLOCK_DETECTOR
+  NdbMutex_DeadlockDetectorInit();
+#endif
+}
+
+void
+NdbMutex_SysEnd()
+{
+#ifdef NDB_MUTEX_DEADLOCK_DETECTOR
+  NdbMutex_DeadlockDetectorEnd();
+#endif
+}
+
 NdbMutex* NdbMutex_Create()
 {
   return NdbMutex_CreateWithName(0);
@@ -59,12 +83,15 @@ int NdbMutex_InitWithName(NdbMutex* pNdb
   int result;
   pthread_mutex_t * p;
   DBUG_ENTER("NdbMutex_Init");
+  (void)name;
 
-#ifdef NDB_MUTEX_STAT
+#ifdef NDB_MUTEX_STRUCT
   bzero(pNdbMutex, sizeof(NdbMutex));
+  p = &pNdbMutex->mutex;
+
+#ifdef NDB_MUTEX_STAT
   pNdbMutex->min_lock_wait_time_ns = ~(Uint64)0;
   pNdbMutex->min_hold_time_ns = ~(Uint64)0;
-  p = &pNdbMutex->mutex;
   if (name == 0)
   {
     snprintf(pNdbMutex->name, sizeof(pNdbMutex->name), "%p",
@@ -79,9 +106,10 @@ int NdbMutex_InitWithName(NdbMutex* pNdb
   {
     statout = stdout;
   }
+#endif
+
 #else
   p = pNdbMutex;
-  (void)name;
 #endif
 
 #if defined(VM_TRACE) && \
@@ -99,6 +127,13 @@ int NdbMutex_InitWithName(NdbMutex* pNdb
 #else
   result = pthread_mutex_init(p, 0);
 #endif
+
+#ifdef NDB_MUTEX_DEADLOCK_DETECTOR
+  if (result == 0)
+  {
+    ndb_mutex_created(pNdbMutex);
+  }
+#endif
   DBUG_RETURN(result);
 }
 
@@ -109,7 +144,11 @@ int NdbMutex_Destroy(NdbMutex* p_mutex)
   if (p_mutex == NULL)
     return -1;
 
-#ifdef NDB_MUTEX_STAT
+#ifdef NDB_MUTEX_DEADLOCK_DETECTOR
+  ndb_mutex_destoyed(p_mutex);
+#endif
+
+#ifdef NDB_MUTEX_STRUCT
   result = pthread_mutex_destroy(&p_mutex->mutex);
 #else
   result = pthread_mutex_destroy(p_mutex);
@@ -201,11 +240,17 @@ int NdbMutex_Lock(NdbMutex* p_mutex)
     p_mutex->cnt_lock++;
     p_mutex->lock_start_time_ns = stop;
   }
+#elif defined NDB_MUTEX_STRUCT
+  result = pthread_mutex_lock(&p_mutex->mutex);
 #else
   result = pthread_mutex_lock(p_mutex);
 #endif
   assert(result == 0);
 
+#ifdef NDB_MUTEX_DEADLOCK_DETECTOR
+  ndb_mutex_locked(p_mutex);
+#endif
+
   return result;
 }
 
@@ -234,11 +279,17 @@ int NdbMutex_Unlock(NdbMutex* p_mutex)
       dumpstat(p_mutex);
     }
   }
+#elif defined NDB_MUTEX_STRUCT
+  result = pthread_mutex_unlock(&p_mutex->mutex);
 #else
   result = pthread_mutex_unlock(p_mutex);
 #endif
   assert(result == 0);
 
+#ifdef NDB_MUTEX_DEADLOCK_DETECTOR
+  ndb_mutex_unlocked(p_mutex);
+#endif
+
   return result;
 }
 
@@ -261,11 +312,21 @@ int NdbMutex_Trylock(NdbMutex* p_mutex)
   {
     __sync_fetch_and_add(&p_mutex->cnt_trylock_nok, 1);
   }
+#elif defined NDB_MUTEX_STRUCT
+  result = pthread_mutex_trylock(&p_mutex->mutex);
 #else
   result = pthread_mutex_trylock(p_mutex);
 #endif
   assert(result == 0 || result == EBUSY);
 
+#ifdef NDB_MUTEX_DEADLOCK_DETECTOR
+  if (result == 0)
+  {
+    ndb_mutex_try_locked(p_mutex);
+  }
+#endif
+
+
   return result;
 }
 

=== added file 'storage/ndb/src/common/portlib/NdbMutex_DeadlockDetector.cpp'
--- a/storage/ndb/src/common/portlib/NdbMutex_DeadlockDetector.cpp	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/src/common/portlib/NdbMutex_DeadlockDetector.cpp	2011-09-27 17:28:13 +0000
@@ -0,0 +1,422 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#include <NdbMutex.h>
+#include <NdbThread.h>
+#ifdef NDB_MUTEX_DEADLOCK_DETECTOR
+
+#include "NdbMutex_DeadlockDetector.h"
+
+#define NDB_THREAD_TLS_SELF NDB_THREAD_TLS_MAX
+
+static NdbMutex g_mutex_no_mutex; // We need a mutex to assign numbers to mutexes...
+static nmdd_mask g_mutex_no_mask = { 0, 0 };
+
+static unsigned alloc_mutex_no();
+static void release_mutex_no(unsigned no);
+
+static NdbMutex* get_element(struct nmdd_mutex_array *, unsigned i);
+static void add_mutex_to_array(struct nmdd_mutex_array *, NdbMutex*);
+static void remove_mutex_from_array(struct nmdd_mutex_array *, NdbMutex*);
+
+static void set_bit(struct nmdd_mask *, unsigned no);
+static void clear_bit(struct nmdd_mask* , unsigned no);
+static bool check_bit(const struct nmdd_mask* , unsigned no);
+
+static void release(struct nmdd_mutex_array *);
+static void release(struct nmdd_mask*);
+
+extern "C"
+void
+NdbMutex_DeadlockDetectorInit()
+{
+  NdbMutex_Init(&g_mutex_no_mutex);
+}
+
+extern "C"
+void
+NdbMutex_DeadlockDetectorEnd()
+{
+  release(&g_mutex_no_mask);
+}
+
+extern "C"
+void
+ndb_mutex_created(NdbMutex* p)
+{
+  p->m_mutex_state = (ndb_mutex_state*)malloc(sizeof(ndb_mutex_state));
+  bzero(p->m_mutex_state, sizeof(ndb_mutex_state));
+
+  /**
+   * Assign mutex no
+   */
+  p->m_mutex_state->m_no = alloc_mutex_no();
+}
+
+extern "C"
+void
+ndb_mutex_destoyed(NdbMutex* p)
+{
+  unsigned no = p->m_mutex_state->m_no;
+
+  /**
+   * In order to be able to reuse mutex_no,
+   *   we need to clear this no from all mutexes that have it in their before-map...
+   *   i.e. all mutexes in our after-map
+   */
+  for (unsigned i = 0; i<p->m_mutex_state->m_locked_after_list.m_used; i++)
+  {
+    NdbMutex * m = get_element(&p->m_mutex_state->m_locked_after_list, i);
+    assert(check_bit(&p->m_mutex_state->m_locked_after_mask, m->m_mutex_state->m_no));
+
+    /**
+     * And we need to lock it while doing this
+     */
+    NdbMutex_Lock(m);
+    assert(check_bit(&m->m_mutex_state->m_locked_before_mask, no));
+    clear_bit(&m->m_mutex_state->m_locked_before_mask, no);
+    remove_mutex_from_array(&m->m_mutex_state->m_locked_before_list, p);
+    NdbMutex_Unlock(m);
+  }
+
+  /**
+   * And we need to remove ourselves from the after-list of each mutex in our before-list
+   */
+  for (unsigned i = 0; i<p->m_mutex_state->m_locked_before_list.m_used; i++)
+  {
+    NdbMutex * m = get_element(&p->m_mutex_state->m_locked_before_list, i);
+    NdbMutex_Lock(m);
+    assert(check_bit(&m->m_mutex_state->m_locked_after_mask, no));
+    clear_bit(&m->m_mutex_state->m_locked_after_mask, no);
+    remove_mutex_from_array(&m->m_mutex_state->m_locked_after_list, p);
+    NdbMutex_Unlock(m);
+  }
+
+  release(&p->m_mutex_state->m_locked_before_mask);
+  release(&p->m_mutex_state->m_locked_before_list);
+  release(&p->m_mutex_state->m_locked_after_mask);
+  release(&p->m_mutex_state->m_locked_after_list);
+  release_mutex_no(no);
+}
+
+static
+ndb_mutex_thr_state*
+get_thr()
+{
+  void * p = NdbThread_GetTlsKey(NDB_THREAD_TLS_SELF);
+  return (ndb_mutex_thr_state*)p;
+}
+
+#define INC_SIZE 16
+
+static
+void
+add_lock_to_thread(ndb_mutex_thr_state * t, NdbMutex * m)
+{
+  add_mutex_to_array(&t->m_mutexes_locked, m);
+}
+
+static
+void
+add_lock_to_mutex_before_list(ndb_mutex_state * m1, NdbMutex * m2)
+{
+  assert(m1 != m2->m_mutex_state);
+  unsigned no = m2->m_mutex_state->m_no;
+  if (!check_bit(&m1->m_locked_before_mask, no))
+  {
+    set_bit(&m1->m_locked_before_mask, no);
+    add_mutex_to_array(&m1->m_locked_before_list, m2);
+  }
+}
+
+static
+void
+add_lock_to_mutex_after_list(ndb_mutex_state * m1, NdbMutex* m2)
+{
+  assert(m1 != m2->m_mutex_state);
+  unsigned no = m2->m_mutex_state->m_no;
+  if (!check_bit(&m1->m_locked_after_mask, no))
+  {
+    set_bit(&m1->m_locked_after_mask, no);
+    add_mutex_to_array(&m1->m_locked_after_list, m2);
+  }
+}
+
+extern "C"
+void
+ndb_mutex_locked(NdbMutex* p)
+{
+  ndb_mutex_state * m = p->m_mutex_state;
+  ndb_mutex_thr_state * thr = get_thr();
+  if (thr == 0)
+  {
+    /**
+     * These are threads not started with NdbThread_Create(...)
+     *   e.g mysql-server threads...ignore these for now
+     */
+    return;
+  }
+
+  for (unsigned i = 0; i < thr->m_mutexes_locked.m_used; i++)
+  {
+    /**
+     * We want to lock m
+     * Check that none of the mutexes we currently have locked
+     *   have m in their *before* list
+     */
+    NdbMutex * h = get_element(&thr->m_mutexes_locked, i);
+    if (check_bit(&h->m_mutex_state->m_locked_before_mask, m->m_no))
+    {
+      abort();
+    }
+
+    /**
+     * Add h to m's list of before-locks
+     */
+    add_lock_to_mutex_before_list(m, h);
+
+    /**
+     * Add m to h's list of after locks
+     */
+    add_lock_to_mutex_after_list(h->m_mutex_state, p);
+  }
+
+  add_lock_to_thread(thr, p);
+}
+
+extern "C"
+void
+ndb_mutex_unlocked(NdbMutex* m)
+{
+  ndb_mutex_thr_state * thr = get_thr();
+  if (thr == 0)
+  {
+    /**
+     * These are threads not started with NdbThread_Create(...)
+     *   e.g mysql-server threads...ignore these for now
+     */
+    return;
+  }
+  unsigned pos = thr->m_mutexes_locked.m_used;
+  assert(pos > 0);
+  assert(get_element(&thr->m_mutexes_locked, pos-1) == m);
+  thr->m_mutexes_locked.m_used --;
+}
+
+extern "C"
+void
+ndb_mutex_try_locked(NdbMutex* p)
+{
+
+}
+
+extern "C"
+void
+ndb_mutex_thread_init(struct ndb_mutex_thr_state* p)
+{
+  bzero(p, sizeof(* p));
+  NdbThread_SetTlsKey(NDB_THREAD_TLS_SELF, p);
+}
+
+extern "C"
+void
+ndb_mutex_thread_exit()
+{
+  ndb_mutex_thr_state * thr = get_thr();
+  if (thr == 0)
+  {
+    /**
+     * These are threads not started with NdbThread_Create(...)
+     *   e.g mysql-server threads...ignore these for now
+     */
+    return;
+  }
+  release(&thr->m_mutexes_locked);
+}
+
+/**
+ * util
+ */
+static
+void
+set_bit(nmdd_mask * mask, unsigned no)
+{
+  unsigned byte_no = no / 8;
+  unsigned bit_no = no & 7;
+  if (byte_no >= mask->m_len)
+  {
+    unsigned new_len = mask->m_len + INC_SIZE;
+    if (byte_no >= new_len)
+    {
+      new_len = byte_no + 1;
+    }
+    unsigned char * new_arr = (unsigned char*)malloc(new_len);
+    bzero(new_arr, new_len);
+    if (mask->m_len != 0)
+    {
+      memcpy(new_arr, mask->m_mask, mask->m_len);
+      free(mask->m_mask);
+    }
+    mask->m_len = new_len;
+    mask->m_mask = new_arr;
+  }
+
+  mask->m_mask[byte_no] |= (1 << bit_no);
+}
+
+static
+void
+clear_bit(nmdd_mask * mask, unsigned no)
+{
+  unsigned byte_no = no / 8;
+  unsigned bit_no = no & 7;
+  if (byte_no >= mask->m_len)
+  {
+    return;
+  }
+
+  mask->m_mask[byte_no] &= ~(unsigned char)(1 << bit_no);
+}
+
+static
+bool
+check_bit(const nmdd_mask * mask, unsigned no)
+{
+  unsigned byte_no = no / 8;
+  unsigned bit_no = no & 7;
+  if (byte_no >= mask->m_len)
+  {
+    return false;
+  }
+
+  return (mask->m_mask[byte_no] & (1 << bit_no)) != 0;
+}
+
+static
+void
+release(nmdd_mask * mask)
+{
+  if (mask->m_len != 0)
+  {
+    free(mask->m_mask);
+  }
+}
+
+static
+NdbMutex*
+get_element(nmdd_mutex_array* arr, unsigned i)
+{
+  assert(i < arr->m_used);
+  return arr->m_array[i];
+}
+
+static
+void
+add_mutex_to_array(nmdd_mutex_array* arr, NdbMutex* m)
+{
+  unsigned pos = arr->m_used;
+  if (arr->m_used == arr->m_array_len)
+  {
+    unsigned new_len = arr->m_array_len + INC_SIZE;
+    NdbMutex** new_arr = (NdbMutex**)malloc(new_len * sizeof(NdbMutex*));
+    if (arr->m_array_len != 0)
+    {
+      memcpy(new_arr, arr->m_array, arr->m_array_len * sizeof(NdbMutex*));
+      free(arr->m_array);
+    }
+    arr->m_array = new_arr;
+    arr->m_array_len = new_len;
+  }
+  for (unsigned i = 0; i<arr->m_used; i++)
+    assert(arr->m_array[i] != m);
+
+  arr->m_array[pos] = m;
+  arr->m_used++;
+}
+
+static
+void
+remove_mutex_from_array(nmdd_mutex_array* arr, NdbMutex* m)
+{
+  for (unsigned i = 0; i < arr->m_used; i++)
+  {
+    unsigned idx = arr->m_used - i - 1;
+    if (arr->m_array[idx] == m)
+    {
+      memmove(arr->m_array+idx,
+              arr->m_array + idx + 1,
+              i * sizeof(NdbMutex*));
+      arr->m_used--;
+      return;
+    }
+  }
+  assert(false);
+}
+
+static
+void
+release(nmdd_mutex_array* arr)
+{
+  if (arr->m_array_len)
+  {
+    free(arr->m_array);
+  }
+}
+
+static
+unsigned
+ff(unsigned char b)
+{
+  for (unsigned i = 0; i<8; i++)
+    if ((b & (1 << i)) == 0)
+      return i;
+  assert(false);
+}
+
+static
+unsigned
+alloc_mutex_no()
+{
+  Guard g(&g_mutex_no_mutex);
+  unsigned no = 0;
+
+  for (unsigned i = 0; i < g_mutex_no_mask.m_len; i++)
+  {
+    if (g_mutex_no_mask.m_mask[i] != 255)
+    {
+      no = (8 * i) + ff(g_mutex_no_mask.m_mask[i]);
+      goto found;
+    }
+  }
+
+  no = 8 * g_mutex_no_mask.m_len;
+found:
+  set_bit(&g_mutex_no_mask, no);
+  assert(check_bit(&g_mutex_no_mask, no));
+  return no;
+}
+
+static
+void
+release_mutex_no(unsigned no)
+{
+  Guard g(&g_mutex_no_mutex);
+  assert(check_bit(&g_mutex_no_mask, no));
+  clear_bit(&g_mutex_no_mask, no);
+}
+
+#endif

=== added file 'storage/ndb/src/common/portlib/NdbMutex_DeadlockDetector.h'
--- a/storage/ndb/src/common/portlib/NdbMutex_DeadlockDetector.h	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/src/common/portlib/NdbMutex_DeadlockDetector.h	2011-09-27 17:28:13 +0000
@@ -0,0 +1,73 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#ifndef NDB_MUTEX_DEADLOCK_DETECTOR_H
+#define NDB_MUTEX_DEADLOCK_DETECTOR_H
+
+#include <NdbMutex.h>
+
+struct nmdd_mask
+{
+  unsigned char * m_mask;
+  unsigned m_len;
+};
+
+struct nmdd_mutex_array
+{
+  NdbMutex ** m_array;
+  unsigned m_used;
+  unsigned m_array_len;
+};
+
+struct ndb_mutex_state
+{
+  struct nmdd_mask m_locked_before_mask; /* mutexes held when locking this mutex */
+  struct nmdd_mutex_array m_locked_before_list; /* mutexes held when locking this mutex */
+
+  struct nmdd_mutex_array m_locked_after_list; /* mutexes locked when holding this mutex*/
+  struct nmdd_mask m_locked_after_mask;        /* mask (for quick check) */
+
+  unsigned m_no; /* my mutex "id" (for access in masks) */
+};
+
+struct ndb_mutex_thr_state
+{
+  struct nmdd_mutex_array m_mutexes_locked;
+};
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+  void NdbMutex_DeadlockDetectorInit();
+  void NdbMutex_DeadlockDetectorEnd();
+
+  void ndb_mutex_created(NdbMutex*);
+  void ndb_mutex_destoyed(NdbMutex*);
+  void ndb_mutex_locked(NdbMutex*);
+  void ndb_mutex_unlocked(NdbMutex*);
+  void ndb_mutex_try_locked(NdbMutex*);
+
+  void ndb_mutex_thread_init(struct ndb_mutex_thr_state*);
+  void ndb_mutex_thread_exit();
+
+#ifdef	__cplusplus
+}
+#endif
+
+
+#endif

=== modified file 'storage/ndb/src/common/portlib/NdbThread.c'
--- a/storage/ndb/src/common/portlib/NdbThread.c	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/common/portlib/NdbThread.c	2011-09-27 17:28:13 +0000
@@ -38,6 +38,10 @@
 #include <sys/procset.h>
 #endif
 
+#ifdef NDB_MUTEX_DEADLOCK_DETECTOR
+#include "NdbMutex_DeadlockDetector.h"
+#endif
+
 static int g_min_prio = 0;
 static int g_max_prio = 0;
 static int g_prio = 0;
@@ -65,6 +69,9 @@ struct NdbThread 
   char thread_name[16];
   NDB_THREAD_FUNC * func;
   void * object;
+#ifdef NDB_MUTEX_DEADLOCK_DETECTOR
+  struct ndb_mutex_thr_state m_mutex_thr_state;
+#endif
 };
 
 #ifdef NDB_SHM_TRANSPORTER
@@ -141,6 +148,11 @@ ndb_thread_wrapper(void* _ss){
       void *ret;
       struct NdbThread * ss = (struct NdbThread *)_ss;
       settid(ss);
+
+#ifdef NDB_MUTEX_DEADLOCK_DETECTOR
+      ndb_mutex_thread_init(&ss->m_mutex_thr_state);
+#endif
+
       NdbMutex_Lock(g_ndb_thread_mutex);
       ss->inited = 1;
       NdbCondition_Signal(g_ndb_thread_condition);
@@ -154,6 +166,7 @@ ndb_thread_wrapper(void* _ss){
   }
 }
 
+static struct NdbThread* g_main_thread = 0;
 
 struct NdbThread*
 NdbThread_CreateObject(const char * name)
@@ -161,6 +174,15 @@ NdbThread_CreateObject(const char * name
   struct NdbThread* tmpThread;
   DBUG_ENTER("NdbThread_Create");
 
+  if (g_main_thread != 0)
+  {
+    if (name)
+    {
+      strnmov(g_main_thread->thread_name, name, sizeof(tmpThread->thread_name));
+    }
+    DBUG_RETURN(g_main_thread);
+  }
+
   tmpThread = (struct NdbThread*)NdbMem_Allocate(sizeof(struct NdbThread));
   if (tmpThread == NULL)
     DBUG_RETURN(NULL);
@@ -183,7 +205,12 @@ NdbThread_CreateObject(const char * name
   settid(tmpThread);
   tmpThread->inited = 1;
 
-  return tmpThread;
+#ifdef NDB_MUTEX_DEADLOCK_DETECTOR
+  ndb_mutex_thread_init(&tmpThread->m_mutex_thr_state);
+#endif
+
+  g_main_thread = tmpThread;
+  DBUG_RETURN(tmpThread);
 }
 
 struct NdbThread*
@@ -239,7 +266,7 @@ NdbThread_Create(NDB_THREAD_FUNC *p_thre
   tmpThread->object= p_thread_arg;
 
   NdbMutex_Lock(g_ndb_thread_mutex);
-  result = pthread_create(&tmpThread->thread, 
+  result = pthread_create(&tmpThread->thread,
 			  &thread_attr,
   		          ndb_thread_wrapper,
   		          tmpThread);
@@ -250,7 +277,7 @@ NdbThread_Create(NDB_THREAD_FUNC *p_thre
   {
     NdbMem_Free((char *)tmpThread);
     NdbMutex_Unlock(g_ndb_thread_mutex);
-    return 0;
+    DBUG_RETURN(0);
   }
 
   if (thread_prio == NDB_THREAD_PRIO_HIGH && f_high_prio_set)
@@ -471,7 +498,11 @@ NdbThread_LockCPU(struct NdbThread* pThr
   return error_no;
 }
 
+#ifndef NDB_MUTEX_DEADLOCK_DETECTOR
 static pthread_key(void*, tls_keys[NDB_THREAD_TLS_MAX]);
+#else
+static pthread_key(void*, tls_keys[NDB_THREAD_TLS_MAX + 1]);
+#endif
 
 void *NdbThread_GetTlsKey(NDB_THREAD_TLS key)
 {
@@ -490,6 +521,10 @@ NdbThread_Init()
   g_ndb_thread_condition = NdbCondition_Create();
   pthread_key_create(&(tls_keys[NDB_THREAD_TLS_JAM]), NULL);
   pthread_key_create(&(tls_keys[NDB_THREAD_TLS_THREAD]), NULL);
+#ifdef NDB_MUTEX_DEADLOCK_DETECTOR
+  pthread_key_create(&(tls_keys[NDB_THREAD_TLS_MAX]), NULL);
+#endif
+  NdbThread_CreateObject(0);
   return 0;
 }
 

=== modified file 'storage/ndb/src/common/util/ndb_init.cpp'
--- a/storage/ndb/src/common/util/ndb_init.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/common/util/ndb_init.cpp	2011-09-27 17:28:13 +0000
@@ -32,6 +32,8 @@ int g_ndb_init_need_monotonic = 0;
 
 static int ndb_init_called = 0;
 
+extern "C" void NdbMutex_SysInit();
+extern "C" void NdbMutex_SysEnd();
 extern "C" void NdbCondition_initialize(int need_monotonic);
 extern "C" void NdbTick_Init(int need_monotonic);
 extern "C" int NdbThread_Init();
@@ -45,6 +47,7 @@ void
 ndb_init_internal()
 {
   NdbOut_Init();
+  NdbMutex_SysInit();
   if (!g_ndb_connection_mutex)
     g_ndb_connection_mutex = NdbMutex_Create();
   if (!g_eventLogger)
@@ -88,7 +91,6 @@ ndb_init()
 void
 ndb_end_internal()
 {
-  NdbThread_End();
   if (g_ndb_connection_mutex) 
   {
     NdbMutex_Destroy(g_ndb_connection_mutex);
@@ -96,6 +98,9 @@ ndb_end_internal()
   }
   if (g_eventLogger)
     destroy_event_logger(&g_eventLogger);
+
+  NdbThread_End();
+  NdbMutex_SysEnd();
 }
 
 void

=== modified file 'storage/ndb/src/common/util/ndbzio.c'
--- a/storage/ndb/src/common/util/ndbzio.c	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/common/util/ndbzio.c	2011-09-29 05:44:30 +0000
@@ -1070,6 +1070,6 @@ int ndbz_file_size(ndbzio_stream *s, siz
   if (my_fstat(s->file, &stat_buf, 0) != 0)
     return -1;
 
-  *size = stat_buf.st_size;
+  *size = (size_t)stat_buf.st_size;
   return 0; /* OK */
 }

=== modified file 'storage/ndb/src/kernel/SimBlockList.cpp'
--- a/storage/ndb/src/kernel/SimBlockList.cpp	2011-09-02 09:16:56 +0000
+++ b/storage/ndb/src/kernel/SimBlockList.cpp	2011-10-03 08:02:28 +0000
@@ -49,6 +49,8 @@
 #include <BackupProxy.hpp>
 #include <RestoreProxy.hpp>
 #include <PgmanProxy.hpp>
+#include <DbtcProxy.hpp>
+#include <DbspjProxy.hpp>
 #include <mt.hpp>
 
 #ifndef VM_TRACE
@@ -128,7 +130,10 @@ SimBlockList::load(EmulatorData& data){
     theList[8]  = NEW_BLOCK(Dblqh)(ctx);
   else
     theList[8]  = NEW_BLOCK(DblqhProxy)(ctx);
-  theList[9]  = NEW_BLOCK(Dbtc)(ctx);
+  if (globalData.ndbMtTcThreads == 0)
+    theList[9]  = NEW_BLOCK(Dbtc)(ctx);
+  else
+    theList[9] = NEW_BLOCK(DbtcProxy)(ctx);
   if (!mtLqh)
     theList[10] = NEW_BLOCK(Dbtup)(ctx);
   else
@@ -151,18 +156,12 @@ SimBlockList::load(EmulatorData& data){
   else
     theList[18] = NEW_BLOCK(RestoreProxy)(ctx);
   theList[19] = NEW_BLOCK(Dbinfo)(ctx);
-  theList[20]  = NEW_BLOCK(Dbspj)(ctx);
+  if (globalData.ndbMtTcThreads == 0)
+    theList[20]  = NEW_BLOCK(Dbspj)(ctx);
+  else
+    theList[20]  = NEW_BLOCK(DbspjProxy)(ctx);
   assert(NO_OF_BLOCKS == 21);
 
-  if (globalData.isNdbMt) {
-    add_main_thr_map();
-    if (globalData.isNdbMtLqh) {
-      for (int i = 0; i < noOfBlocks; i++)
-        theList[i]->loadWorkers();
-    }
-    finalize_thr_map();
-  }
-
   // Check that all blocks could be created
   for (int i = 0; i < noOfBlocks; i++)
   {
@@ -172,6 +171,14 @@ SimBlockList::load(EmulatorData& data){
                 "Failed to create block", "");
     }
   }
+
+  if (globalData.isNdbMt)
+  {
+    add_main_thr_map();
+    for (int i = 0; i < noOfBlocks; i++)
+      theList[i]->loadWorkers();
+    finalize_thr_map();
+  }
 }
 
 void

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-09-07 10:08:09 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-10-05 07:24:39 +0000
@@ -442,6 +442,12 @@ void Dbdict::execDBINFO_SCANREQ(Signal *
         c_opRecordPool.getEntrySize(),
         c_opRecordPool.getUsedHi(),
         { 0,0,0,0 }},
+      { "Operation Data",
+        c_opSectionBufferPool.getUsed(),
+        c_opSectionBufferPool.getSize(),
+        c_opSectionBufferPool.getEntrySize(),
+        c_opSectionBufferPool.getUsedHi(),
+        { 0,0,0,0 }},
       { NULL, 0,0,0,0,{0,0,0,0}}
     };
 

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp	2011-09-29 05:47:22 +0000
@@ -244,7 +244,7 @@ NDB_COMMAND(printSchemafile, 
       exitcode = 1;
       continue;
     }
-    const Uint32 bytes = sbuf.st_size;
+    const Uint32 bytes = (Uint32)sbuf.st_size;
     
     Uint32 * buf = new Uint32[bytes/4+1];
     

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-10-03 12:14:34 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-10-05 07:24:39 +0000
@@ -9018,6 +9018,7 @@ void Dbdih::execDIGETNODESREQ(Signal* si
   TabRecordPtr tabPtr;
   tabPtr.i = req->tableId;
   Uint32 hashValue = req->hashValue;
+  Uint32 distr_key_indicator = req->distr_key_indicator;
   Uint32 ttabFileSize = ctabFileSize;
   Uint32 fragId, newFragId = RNIL;
   DiGetNodesConf * const conf = (DiGetNodesConf *)&signal->theData[0];
@@ -9042,7 +9043,7 @@ loop:
    * of distribution algorithm in use, hashValue
    * IS fragment id.
    */
-  if (req->distr_key_indicator)
+  if (distr_key_indicator)
   {
     fragId = hashValue;
     if (unlikely(fragId >= tabPtr.p->totalfragments))

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-09-07 10:08:09 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-10-05 07:24:39 +0000
@@ -3865,7 +3865,8 @@ void Dblqh::sendLqhkeyconfTc(Signal* sig
   {
     lqhKeyConf->connectPtr = tcConnectptr.i;
     if (instance() == refToInstance(atcBlockref) &&
-        (Thostptr.i == 0 || Thostptr.i == getOwnNodeId()))
+        (Thostptr.i == 0 || Thostptr.i == getOwnNodeId()) &&
+        globalData.ndbMtTcThreads == 0)
     {
       /**
        * This EXECUTE_DIRECT is multi-thread safe, as we only get here
@@ -11347,8 +11348,8 @@ void Dblqh::scanTupkeyRefLab(Signal* sig
     scanReleaseLocksLab(signal);
     return;
   }//if
-  Uint32 time_passed= tcConnectptr.p->tcTimer - cLqhTimeOutCount;
-  if (rows) 
+  Uint32 time_passed= cLqhTimeOutCount - tcConnectptr.p->tcTimer;
+  if (rows)
   {
     if (time_passed > 1) 
     {

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp	2011-09-02 09:16:56 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp	2011-10-03 08:02:28 +0000
@@ -580,6 +580,8 @@ public:
     Uint32 m_fragCount;
     // The number of fragments that we scan in parallel.
     Uint32 m_parallelism;
+    // True if we are still receiving the first batch for this operation.
+    bool   m_firstBatch;
     /**
      * True if this is the first instantiation of this operation. A child
      * operation will be instantiated once for each batch of its parent.
@@ -1229,7 +1231,6 @@ private:
   void scanIndex_execSCAN_FRAGCONF(Signal*, Ptr<Request>, Ptr<TreeNode>, Ptr<ScanFragHandle>);
   void scanIndex_parent_row(Signal*,Ptr<Request>,Ptr<TreeNode>, const RowPtr&);
   void scanIndex_fixupBound(Ptr<ScanFragHandle> fragPtr, Uint32 ptrI, Uint32);
-  void scanIndex_send(Signal*,Ptr<Request>,Ptr<TreeNode>);
   void scanIndex_send(Signal* signal,
                       Ptr<Request> requestPtr,
                       Ptr<TreeNode> treeNodePtr,

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-09-16 14:44:00 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-10-03 08:02:28 +0000
@@ -280,16 +280,15 @@ Dbspj::execAPI_FAILREQ(Signal* signal)
 {
   jamEntry();
   Uint32 failedApiNode = signal->theData[0];
-  ndbrequire(signal->theData[1] == QMGR_REF); // As callback hard-codes QMGR
+  Uint32 ref = signal->theData[1];
 
   /**
    * We only need to care about lookups
    *   as SCAN's are aborted by DBTC
    */
-
   signal->theData[0] = failedApiNode;
   signal->theData[1] = reference();
-  sendSignal(QMGR_REF, GSN_API_FAILCONF, signal, 2, JBB);
+  sendSignal(ref, GSN_API_FAILCONF, signal, 2, JBB);
 }
 
 void
@@ -5024,6 +5023,7 @@ Dbspj::scanIndex_parent_batch_complete(S
   const ScanFragReq * org = (const ScanFragReq*)data.m_scanFragReq;
   ndbrequire(org->batch_size_rows > 0);
 
+  data.m_firstBatch = true;
   if (treeNodePtr.p->m_bits & TreeNode::T_SCAN_PARALLEL)
   {
     jam();
@@ -5172,6 +5172,9 @@ Dbspj::scanIndex_send(Signal* signal,
                       Uint32 bs_rows,
                       Uint32& batchRange)
 {
+  jam();
+  ndbassert(bs_bytes > 0);
+  ndbassert(bs_rows > 0);
   /**
    * if (m_bits & prunemask):
    * - Range keys sliced out to each ScanFragHandle
@@ -5452,6 +5455,9 @@ Dbspj::scanIndex_execSCAN_FRAGCONF(Signa
 
   if (data.m_frags_outstanding == 0)
   {
+    const bool isFirstBatch = data.m_firstBatch;
+    data.m_firstBatch = false;
+
     const ScanFragReq * const org
       = reinterpret_cast<const ScanFragReq*>(data.m_scanFragReq);
 
@@ -5487,24 +5493,78 @@ Dbspj::scanIndex_execSCAN_FRAGCONF(Signa
     {
       jam();
       ndbrequire((requestPtr.p->m_state & Request::RS_ABORTING) != 0);
+      checkBatchComplete(signal, requestPtr, 1);
+      return;
     }
-    else if (! (data.m_rows_received == data.m_rows_expecting))
+
+    if (isFirstBatch && data.m_frags_not_started > 0)
+    {
+      /**
+       * Check if we can expect to be able to fetch the entire result set by
+       * asking for more fragments within the same batch. This may improve 
+       * performance for bushy scans, as subsequent bushy branches must be
+       * re-executed for each batch of this scan.
+       */
+      
+      /**
+       * Find the maximal correlation value that we may have seen so far.
+       * Correlation value must be unique within batch and smaller than 
+       * org->batch_size_rows.
+       */
+      const Uint32 maxCorrVal = (data.m_totalRows) == 0 ? 0 :
+        org->batch_size_rows / data.m_parallelism * (data.m_parallelism - 1)
+        + data.m_totalRows;
+      
+      // Number of rows that we can still fetch in this batch.
+      const Int32 remainingRows 
+        = static_cast<Int32>(org->batch_size_rows - maxCorrVal);
+      
+      if (remainingRows >= data.m_frags_not_started &&
+          /**
+           * Check that (remaining row capacity)/(remaining fragments) is 
+           * greater than or equal to (rows read so far)/(finished fragments).
+           */
+          remainingRows * static_cast<Int32>(data.m_parallelism) >=
+          static_cast<Int32>(data.m_totalRows * data.m_frags_not_started) &&
+          (org->batch_size_bytes - data.m_totalBytes) * data.m_parallelism >=
+          data.m_totalBytes * data.m_frags_not_started)
+      {
+        jam();
+        Uint32 batchRange = maxCorrVal;
+        DEBUG("::scanIndex_execSCAN_FRAGCONF() first batch was not full."
+              " Asking for new batches from " << data.m_frags_not_started <<
+              " fragments with " << 
+              remainingRows / data.m_frags_not_started 
+              <<" rows and " << 
+              (org->batch_size_bytes - data.m_totalBytes)
+              / data.m_frags_not_started 
+              << " bytes.");
+        scanIndex_send(signal,
+                       requestPtr,
+                       treeNodePtr,
+                       data.m_frags_not_started,
+                       (org->batch_size_bytes - data.m_totalBytes)
+                       / data.m_frags_not_started,
+                       remainingRows / data.m_frags_not_started,
+                       batchRange);
+        return;
+      }
+    }
+    
+    if (data.m_rows_received != data.m_rows_expecting)
     {
       jam();
       return;
     }
-    else
+    
+    if (treeNodePtr.p->m_bits & TreeNode::T_REPORT_BATCH_COMPLETE)
     {
-      if (treeNodePtr.p->m_bits & TreeNode::T_REPORT_BATCH_COMPLETE)
-      {
-        jam();
-        reportBatchComplete(signal, requestPtr, treeNodePtr);
-      }
+      jam();
+      reportBatchComplete(signal, requestPtr, treeNodePtr);
     }
 
     checkBatchComplete(signal, requestPtr, 1);
-    return;
-  }
+  } // if (data.m_frags_outstanding == 0)
 }
 
 void

=== modified file 'storage/ndb/src/kernel/ndbd.cpp'
--- a/storage/ndb/src/kernel/ndbd.cpp	2011-09-08 11:49:24 +0000
+++ b/storage/ndb/src/kernel/ndbd.cpp	2011-09-23 09:13:22 +0000
@@ -310,7 +310,9 @@ get_multithreaded_config(EmulatorData& e
     return 0;
 
   ndbout << "NDBMT: workers=" << globalData.ndbMtLqhWorkers
-         << " threads=" << globalData.ndbMtLqhThreads << endl;
+         << " threads=" << globalData.ndbMtLqhThreads
+         << " tc=" << globalData.ndbMtTcThreads
+         << endl;
 
   return 0;
 }

=== modified file 'storage/ndb/src/kernel/vm/ArrayPool.hpp'
--- a/storage/ndb/src/kernel/vm/ArrayPool.hpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/kernel/vm/ArrayPool.hpp	2011-09-28 10:46:30 +0000
@@ -1385,7 +1385,7 @@ UnsafeArrayPool<T>::getPtrForce(ConstPtr
 template <class T>
 class SafeArrayPool : public ArrayPool<T> {
 public:
-  SafeArrayPool(NdbMutex* mutex = 0);
+  SafeArrayPool();
   ~SafeArrayPool();
   int lock();
   int unlock();
@@ -1393,6 +1393,8 @@ public:
   void release(Uint32 i);
   void release(Ptr<T>&);
 
+  void setMutex(NdbMutex* mutex = 0);
+
 private:
   NdbMutex* m_mutex;
   bool m_mutex_owner;
@@ -1403,7 +1405,16 @@ private:
 
 template <class T>
 inline
-SafeArrayPool<T>::SafeArrayPool(NdbMutex* mutex)
+SafeArrayPool<T>::SafeArrayPool()
+{
+  m_mutex = 0;
+  m_mutex_owner = false;
+}
+
+template <class T>
+inline
+void
+SafeArrayPool<T>::setMutex(NdbMutex* mutex)
 {
   if (mutex != 0) {
     m_mutex = mutex;

=== modified file 'storage/ndb/src/kernel/vm/Configuration.cpp'
--- a/storage/ndb/src/kernel/vm/Configuration.cpp	2011-09-14 13:49:19 +0000
+++ b/storage/ndb/src/kernel/vm/Configuration.cpp	2011-09-26 08:03:20 +0000
@@ -436,18 +436,21 @@ Configuration::setupConfiguration(){
                 m_thr_config.getErrorMessage());
     }
   }
-  if (thrconfigstring)
+  if (NdbIsMultiThreaded())
   {
-    ndbout_c("ThreadConfig: input: %s LockExecuteThreadToCPU: %s => parsed: %s",
-             thrconfigstring,
-             lockmask ? lockmask : "",
-             m_thr_config.getConfigString());
-  }
-  else
-  {
-    ndbout_c("ThreadConfig (old ndb_mgmd) LockExecuteThreadToCPU: %s => parsed: %s",
-             lockmask ? lockmask : "",
-             m_thr_config.getConfigString());
+    if (thrconfigstring)
+    {
+      ndbout_c("ThreadConfig: input: %s LockExecuteThreadToCPU: %s => parsed: %s",
+               thrconfigstring,
+               lockmask ? lockmask : "",
+               m_thr_config.getConfigString());
+    }
+    else
+    {
+      ndbout_c("ThreadConfig (old ndb_mgmd) LockExecuteThreadToCPU: %s => parsed: %s",
+               lockmask ? lockmask : "",
+               m_thr_config.getConfigString());
+    }
   }
 
   ConfigValues* cf = ConfigValuesFactory::extractCurrentSection(iter.m_config);
@@ -466,6 +469,7 @@ Configuration::setupConfiguration(){
     if (!globalData.isNdbMt)
       break;
 
+    globalData.ndbMtTcThreads = m_thr_config.getThreadCount(THRConfig::T_TC);
     globalData.isNdbMtLqh = true;
     {
       if (m_thr_config.getMtClassic())

=== modified file 'storage/ndb/src/kernel/vm/Emulator.cpp'
--- a/storage/ndb/src/kernel/vm/Emulator.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/vm/Emulator.cpp	2011-09-27 06:44:06 +0000
@@ -89,6 +89,7 @@ EmulatorData::create(){
   theSimBlockList  = new SimBlockList();
   m_socket_server  = new SocketServer();
   m_mem_manager    = new Ndbd_mem_manager();
+  globalData.m_global_page_pool.setMutex();
 
   if (theConfiguration == NULL ||
       theWatchDog == NULL ||

=== modified file 'storage/ndb/src/kernel/vm/mt_thr_config.cpp'
--- a/storage/ndb/src/kernel/vm/mt_thr_config.cpp	2011-09-04 08:52:42 +0000
+++ b/storage/ndb/src/kernel/vm/mt_thr_config.cpp	2011-09-23 09:13:22 +0000
@@ -32,7 +32,8 @@ static const struct THRConfig::Entries m
   { "ldm",   THRConfig::T_LDM,   1, MAX_NDBMT_LQH_THREADS },
   { "recv",  THRConfig::T_RECV,  1, 1 },
   { "rep",   THRConfig::T_REP,   1, 1 },
-  { "io",    THRConfig::T_IO,    1, 1 }
+  { "io",    THRConfig::T_IO,    1, 1 },
+  { "tc",    THRConfig::T_TC,    0, MAX_NDBMT_TC_THREADS }
 };
 
 static const struct THRConfig::Param m_params[] =
@@ -140,6 +141,7 @@ THRConfig::do_parse(unsigned MaxNoOfExec
     return do_bindings();
   }
 
+  Uint32 tcthreads = 0;
   Uint32 lqhthreads = 0;
   switch(MaxNoOfExecutionThreads){
   case 0:
@@ -171,6 +173,11 @@ THRConfig::do_parse(unsigned MaxNoOfExec
     add(T_LDM);
   }
 
+  for(Uint32 i = 0; i < tcthreads; i++)
+  {
+    add(T_TC);
+  }
+
   return do_bindings() || do_validate();
 }
 
@@ -283,6 +290,13 @@ THRConfig::do_bindings()
                         "LockExecuteThreadToCPU. Only %u specified "
                         " but %u was needed, this may cause contention.\n",
                         cnt, num_threads);
+
+      if (count_unbound(m_threads[T_TC]))
+      {
+        m_err_msg.assfmt("Too CPU specifed with LockExecuteThreadToCPU. "
+                         "This is not supported when using multiple TC threads");
+        return -1;
+      }
     }
 
     if (cnt >= num_threads)
@@ -867,7 +881,13 @@ THRConfig::do_parse(const char * ThreadC
       add((T_Type)i);
   }
 
-  return do_bindings() || do_validate();
+  int res = do_bindings();
+  if (res != 0)
+  {
+    return res;
+  }
+
+  return do_validate();
 }
 
 unsigned
@@ -916,6 +936,10 @@ THRConfigApplier::find_thread(const unsi
   {
     return &m_threads[T_MAIN][instanceNo];
   }
+  else if ((instanceNo = findBlock(DBTC, instancelist, cnt)) >= 0)
+  {
+    return &m_threads[T_TC][instanceNo - 1]; // remove proxy
+  }
   else if ((instanceNo = findBlock(DBLQH, instancelist, cnt)) >= 0)
   {
     return &m_threads[T_LDM][instanceNo - 1]; // remove proxy...
@@ -1014,6 +1038,8 @@ TAPTEST(mt_thr_config)
         "ldm={count=3,cpubind=1-2,5 },  ldm",
         "ldm={cpuset=1-3,count=3 },ldm",
         "main,ldm={},ldm",
+        "main,ldm={},ldm,tc",
+        "main,ldm={},ldm,tc,tc",
         0
       };
 
@@ -1026,6 +1052,7 @@ TAPTEST(mt_thr_config)
         "main={ keso=88, count=23},ldm,ldm",
         "main={ cpuset=1-3 }, ldm={cpuset=3-4}",
         "main={ cpuset=1-3 }, ldm={cpubind=2}",
+        "tc,tc,tc",
         0
       };
 
@@ -1065,45 +1092,71 @@ TAPTEST(mt_thr_config)
       /** threads, LockExecuteThreadToCPU, answer */
       "1-8",
       "ldm={count=4}",
+      "OK",
       "main={cpubind=1},ldm={cpubind=2},ldm={cpubind=3},ldm={cpubind=4},ldm={cpubind=5},recv={cpubind=6},rep={cpubind=7}",
 
       "1-5",
       "ldm={count=4}",
+      "OK",
       "main={cpubind=5},ldm={cpubind=1},ldm={cpubind=2},ldm={cpubind=3},ldm={cpubind=4},recv={cpubind=5},rep={cpubind=5}",
 
       "1-3",
       "ldm={count=4}",
+      "OK",
       "main={cpubind=1},ldm={cpubind=2},ldm={cpubind=3},ldm={cpubind=2},ldm={cpubind=3},recv={cpubind=1},rep={cpubind=1}",
 
       "1-4",
       "ldm={count=4}",
+      "OK",
       "main={cpubind=1},ldm={cpubind=2},ldm={cpubind=3},ldm={cpubind=4},ldm={cpubind=2},recv={cpubind=1},rep={cpubind=1}",
 
       "1-8",
       "ldm={count=4},io={cpubind=8}",
+      "OK",
       "main={cpubind=1},ldm={cpubind=2},ldm={cpubind=3},ldm={cpubind=4},ldm={cpubind=5},recv={cpubind=6},rep={cpubind=7},io={cpubind=8}",
 
       "1-8",
       "ldm={count=4,cpubind=1,4,5,6}",
+      "OK",
       "main={cpubind=2},ldm={cpubind=1},ldm={cpubind=4},ldm={cpubind=5},ldm={cpubind=6},recv={cpubind=3},rep={cpubind=7}",
 
+      "1-9",
+      "ldm={count=4,cpubind=1,4,5,6},tc,tc",
+      "OK",
+      "main={cpubind=2},ldm={cpubind=1},ldm={cpubind=4},ldm={cpubind=5},ldm={cpubind=6},recv={cpubind=3},rep={cpubind=7},tc={cpubind=8},tc={cpubind=9}",
+
+      "1-8",
+      "ldm={count=4,cpubind=1,4,5,6},tc",
+      "OK",
+      "main={cpubind=2},ldm={cpubind=1},ldm={cpubind=4},ldm={cpubind=5},ldm={cpubind=6},recv={cpubind=3},rep={cpubind=7},tc={cpubind=8}",
+
+      "1-8",
+      "ldm={count=4,cpubind=1,4,5,6},tc,tc",
+      "FAIL",
+      "Too CPU specifed with LockExecuteThreadToCPU. This is not supported when using multiple TC threads",
+
       // END
       0
     };
 
-    for (unsigned i = 0; t[i]; i+= 3)
+    for (unsigned i = 0; t[i]; i+= 4)
     {
       THRConfig tmp;
       tmp.setLockExecuteThreadToCPU(t[i+0]);
-      int res = tmp.do_parse(t[i+1]);
-      int ok = strcmp(tmp.getConfigString(), t[i+2]) == 0;
+      const int _res = tmp.do_parse(t[i+1]);
+      const int expect_res = strcmp(t[i+2], "OK") == 0 ? 0 : -1;
+      const int res = _res == expect_res ? 0 : -1;
+      int ok = expect_res == 0 ?
+        strcmp(tmp.getConfigString(), t[i+3]) == 0:
+        strcmp(tmp.getErrorMessage(), t[i+3]) == 0;
       printf("mask: %s conf: %s => %s(%s) - %s - %s\n",
              t[i+0],
              t[i+1],
-             res == 0 ? "OK" : "FAIL",
-             res == 0 ? "" : tmp.getErrorMessage(),
+             _res == 0 ? "OK" : "FAIL",
+             _res == 0 ? "" : tmp.getErrorMessage(),
              tmp.getConfigString(),
              ok == 1 ? "CORRECT" : "INCORRECT");
+
       OK(res == 0);
       OK(ok == 1);
     }

=== modified file 'storage/ndb/src/kernel/vm/mt_thr_config.hpp'
--- a/storage/ndb/src/kernel/vm/mt_thr_config.hpp	2011-09-02 17:24:52 +0000
+++ b/storage/ndb/src/kernel/vm/mt_thr_config.hpp	2011-09-23 09:13:22 +0000
@@ -43,8 +43,9 @@ public:
     T_RECV  = 2, /* CMVMI */
     T_REP   = 3, /* SUMA */
     T_IO    = 4, /* FS, SocketServer etc */
+    T_TC    = 5, /* TC+SPJ */
 
-    T_END  = 5
+    T_END  = 6
   };
 
   THRConfig();

=== modified file 'storage/ndb/src/mgmsrv/MgmtSrvr.cpp'
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2011-09-19 19:55:58 +0000
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2011-09-28 10:18:35 +0000
@@ -528,8 +528,6 @@ MgmtSrvr::start()
 {
   DBUG_ENTER("MgmtSrvr::start");
 
-  Guard g(m_local_config_mutex);
-
   /* Start transporter */
   if(!start_transporter(m_local_config))
   {
@@ -3451,15 +3449,17 @@ MgmtSrvr::try_alloc_from_list(NodeId& no
   for (unsigned i = 0; i < nodes.size(); i++)
   {
     const unsigned id= nodes[i].id;
-    if (m_reserved_nodes.get(id))
+    if (theFacade->ext_isConnected(id))
     {
-      // Node is already reserved(locally in this node)
+      // Node is already reserved(connected via transporter)
       continue;
     }
 
-    if (theFacade->ext_isConnected(id))
+    NdbMutex_Lock(m_reserved_nodes_mutex);
+    if (m_reserved_nodes.get(id))
     {
-      // Node is already reserved(connected via transporter)
+      // Node is already reserved(locally in this node)
+      NdbMutex_Unlock(m_reserved_nodes_mutex);
       continue;
     }
 
@@ -3483,16 +3483,14 @@ MgmtSrvr::try_alloc_from_list(NodeId& no
           more than one thread asked for same nodeid) since it's
           now reserved in data node
         */
-        m_reserved_nodes.clear(id);
+        release_local_nodeid_reservation(id);
       }
 
-      NdbMutex_Lock(m_reserved_nodes_mutex);
       return true;
     }
 
     /* Release the local reservation */
-    m_reserved_nodes.clear(id);
-    NdbMutex_Lock(m_reserved_nodes_mutex);
+    release_local_nodeid_reservation(id);
 
     if (res < 0)
     {
@@ -3601,8 +3599,6 @@ MgmtSrvr::alloc_node_id_impl(NodeId& nod
     return false;
   }
 
-  Guard g(m_reserved_nodes_mutex);
-
   /* Check timeout of nodeid reservations for NDB */
   if (type == NDB_MGM_NODE_TYPE_NDB)
   {
@@ -3610,8 +3606,11 @@ MgmtSrvr::alloc_node_id_impl(NodeId& nod
     for (unsigned i = 0; i < nodes.size(); i++)
     {
       const NodeId ndb_nodeid = nodes[i].id;
-      if (!m_reserved_nodes.has_timedout(ndb_nodeid, now))
-        continue;
+      {
+        Guard g(m_reserved_nodes_mutex);
+        if (!m_reserved_nodes.has_timedout(ndb_nodeid, now))
+          continue;
+      }
 
       // Found a timedout reservation
       if (theFacade->ext_isConnected(ndb_nodeid))
@@ -3621,7 +3620,7 @@ MgmtSrvr::alloc_node_id_impl(NodeId& nod
                              "releasing it", ndb_nodeid);
 
       // Clear the reservation
-      m_reserved_nodes.clear(ndb_nodeid);
+      release_local_nodeid_reservation(ndb_nodeid);
     }
   }
 

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilder.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-09-14 13:56:17 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-10-03 08:02:28 +0000
@@ -343,7 +343,8 @@ NdbQueryDef::destroy() const
 void
 NdbQueryDef::print() const
 {
-  m_impl.getQueryOperation(0U).printTree(0, Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32>());
+  m_impl.getQueryOperation(0U)
+    .printTree(0, NdbQueryOperationDefImpl::SiblingMask());
 }
 
 /*************************************************************************
@@ -1188,7 +1189,8 @@ NdbQueryBuilderImpl::prepare()
   if (doPrintQueryTree)
   {
     ndbout << "Query tree:" << endl;
-    def->getQueryOperation(0U).printTree(0, Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32>());
+    def->getQueryOperation(0U)
+      .printTree(0, NdbQueryOperationDefImpl::SiblingMask());
   }
 
   return def;
@@ -2159,7 +2161,8 @@ NdbQueryOperationDefImpl::appendChildPro
  * that connect the tree nodes.
  */
 static void printMargin(Uint32 depth, 
-                        Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32> hasMoreSiblingsMask, 
+                        NdbQueryOperationDefImpl::SiblingMask 
+                        hasMoreSiblingsMask, 
                         bool header)
 {
   if (depth > 0)
@@ -2193,11 +2196,10 @@ static void printMargin(Uint32 depth, 
 
 void 
 NdbQueryOperationDefImpl::printTree(Uint32 depth, 
-                                    Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32> 
-                                    hasMoreSiblingsMask) const
+                                    SiblingMask hasMoreSiblingsMask) const
 {
   // Print vertical line leading down to this node.
-  Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32> firstLineMask = hasMoreSiblingsMask;
+  SiblingMask firstLineMask = hasMoreSiblingsMask;
   firstLineMask.set(depth);
   printMargin(depth, firstLineMask, false);
   ndbout << endl;
@@ -2214,22 +2216,24 @@ NdbQueryOperationDefImpl::printTree(Uint
     printMargin(depth, hasMoreSiblingsMask, false);
     ndbout << " index: " << getIndex()->getName() << endl; 
   }
-  /* For each child but the last one, use a mask with an extra bit set to
-   * indicate that there are more siblings.
-   */
-  hasMoreSiblingsMask.set(depth+1);
+
   for (int childNo = 0; 
-       childNo < static_cast<int>(getNoOfChildOperations()) - 1; 
+       childNo < static_cast<int>(getNoOfChildOperations()); 
        childNo++)
   {
-    getChildOperation(childNo).printTree(depth+1, hasMoreSiblingsMask);
-  }
-  if (getNoOfChildOperations() > 0)
-  {
-    // The last child has no more siblings.
-    hasMoreSiblingsMask.clear(depth+1);
-    getChildOperation(getNoOfChildOperations() - 1)
-      .printTree(depth+1, hasMoreSiblingsMask);
+    if (childNo == 0)
+    {
+      /* For each child but the last one, use a mask with an extra bit set to
+       * indicate that there are more siblings.
+       */
+      hasMoreSiblingsMask.set(depth+1);
+    }
+    if (childNo == static_cast<int>(getNoOfChildOperations()) - 1)
+    {
+      // The last child has no more siblings.
+      hasMoreSiblingsMask.clear(depth+1);
+    }
+    getChildOperation(childNo).printTree(depth+1, hasMoreSiblingsMask); 
   }
 } // NdbQueryOperationDefImpl::printTree()
 

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-09-14 13:56:17 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-10-03 08:02:28 +0000
@@ -429,6 +429,12 @@ public:
   // Get type of query operation
   virtual NdbQueryOperationDef::Type getType() const = 0;
 
+  /**
+   * Used for telling if the parent at depth n has more siblings. (In that
+   * case we need to draw a horizontal line leading to that sibling.)
+   */
+  typedef Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32> SiblingMask;
+
   /** Print query tree graph to trace file (using recursion).
    * @param depth Number of ancestor nodes that this node has.
    * @param hasMoreSiblingsMask The n'th bit should be set if the n'th ancestor
@@ -436,7 +442,7 @@ public:
    */
   void printTree(
            Uint32 depth, 
-           Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32> hasMoreSiblingsMask) const;
+           SiblingMask hasMoreSiblingsMask) const;
 
 protected:
   // QueryTree building:

=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2011-09-14 13:56:17 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2011-10-03 08:02:28 +0000
@@ -187,7 +187,7 @@ ErrorBundle ErrorCodes[] = {
   { 805,  DMEC, TR, "Out of attrinfo records in tuple manager" },
   { 830,  DMEC, TR, "Out of add fragment operation records" },
   { 873,  DMEC, TR, "Out of attrinfo records for scan in tuple manager" },
-  { 899,  DMEC, IE, "Internal error: rowid already allocated" },
+  { 899,  DMEC, TR, "Rowid already allocated" },
   { 1217, DMEC, TR, "Out of operation records in local data manager (increase MaxNoOfLocalOperations)" },
   { 1218, DMEC, TR, "Send Buffers overloaded in NDB kernel" },
   { 1220, DMEC, TR, "REDO log files overloaded (increase FragmentLogFileSize)" },

=== modified file 'storage/ndb/src/ndbjtie/test/test/MySqlUtilsCharsetMapTest.java'
--- a/storage/ndb/src/ndbjtie/test/test/MySqlUtilsCharsetMapTest.java	2011-02-02 09:52:33 +0000
+++ b/storage/ndb/src/ndbjtie/test/test/MySqlUtilsCharsetMapTest.java	2011-09-30 15:21:28 +0000
@@ -118,7 +118,7 @@ public class MySqlUtilsCharsetMapTest ex
         out.println("  <-- Test CharsetMap::getName()");
         
         /* Now we're going to recode. 
-         We test with the string "ülker", which begins with the character
+         We test with a string that begins with the character
          LATIN SMALL LETTER U WITH DIARESIS - unicode code point U+00FC.
          In the latin1 encoding this is a literal 0xFC,
          but in the UTF-8 representation it is 0xC3 0xBC.

=== modified file 'storage/ndb/test/ndbapi/testNdbApi.cpp'
--- a/storage/ndb/test/ndbapi/testNdbApi.cpp	2011-09-19 20:03:43 +0000
+++ b/storage/ndb/test/ndbapi/testNdbApi.cpp	2011-10-03 07:22:29 +0000
@@ -4580,8 +4580,17 @@ public:
         m_id(id), m_res(res) {
       m_res.lock(m_id);
     }
-    ~Reserve(){
+
+    void unlock() {
       m_res.unlock(m_id);
+      m_id = 0;
+    }
+
+    ~Reserve(){
+      if (m_id)
+      {
+        m_res.unlock(m_id);
+      }
     }
   };
 };
@@ -4644,6 +4653,8 @@ int runNdbClusterConnect(NDBT_Context* c
 {
   const Uint32 api_nodes = ctx->getProperty("API_NODES");
   const Uint32 step_no = step->getStepNo();
+  const Uint32 timeout_after_first_alive = ctx->getProperty("TimeoutAfterFirst",
+                                                            30);
   if (step_no > api_nodes)
   {
     // Don't run with more threads than API node slots
@@ -4652,8 +4663,8 @@ int runNdbClusterConnect(NDBT_Context* c
 
   // Get connectstring from main connection
   char constr[256];
-  if(!ctx->m_cluster_connection.get_connectstring(constr,
-                                                  sizeof(constr)))
+  if (!ctx->m_cluster_connection.get_connectstring(constr,
+                                                   sizeof(constr)))
   {
     g_err << "Too short buffer for connectstring" << endl;
     return NDBT_FAILED;
@@ -4661,9 +4672,17 @@ int runNdbClusterConnect(NDBT_Context* c
 
   Uint32 l = 0;
   const Uint32 loops = ctx->getNumLoops();
-  while (l < loops)
+  while (l < loops && !ctx->isTestStopped())
   {
     g_info << "loop: " << l << endl;
+    if (ctx->getProperty("WAIT") > 0)
+    {
+      ndbout_c("thread %u waiting", step_no);
+      ctx->incProperty("WAITING");
+      while (ctx->getProperty("WAIT") > 0 && !ctx->isTestStopped())
+        NdbSleep_MilliSleep(10);
+      ndbout_c("thread %u waiting complete", step_no);
+    }
     Ndb_cluster_connection con(constr);
 
     const int retries = 12;
@@ -4679,11 +4698,12 @@ int runNdbClusterConnect(NDBT_Context* c
     NodeIdReservations::Reserve res(g_reservations, con.node_id());
 
     const int timeout = 30;
-    const int timeout_after_first_alive = 30;
-    if (con.wait_until_ready(timeout, timeout_after_first_alive) != 0)
+    int ret = con.wait_until_ready(timeout, timeout_after_first_alive);
+    if (! (ret == 0 || (timeout_after_first_alive == 0 && ret > 0)))
     {
       g_err << "Cluster connection was not ready, nodeid: "
             << con.node_id() << endl;
+      abort();
       return NDBT_FAILED;
     }
 
@@ -4699,12 +4719,153 @@ int runNdbClusterConnect(NDBT_Context* c
     NdbSleep_MilliSleep(10 + rand() % max_sleep);
 
     l++;
+    res.unlock(); // make sure it's called before ~Ndb_cluster_connection
+  }
+
+  ctx->incProperty("runNdbClusterConnect_FINISHED");
+
+  return NDBT_OK;
+}
+
+int
+runRestarts(NDBT_Context* ctx, NDBT_Step* step)
+{
+  int result = NDBT_OK;
+  Uint32 threads = ctx->getProperty("API_NODES", (unsigned)0);
+  Uint32 sr = ctx->getProperty("ClusterRestart", (unsigned)0);
+  Uint32 master = ctx->getProperty("Master", (unsigned)0);
+  Uint32 slow = ctx->getProperty("SlowNR", (unsigned)0);
+  NdbRestarter restarter;
+
+  if (restarter.waitClusterStarted() != 0)
+  {
+    g_err << "Cluster failed to start" << endl;
+    return NDBT_FAILED;
+  }
+
+  if (sr == 0 && restarter.getNumDbNodes() < 2)
+    return NDBT_OK;
+
+  while (ctx->getProperty("runNdbClusterConnect_FINISHED") < threads
+         && !ctx->isTestStopped())
+  {
+    ndbout_c("%u %u",
+             ctx->getProperty("runNdbClusterConnect_FINISHED"),
+             threads);
+    if (sr == 0)
+    {
+      int id = rand() % restarter.getNumDbNodes();
+      int nodeId = restarter.getDbNodeId(id);
+      if (master == 1)
+      {
+        nodeId = restarter.getMasterNodeId();
+      }
+      else if (master == 2)
+      {
+        nodeId = restarter.getRandomNotMasterNodeId(rand());
+      }
+      ndbout << "Restart node " << nodeId
+             << "(master: " << restarter.getMasterNodeId() << ")"
+             << endl;
+      if (restarter.restartOneDbNode(nodeId, false, true, true) != 0)
+      {
+        g_err << "Failed to restartNextDbNode" << endl;
+        result = NDBT_FAILED;
+        break;
+      }
+
+      if (restarter.waitNodesNoStart(&nodeId, 1))
+      {
+        g_err << "Failed to waitNodesNoStart" << endl;
+        result = NDBT_FAILED;
+        break;
+      }
+
+      if (slow)
+      {
+        /**
+         * Block starting node in sp4
+         */
+        int dump[] = { 71, 4 };
+        restarter.dumpStateOneNode(nodeId, dump, NDB_ARRAY_SIZE(dump));
+      }
+
+      if (restarter.startNodes(&nodeId, 1))
+      {
+        g_err << "Failed to start node" << endl;
+        result = NDBT_FAILED;
+        break;
+      }
+
+      if (slow)
+      {
+        Uint32 blockTime = 3 * 60 * 1000;
+        Uint64 end = NdbTick_CurrentMillisecond() + blockTime;
+        while (ctx->getProperty("runNdbClusterConnect_FINISHED") < threads
+               && !ctx->isTestStopped() &&
+               NdbTick_CurrentMillisecond() < end)
+        {
+          NdbSleep_MilliSleep(100);
+        }
+
+        // unblock
+        int dump[] = { 71 };
+        restarter.dumpStateOneNode(nodeId, dump, NDB_ARRAY_SIZE(dump));
+      }
+    }
+    else
+    {
+      ndbout << "Blocking threads" << endl;
+      ctx->setProperty("WAITING", Uint32(0));
+      ctx->setProperty("WAIT", 1);
+      while (ctx->getProperty("WAITING") <
+             (threads - ctx->getProperty("runNdbClusterConnect_FINISHED")) &&
+             !ctx->isTestStopped())
+      {
+        NdbSleep_MilliSleep(10);
+      }
+
+      ndbout << "Restart cluster" << endl;
+      if (restarter.restartAll2(Uint32(NdbRestarter::NRRF_NOSTART |
+                                       NdbRestarter::NRRF_ABORT)) != 0)
+      {
+        g_err << "Failed to restartAll" << endl;
+        result = NDBT_FAILED;
+        break;
+      }
+
+      ctx->setProperty("WAITING", Uint32(0));
+      ctx->setProperty("WAIT", Uint32(0));
+
+      ndbout << "Starting cluster" << endl;
+      restarter.startAll();
+    }
+
+    if (restarter.waitClusterStarted() != 0)
+    {
+      g_err << "Cluster failed to start" << endl;
+      result = NDBT_FAILED;
+      break;
+    }
+  }
+
+  return result;
+}
+
+int runCheckAllNodesStarted(NDBT_Context* ctx, NDBT_Step* step){
+  NdbRestarter restarter;
+
+  if (restarter.waitClusterStarted(1) != 0)
+  {
+    g_err << "All nodes were not started" << endl;
+    return NDBT_FAILED;
   }
 
   return NDBT_OK;
 }
 
 
+
 static bool
 check_connect_no_such_host()
 {
@@ -5040,7 +5201,50 @@ TESTCASE("NdbClusterConnectionConnect",
 {
   INITIALIZER(runNdbClusterConnectionConnect);
 }
-
+TESTCASE("NdbClusterConnectNR",
+         "Make sure that every Ndb_cluster_connection get a unique nodeid")
+{
+  TC_PROPERTY("TimeoutAfterFirst", (Uint32)0);
+  INITIALIZER(runNdbClusterConnectInit);
+  STEPS(runNdbClusterConnect, MAX_NODES);
+  STEP(runRestarts); // Note after runNdbClusterConnect or else counting wrong
+}
+TESTCASE("NdbClusterConnectNR_master",
+         "Make sure that every Ndb_cluster_connection get a unique nodeid")
+{
+  TC_PROPERTY("Master", 1);
+  TC_PROPERTY("TimeoutAfterFirst", (Uint32)0);
+  INITIALIZER(runNdbClusterConnectInit);
+  STEPS(runNdbClusterConnect, MAX_NODES);
+  STEP(runRestarts); // Note after runNdbClusterConnect or else counting wrong
+}
+TESTCASE("NdbClusterConnectNR_non_master",
+         "Make sure that every Ndb_cluster_connection get a unique nodeid")
+{
+  TC_PROPERTY("Master", 2);
+  TC_PROPERTY("TimeoutAfterFirst", (Uint32)0);
+  INITIALIZER(runNdbClusterConnectInit);
+  STEPS(runNdbClusterConnect, MAX_NODES);
+  STEP(runRestarts); // Note after runNdbClusterConnect or else counting wrong
+}
+TESTCASE("NdbClusterConnectNR_slow",
+         "Make sure that every Ndb_cluster_connection get a unique nodeid")
+{
+  TC_PROPERTY("Master", 2);
+  TC_PROPERTY("TimeoutAfterFirst", (Uint32)0);
+  TC_PROPERTY("SlowNR", 1);
+  INITIALIZER(runNdbClusterConnectInit);
+  STEPS(runNdbClusterConnect, MAX_NODES);
+  STEP(runRestarts); // Note after runNdbClusterConnect or else counting wrong
+}
+TESTCASE("NdbClusterConnectSR",
+         "Make sure that every Ndb_cluster_connection get a unique nodeid")
+{
+  TC_PROPERTY("ClusterRestart", (Uint32)1);
+  INITIALIZER(runNdbClusterConnectInit);
+  STEPS(runNdbClusterConnect, MAX_NODES);
+  STEP(runRestarts); // Note after runNdbClusterConnect or else counting wrong
+}
 NDBT_TESTSUITE_END(testNdbApi);
 
 int main(int argc, const char** argv){

=== modified file 'storage/ndb/test/ndbapi/testRestartGci.cpp'
--- a/storage/ndb/test/ndbapi/testRestartGci.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/test/ndbapi/testRestartGci.cpp	2011-10-03 07:22:29 +0000
@@ -486,7 +486,7 @@ int runNodeInitialRestarts(NDBT_Context*
     int nodeId = restarter.getNode(NdbRestarter::NS_RANDOM);
     ndbout_c("Restarting node %u", nodeId);
 
-    if (restarter.restartOneDbNode(nodeId, NdbRestarter::NRRF_INITIAL) != 0)
+    if (restarter.restartOneDbNode2(nodeId, NdbRestarter::NRRF_INITIAL) != 0)
     {
       ndbout_c("Error restarting node");
       ctx->stopTest();
@@ -546,7 +546,7 @@ int runUpdateVerifyGCI(NDBT_Context* ctx
     CHECK(rowGci != NULL);
 
     /* Define an update op to set the next GCI */
-    CHECK(hugoOps.pkUpdateRecord(pNdb, 0, 1, loopCount+1) == 0);
+    CHECK(hugoOps.pkUpdateRecord(pNdb, 0, 1, (int)(loopCount+1)) == 0);
 
     if (hugoOps.execute_Commit(pNdb) != 0)
     {

=== modified file 'storage/ndb/test/ndbapi/test_event.cpp'
--- a/storage/ndb/test/ndbapi/test_event.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/test/ndbapi/test_event.cpp	2011-09-28 10:46:30 +0000
@@ -168,7 +168,7 @@ Uint32 setAnyValue(Ndb* ndb, NdbTransact
 {
   /* XOR 2 32bit words of transid together */
   Uint64 transId = trans->getTransactionId();
-  return transId ^ (transId >> 32);
+  return (Uint32)(transId ^ (transId >> 32));
 }
 
 bool checkAnyValueTransId(Uint64 transId, Uint32 anyValue)

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2011-09-09 13:33:52 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2011-10-03 08:02:28 +0000
@@ -1753,3 +1753,24 @@ max-time: 500
 cmd: testAsynchMultiwait
 args: -n AsynchMultiwaitWakeup T1
 
+# alloc node id
+max-time: 500
+cmd: testNdbApi
+args: -n NdbClusterConnect T1
+
+max-time: 500
+cmd: testNdbApi
+args: -n NdbClusterConnectionConnect T1
+
+max-time: 500
+cmd: testNdbApi
+args: -n NdbClusterConnectNR_non_master T1
+
+max-time: 500
+cmd: testNdbApi
+args: -n NdbClusterConnectNR_slow T1
+
+max-time: 500
+cmd: testNdbApi
+args: -n NdbClusterConnectSR T1
+

=== modified file 'storage/ndb/test/src/NDBT_Find.cpp'
--- a/storage/ndb/test/src/NDBT_Find.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/test/src/NDBT_Find.cpp	2011-10-03 07:22:29 +0000
@@ -83,7 +83,10 @@ NDBT_find_ndb_mgmd(BaseString& path)
 {
   NDBT_find_binary(path, "ndb_mgmd",
                    "../../src/mgmsrv",
-                   "../storage/ndb/src/mgmsrv/",
+                   "../storage/ndb/src/mgmsrv",
+                   "../libexec",
+                   "../sbin",
+                   "../bin",
                    NULL);
 }
 

=== modified file 'storage/ndb/test/tools/hugoJoin.cpp'
--- a/storage/ndb/test/tools/hugoJoin.cpp	2011-04-06 14:16:13 +0000
+++ b/storage/ndb/test/tools/hugoJoin.cpp	2011-09-28 09:54:05 +0000
@@ -192,7 +192,7 @@ int main(int argc, char** argv){
     }
     HugoQueryBuilder builder(&MyNdb, tables.getBase(), mask);
     builder.setJoinLevel(_depth);
-    const NdbQueryDef * q = builder.createQuery(&MyNdb);
+    const NdbQueryDef * q = builder.createQuery();
     if (_verbose >= 2)
     {
       q->print(); ndbout << endl;

=== modified file 'support-files/compiler_warnings.supp'
--- a/support-files/compiler_warnings.supp	2011-07-05 12:46:07 +0000
+++ b/support-files/compiler_warnings.supp	2011-09-29 17:47:44 +0000
@@ -100,10 +100,68 @@ mi_packrec.c : .*result of 32-bit shift 
 ctype-simple.c : .*unary minus operator applied to unsigned type, result still unsigned.*
 
 #
+# Below is suppressions added by Cluster Team
+#
+
+#
 # 5.5-mainline
 # 
 extra/perror.c : .*strerror.*
 plugin/semisync/semisync_master.cc : .*may be used uninitialized in this function.*
-storage/innobase/.* : .*
+.*/innobase/.* : .*
 client/mysqldump.c : .*may be used uninitialized in this function.*
 
+# 5.5-mainline windows
+mysqlslap/mysqlslap.c : .*
+yassl/.* : C4005: 'WIN32_LEAN_AND_MEAN' : macro redefinition
+mytap/tap.c : C4028: formal parameter .* different from declaration
+.*/readline.cc : C4101: 'input_file_stat' : unreferenced local variable
+
+#
+# 
+#
+.*/sys_vars.h             : .*C4244.* conversion .* possible loss of data
+.*/mi_dynrec.c            : .*C4244.* conversion .* possible loss of data
+.*/mi_check.c             : .*C4244.* conversion .* possible loss of data
+.*/sys_vars.cc            : .*C4244.* conversion .* possible loss of data
+.*/partition_info.cc      : .*C4244.* conversion .* possible loss of data
+.*/sql_time.cc            : .*C4244.* conversion .* possible loss of data
+.*/sql_view.cc            : .*C4244.* conversion .* possible loss of data
+.*/event_db_repository.cc : .*C4244.* conversion .* possible loss of data
+.*/sql_trigger.cc         : .*C4244.* conversion .* possible loss of data
+.*/sql_table.cc           : .*C4244.* conversion .* possible loss of data
+.*/sql_show.cc            : .*C4244.* conversion .* possible loss of data
+.*/myisamchk.c            : .*C4244.* conversion .* possible loss of data
+.*/log_event.cc           : .*C4244.* conversion .* possible loss of data
+.*/log.cc                 : .*C4244.* conversion .* possible loss of data
+.*/item_timefunc.cc       : .*C4244.* conversion .* possible loss of data
+.*/mysqld.cc              : .*C4244.* conversion .* possible loss of data
+.*/item.cc                : .*C4244.* conversion .* possible loss of data
+.*/ha_partition.cc        : .*C4244.* conversion .* possible loss of data
+.*/sql_db.cc              : .*C4305.*
+.*/sql_cache.cc           : .*C4244.* conversion .* possible loss of data
+.*/sp.cc                  : .*C4244.* conversion .* possible loss of data
+.*/sp_head.cc             : .*C4244.* conversion .* possible loss of data
+.*/sql_insert.cc          : .*C4244.* conversion .* possible loss of data
+.*/parse_file.cc          : .*C4244.* conversion .* possible loss of data
+.*/sql_acl.cc             : .*C4244.* conversion .* possible loss of data
+.*/handler.cc             : .*C4244.* conversion .* possible loss of data
+.*/discover.cc            : .*C4244.* conversion .* possible loss of data
+.*/ha_archive.cc          : .*C4244.* conversion .* possible loss of data
+.*/client.c               : .*C4244.* conversion .* possible loss of data
+.*/sql_update.cc          : .*C4244.* conversion .* possible loss of data
+
+# const qualifiers
+replace/replace.c                     : .*C4090.*
+my_print_defaults/my_print_defaults.c : .*C4090.*
+comp_err/comp_err.c                   : .*C4090.*
+.*/mi_unique.c                        : .*C4090.*
+.*/ft_update.c                        : .*C4090.*
+mysqlshow/mysqlshow.c                 : .*C4090.*
+mysqldump/mysqldump.c                 : .*C4090.*
+.*/mysql_client_test.c                : .*C4090.*
+myisampack/myisampack.c               : .*C4090.*
+my_print_defaults/my_print_defaults.c : .*C4090.*
+myisamchk/myisamchk.c                 : .*C4090.*
+mysqlimport/mysqlimport.c             : .*C4090.*
+mysqlcheck/mysqlcheck.c               : .*C4090.*

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-trunk-cluster branch (magnus.blaudd:3385 to 3388) magnus.blaudd5 Oct