List:Commits« Previous MessageNext Message »
From:magnus.blaudd Date:October 27 2011 12:21pm
Subject:bzr push into mysql-trunk-cluster branch (magnus.blaudd:3401 to 3402)
View as plain text  
 3402 magnus.blaudd@stripped	2011-10-27 [merge]
      Merge 5.5-cluster -> trunk-cluster

    added:
      mysql-test/include/not_ndb_is.inc
      mysql-test/suite/rpl_ndb/r/rpl_ndb_not_null.result
      mysql-test/suite/rpl_ndb/t/rpl_ndb_not_null.test
      storage/ndb/cmake/ndb_get_config_value.cmake
    modified:
      mysql-test/r/information_schema.result
      mysql-test/r/information_schema_db.result
      mysql-test/suite/funcs_1/r/is_columns_is.result
      mysql-test/suite/funcs_1/r/is_tables_is.result
      mysql-test/suite/funcs_1/t/is_columns_is.test
      mysql-test/suite/funcs_1/t/is_tables_is.test
      mysql-test/suite/ndb/include/have_clusterj.inc
      mysql-test/suite/ndb/r/ndb_join_pushdown.result
      mysql-test/suite/ndb/t/have_ndb_dist_priv.inc
      mysql-test/suite/ndb/t/ndb_join_pushdown.test
      mysql-test/suite/ndb_big/rqg_spj.test
      mysql-test/t/information_schema.test
      mysql-test/t/information_schema_db.test
      mysql-test/t/mysqlshow.test
      sql/abstract_query_plan.cc
      sql/abstract_query_plan.h
      sql/ha_ndb_index_stat.cc
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster.h
      sql/ha_ndbcluster_binlog.cc
      sql/ha_ndbcluster_binlog.h
      sql/ha_ndbcluster_connection.cc
      sql/ha_ndbinfo.cc
      sql/ha_ndbinfo.h
      sql/ndb_local_connection.cc
      sql/ndb_thd_ndb.cc
      storage/ndb/CMakeLists.txt
      storage/ndb/VERSION
      storage/ndb/clusterj/CMakeLists.txt
      storage/ndb/clusterj/clusterj-api/CMakeLists.txt
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/AndPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/BetweenPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/CandidateIndexImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/ComparativePredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/EqualPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/GreaterEqualPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/GreaterThanPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/InPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/LessEqualPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/LessThanPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PropertyImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryExecutionContextImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/spi/QueryExecutionContext.java
      storage/ndb/clusterj/clusterj-jpatest/CMakeLists.txt
      storage/ndb/clusterj/clusterj-openjpa/CMakeLists.txt
      storage/ndb/clusterj/clusterj-test/CMakeLists.txt
      storage/ndb/include/CMakeLists.txt
      storage/ndb/include/kernel/signaldata/DiGetNodes.hpp
      storage/ndb/include/ndb_global.h
      storage/ndb/include/ndbapi/Ndb.hpp
      storage/ndb/include/ndbapi/NdbScanOperation.hpp
      storage/ndb/include/util/OutputStream.hpp
      storage/ndb/src/CMakeLists.txt
      storage/ndb/src/common/debugger/EventLogger.cpp
      storage/ndb/src/common/debugger/SignalLoggerManager.cpp
      storage/ndb/src/common/logger/LogHandler.cpp
      storage/ndb/src/common/logger/Logger.cpp
      storage/ndb/src/common/portlib/NdbConfig.c
      storage/ndb/src/common/portlib/NdbDir.cpp
      storage/ndb/src/common/portlib/NdbThread.c
      storage/ndb/src/common/portlib/ndb_daemon.cc
      storage/ndb/src/common/transporter/TransporterRegistry.cpp
      storage/ndb/src/common/util/BaseString.cpp
      storage/ndb/src/common/util/ConfigValues.cpp
      storage/ndb/src/common/util/File.cpp
      storage/ndb/src/common/util/InputStream.cpp
      storage/ndb/src/common/util/NdbSqlUtil.cpp
      storage/ndb/src/common/util/OutputStream.cpp
      storage/ndb/src/common/util/Parser.cpp
      storage/ndb/src/common/util/Properties.cpp
      storage/ndb/src/common/util/ndb_init.cpp
      storage/ndb/src/common/util/ndbzio.c
      storage/ndb/src/common/util/socket_io.cpp
      storage/ndb/src/cw/cpcd/APIService.cpp
      storage/ndb/src/cw/cpcd/CPCD.cpp
      storage/ndb/src/cw/cpcd/Monitor.cpp
      storage/ndb/src/cw/cpcd/Process.cpp
      storage/ndb/src/kernel/blocks/backup/read.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
      storage/ndb/src/kernel/blocks/dbdih/printSysfile.cpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
      storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
      storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
      storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
      storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp
      storage/ndb/src/kernel/blocks/ndbfs/Win32AsyncFile.cpp
      storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
      storage/ndb/src/kernel/error/ErrorReporter.cpp
      storage/ndb/src/kernel/error/ndbd_exit_codes.c
      storage/ndb/src/kernel/vm/NdbinfoTables.cpp
      storage/ndb/src/kernel/vm/SimulatedBlock.cpp
      storage/ndb/src/mgmapi/mgmapi.cpp
      storage/ndb/src/mgmapi/ndb_logevent.cpp
      storage/ndb/src/mgmclient/CommandInterpreter.cpp
      storage/ndb/src/mgmsrv/Defragger.hpp
      storage/ndb/src/mgmsrv/InitConfigFileParser.cpp
      storage/ndb/src/mgmsrv/MgmtSrvr.cpp
      storage/ndb/src/mgmsrv/Services.cpp
      storage/ndb/src/ndbapi/Ndb.cpp
      storage/ndb/src/ndbapi/NdbBlob.cpp
      storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
      storage/ndb/src/ndbapi/NdbImpl.hpp
      storage/ndb/src/ndbapi/NdbOperationExec.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilder.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
      storage/ndb/src/ndbapi/NdbScanOperation.cpp
      storage/ndb/src/ndbapi/NdbTransaction.cpp
      storage/ndb/src/ndbapi/NdbWaitGroup.cpp
      storage/ndb/src/ndbapi/Ndbinit.cpp
      storage/ndb/src/ndbapi/TransporterFacade.cpp
      storage/ndb/src/ndbapi/ndberror.c
      storage/ndb/test/include/NDBT_Table.hpp
      storage/ndb/test/include/NdbMgmd.hpp
      storage/ndb/test/ndbapi/ScanFunctions.hpp
      storage/ndb/test/ndbapi/testDict.cpp
      storage/ndb/test/ndbapi/testMgm.cpp
      storage/ndb/test/ndbapi/testNodeRestart.cpp
      storage/ndb/test/rqg/parseargs.sh
      storage/ndb/test/rqg/run_rqg.sh
      storage/ndb/test/run-test/atrt.hpp
      storage/ndb/test/run-test/files.cpp
      storage/ndb/test/run-test/main.cpp
      storage/ndb/test/src/DbUtil.cpp
      storage/ndb/test/src/HugoQueries.cpp
      storage/ndb/test/src/HugoQueryBuilder.cpp
      storage/ndb/test/src/NDBT_Test.cpp
      storage/ndb/test/src/NdbBackup.cpp
      storage/ndb/test/src/NdbRestarter.cpp
      storage/ndb/test/src/getarg.c
      storage/ndb/test/tools/cpcc.cpp
      storage/ndb/tools/CMakeLists.txt
      storage/ndb/tools/ndb_dump_frm_data.cpp
      storage/ndb/tools/ndb_index_stat.cpp
      storage/ndb/tools/restore/consumer_restore.cpp
      storage/ndb/tools/waiter.cpp
      support-files/compiler_warnings.supp
 3401 Ole John Aske	2011-10-26
      Remove a Windows compiler warning introduced when 'pushed joins' was 
      integrated into the new MRR interface.

    modified:
      sql/ha_ndbcluster.cc
=== added file 'mysql-test/include/not_ndb_is.inc'
--- a/mysql-test/include/not_ndb_is.inc	1970-01-01 00:00:00 +0000
+++ b/mysql-test/include/not_ndb_is.inc	2011-10-17 14:16:56 +0000
@@ -0,0 +1,27 @@
+#
+# Check if cluster is available by selecting from is.engines
+# if an error about no such table occurs bail out
+#
+
+disable_result_log;
+disable_query_log;
+
+--error 0, 1109
+select @have_ndb_is:= count(*) from information_schema.plugins
+where plugin_name like '%ndb%'
+  and PLUGIN_TYPE = 'INFORMATION SCHEMA';
+
+
+if ($mysql_errno){
+  # For backward compatibility, implement old fashioned way
+  # to check here ie. use SHOW VARIABLES LIKE "have_ndb"
+  die Can not determine if server supports ndb without is.engines table;
+}
+
+
+if (`select @have_ndb_is`){
+  skip NDB information schema table installed;
+}
+
+enable_query_log;
+enable_result_log;

=== modified file 'mysql-test/r/information_schema.result'
--- a/mysql-test/r/information_schema.result	2011-10-05 09:49:18 +0000
+++ b/mysql-test/r/information_schema.result	2011-10-27 12:19:57 +0000
@@ -1317,7 +1317,8 @@ count(*) as num1
 from information_schema.tables t
 inner join information_schema.columns c1
 on t.table_schema = c1.table_schema AND t.table_name = c1.table_name
-where t.table_schema = 'information_schema' and
+where t.table_name not like 'ndb%' and
+t.table_schema = 'information_schema' and
 c1.ordinal_position =
 (select isnull(c2.column_type) -
 isnull(group_concat(c2.table_schema, '.', c2.table_name)) +

=== modified file 'mysql-test/r/information_schema_db.result'
--- a/mysql-test/r/information_schema_db.result	2011-07-19 15:11:15 +0000
+++ b/mysql-test/r/information_schema_db.result	2011-10-27 12:19:57 +0000
@@ -3,7 +3,7 @@ drop view if exists v1,v2;
 drop function if exists f1;
 drop function if exists f2;
 use INFORMATION_SCHEMA;
-show tables where Tables_in_information_schema NOT LIKE 'Innodb%';
+show tables where Tables_in_information_schema NOT LIKE 'Innodb%' and Tables_in_information_schema NOT LIKE 'ndb%';
 Tables_in_information_schema
 CHARACTER_SETS
 COLLATIONS

=== modified file 'mysql-test/suite/funcs_1/r/is_columns_is.result'
--- a/mysql-test/suite/funcs_1/r/is_columns_is.result	2011-09-06 12:43:05 +0000
+++ b/mysql-test/suite/funcs_1/r/is_columns_is.result	2011-10-27 12:19:57 +0000
@@ -1,6 +1,6 @@
 SELECT * FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling' AND table_name not like 'innodb_%'
+AND table_name <> 'profiling' AND table_name not like 'innodb_%' AND table_name not like 'ndb%'
 ORDER BY table_schema, table_name, column_name;
 TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	COLUMN_NAME	ORDINAL_POSITION	COLUMN_DEFAULT	IS_NULLABLE	DATA_TYPE	CHARACTER_MAXIMUM_LENGTH	CHARACTER_OCTET_LENGTH	NUMERIC_PRECISION	NUMERIC_SCALE	CHARACTER_SET_NAME	COLLATION_NAME	COLUMN_TYPE	COLUMN_KEY	EXTRA	PRIVILEGES	COLUMN_COMMENT
 def	information_schema	CHARACTER_SETS	CHARACTER_SET_NAME	1		NO	varchar	32	96	NULL	NULL	utf8	utf8_general_ci	varchar(32)			select	
@@ -347,7 +347,7 @@ CHARACTER_SET_NAME,
 COLLATION_NAME
 FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling' AND table_name not like 'innodb_%'
+AND table_name <> 'profiling' AND table_name not like 'innodb_%' AND table_name not like 'ndb%'
 AND CHARACTER_OCTET_LENGTH / CHARACTER_MAXIMUM_LENGTH = 1
 ORDER BY CHARACTER_SET_NAME, COLLATION_NAME, COL_CML;
 COL_CML	DATA_TYPE	CHARACTER_SET_NAME	COLLATION_NAME
@@ -359,7 +359,7 @@ CHARACTER_SET_NAME,
 COLLATION_NAME
 FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling' AND table_name not like 'innodb_%'
+AND table_name <> 'profiling' AND table_name not like 'innodb_%' AND table_name not like 'ndb%'
 AND CHARACTER_OCTET_LENGTH / CHARACTER_MAXIMUM_LENGTH <> 1
 ORDER BY CHARACTER_SET_NAME, COLLATION_NAME, COL_CML;
 COL_CML	DATA_TYPE	CHARACTER_SET_NAME	COLLATION_NAME
@@ -371,7 +371,7 @@ CHARACTER_SET_NAME,
 COLLATION_NAME
 FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling' AND table_name not like 'innodb_%'
+AND table_name <> 'profiling' AND table_name not like 'innodb_%' AND table_name not like 'ndb%'
 AND CHARACTER_OCTET_LENGTH / CHARACTER_MAXIMUM_LENGTH IS NULL
 ORDER BY CHARACTER_SET_NAME, COLLATION_NAME, COL_CML;
 COL_CML	DATA_TYPE	CHARACTER_SET_NAME	COLLATION_NAME
@@ -393,7 +393,7 @@ COLLATION_NAME,
 COLUMN_TYPE
 FROM information_schema.columns
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling' AND table_name not like 'innodb_%'
+AND table_name <> 'profiling' AND table_name not like 'innodb_%' AND table_name not like 'ndb%'
 ORDER BY TABLE_SCHEMA, TABLE_NAME, ORDINAL_POSITION;
 COL_CML	TABLE_SCHEMA	TABLE_NAME	COLUMN_NAME	DATA_TYPE	CHARACTER_MAXIMUM_LENGTH	CHARACTER_OCTET_LENGTH	CHARACTER_SET_NAME	COLLATION_NAME	COLUMN_TYPE
 3.0000	information_schema	CHARACTER_SETS	CHARACTER_SET_NAME	varchar	32	96	utf8	utf8_general_ci	varchar(32)

=== modified file 'mysql-test/suite/funcs_1/r/is_tables_is.result'
--- a/mysql-test/suite/funcs_1/r/is_tables_is.result	2011-07-19 15:11:15 +0000
+++ b/mysql-test/suite/funcs_1/r/is_tables_is.result	2011-10-27 12:19:57 +0000
@@ -11,7 +11,7 @@ AS "user_comment",
 '-----------------------------------------------------' AS "Separator"
 FROM information_schema.tables
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling' AND table_name not like 'innodb_%'
+AND table_name <> 'profiling' AND table_name not like 'innodb_%' AND table_name not like 'ndb%'
 ORDER BY table_schema,table_name;
 TABLE_CATALOG	def
 TABLE_SCHEMA	information_schema
@@ -718,7 +718,7 @@ AS "user_comment",
 '-----------------------------------------------------' AS "Separator"
 FROM information_schema.tables
 WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling' AND table_name not like 'innodb_%'
+AND table_name <> 'profiling' AND table_name not like 'innodb_%' AND table_name not like 'ndb%'
 ORDER BY table_schema,table_name;
 TABLE_CATALOG	def
 TABLE_SCHEMA	information_schema

=== modified file 'mysql-test/suite/funcs_1/t/is_columns_is.test'
--- a/mysql-test/suite/funcs_1/t/is_columns_is.test	2009-08-07 20:04:53 +0000
+++ b/mysql-test/suite/funcs_1/t/is_columns_is.test	2011-10-18 07:53:49 +0000
@@ -18,5 +18,5 @@
 --source include/not_embedded.inc
 
 let $my_where = WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling' AND table_name not like 'innodb_%';
+AND table_name <> 'profiling' AND table_name not like 'innodb_%' AND table_name not like 'ndb%';
 --source suite/funcs_1/datadict/columns.inc

=== modified file 'mysql-test/suite/funcs_1/t/is_tables_is.test'
--- a/mysql-test/suite/funcs_1/t/is_tables_is.test	2009-08-07 20:04:53 +0000
+++ b/mysql-test/suite/funcs_1/t/is_tables_is.test	2011-10-18 07:53:49 +0000
@@ -13,6 +13,6 @@
 #
 
 let $my_where = WHERE table_schema = 'information_schema'
-AND table_name <> 'profiling' AND table_name not like 'innodb_%';
+AND table_name <> 'profiling' AND table_name not like 'innodb_%' AND table_name not like 'ndb%';
 --source suite/funcs_1/datadict/tables1.inc
 

=== modified file 'mysql-test/suite/ndb/include/have_clusterj.inc'
--- a/mysql-test/suite/ndb/include/have_clusterj.inc	2010-03-19 14:30:27 +0000
+++ b/mysql-test/suite/ndb/include/have_clusterj.inc	2011-10-24 12:45:30 +0000
@@ -22,16 +22,18 @@ my $basedir = dirname($mysql_test_dir);
 #
 
 my $clusterj_jar = my_find_file($basedir,
-                                ["storage/ndb/clusterj", 
-                                 "share/mysql/java",             # install unix
-                                 "lib/java"],                    # install windows
+                                [ "storage/ndb/clusterj", 
+                                  "share/java",
+                                  "share/mysql/java",
+                                  "lib/java" ],
                                 "clusterj-*.jar", NOT_REQUIRED);
 
 my $clusterj_test_jar = my_find_file($basedir,
-                                    ["storage/ndb/clusterj/clusterj-test",
-                                     "share/mysql/java",             # install unix
-                                     "lib/java"],                    # install windows
-                                    "clusterj-test-*.jar", NOT_REQUIRED);
+                                     [ "storage/ndb/clusterj/clusterj-test",
+                                       "share/java",
+                                       "share/mysql/java",
+                                       "lib/java" ],
+                                     "clusterj-test-*.jar", NOT_REQUIRED);
 
 my $ndbclient_lib = my_find_file($basedir,
                                 ["storage/ndb/src/.libs", 

=== modified file 'mysql-test/suite/ndb/r/ndb_join_pushdown.result'
--- a/mysql-test/suite/ndb/r/ndb_join_pushdown.result	2011-10-25 08:09:27 +0000
+++ b/mysql-test/suite/ndb/r/ndb_join_pushdown.result	2011-10-27 12:19:57 +0000
@@ -2096,6 +2096,7 @@ insert into t1 values (1, 2);
 insert into t1 values (2, 3);
 insert into t1 values (3, 1);
 set ndb_join_pushdown=true;
+set autocommit=off;
 explain extended
 select *
 from t1, t1 as t2
@@ -2113,12 +2114,17 @@ and t2.a = t1.b;
 a	b	a	b
 1	2	2	3
 3	1	1	2
+@ndb_execute_count:=VARIABLE_VALUE-@ndb_init_execute_count
+3
+This should yield 3 executes (for now...buh)
+set autocommit=on;
 drop table t1;
 create table t1 (a int, b int, primary key(a)) engine = ndb;
 insert into t1 values (1, 2);
 insert into t1 values (2, 3);
 insert into t1 values (3, 1);
 set ndb_join_pushdown=true;
+set autocommit=off;
 explain extended
 select *
 from t1, t1 as t2
@@ -2136,6 +2142,10 @@ and t2.a = t1.b;
 a	b	a	b
 1	2	2	3
 3	1	1	2
+@ndb_execute_count:=VARIABLE_VALUE-@ndb_init_execute_count
+1
+This should yield 1 execute (but inefficient since it's based on scan)
+set autocommit=on;
 explain extended
 select *
 from t1, t1 as t2
@@ -5468,6 +5478,38 @@ select count(*) from t1 as x1 join t1 as
 count(*)
 3
 drop table t1;
+create table t1 
+(a int not null,
+b int not null, 
+c int not null,
+d int not null,
+primary key(a,b,c,d)) engine=ndb partition by key (b,c);
+insert into t1 values (0x4f, 0x4f, 0x4f, 0x4f);
+explain select * from t1 as x1 
+join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+join t1 as x3 on x3.a=x2.d and x3.b=x1.d and x3.c=x2.c;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	x1	ALL	NULL	NULL	NULL	NULL	2	Using where with pushed condition
+1	SIMPLE	x2	ref	PRIMARY	PRIMARY	8	func,test.x1.b	1	Parent of 2 pushed join@1; Using where
+1	SIMPLE	x3	ref	PRIMARY	PRIMARY	12	test.x2.d,test.x1.d,test.x2.c	1	Child of 'x2' in pushed join@1
+select * from t1 as x1 
+join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+join t1 as x3 on x3.a=x2.c and x3.b=x1.d and x3.c=x2.c;
+a	b	c	d	a	b	c	d	a	b	c	d
+79	79	79	79	79	79	79	79	79	79	79	79
+explain select * from t1 as x1 
+join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+join t1 as x3 on x3.a=x2.d and x3.b=x1.d and x3.c=0x4f;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	x1	ALL	NULL	NULL	NULL	NULL	2	Using where with pushed condition
+1	SIMPLE	x2	ref	PRIMARY	PRIMARY	8	func,test.x1.b	1	Parent of 2 pushed join@1; Using where
+1	SIMPLE	x3	ref	PRIMARY	PRIMARY	12	test.x2.d,test.x1.d,const	1	Child of 'x2' in pushed join@1
+select * from t1 as x1 
+join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+join t1 as x3 on x3.a=x2.c and x3.b=x1.d and x3.c=0x4f;
+a	b	c	d	a	b	c	d	a	b	c	d
+79	79	79	79	79	79	79	79	79	79	79	79
+drop table t1;
 create temporary table spj_counts_at_end
 select counter_name, sum(val) as val 
 from ndbinfo.counters 
@@ -5484,10 +5526,10 @@ and spj_counts_at_end.counter_name <> 'R
        and spj_counts_at_end.counter_name <> 'SCAN_ROWS_RETURNED'
        and spj_counts_at_end.counter_name <> 'SCAN_BATCHES_RETURNED';
 counter_name	spj_counts_at_end.val - spj_counts_at_startup.val
-CONST_PRUNED_RANGE_SCANS_RECEIVED	6
+CONST_PRUNED_RANGE_SCANS_RECEIVED	8
 LOCAL_TABLE_SCANS_SENT	250
-PRUNED_RANGE_SCANS_RECEIVED	25
-RANGE_SCANS_RECEIVED	726
+PRUNED_RANGE_SCANS_RECEIVED	27
+RANGE_SCANS_RECEIVED	730
 READS_RECEIVED	58
 TABLE_SCANS_RECEIVED	250
 drop table spj_counts_at_startup;
@@ -5499,9 +5541,9 @@ pruned_scan_count
 sorted_scan_count
 35
 pushed_queries_defined
-401
+405
 pushed_queries_dropped
 9
 pushed_queries_executed
-547
+549
 set ndb_join_pushdown = @save_ndb_join_pushdown;

=== modified file 'mysql-test/suite/ndb/t/have_ndb_dist_priv.inc'
--- a/mysql-test/suite/ndb/t/have_ndb_dist_priv.inc	2011-09-01 12:36:37 +0000
+++ b/mysql-test/suite/ndb/t/have_ndb_dist_priv.inc	2011-10-24 10:06:44 +0000
@@ -21,7 +21,7 @@ my $basedir = dirname($mysql_test_dir);
 # Check if the needed tests are available
 #
 my $sql_file = my_find_file($basedir,
-                            ["storage/ndb/tools", "share/mysql/"],
+                            ["storage/ndb/tools", "share/mysql/", "share" ],
                             "ndb_dist_priv.sql", NOT_REQUIRED);
 
 my $F = IO::File->new("$vardir/tmp/have_ndb_dist_priv_result.inc", "w") or die;

=== modified file 'mysql-test/suite/ndb/t/ndb_join_pushdown.test'
--- a/mysql-test/suite/ndb/t/ndb_join_pushdown.test	2011-09-29 13:11:52 +0000
+++ b/mysql-test/suite/ndb/t/ndb_join_pushdown.test	2011-10-24 08:50:10 +0000
@@ -1070,16 +1070,31 @@ insert into t1 values (3, 1);
 
 set ndb_join_pushdown=true;
 
+##
+# In ps-protocol, server will adds calls to execute(Commit)
+#   (these are optimized away by ndbapi, since there is nothing to commit)
+#   and these generates a diff in execute-count
+# To not have to investigate problem futher, I simply set autocommit=off
+#   (and back further down where we don't track execute-count any longer)
+# It would probably be good to changes these tests to instead use frazers new
+#   ndbapi counters, and instead measure #round-trips
+set autocommit=off;
+
 explain extended
 select *
 from t1, t1 as t2
 where t1.a in (1,3,5)
   and t2.a = t1.b;
+--source suite/ndb/include/ndb_init_execute_count.inc
 --sorted_result
 select *
 from t1, t1 as t2
 where t1.a in (1,3,5)
   and t2.a = t1.b;
+--source suite/ndb/include/ndb_execute_count.inc
+--echo This should yield 3 executes (for now...buh)
+
+set autocommit=on;
 
 connection ddl;
 drop table t1;
@@ -1095,17 +1110,31 @@ insert into t1 values (3, 1);
 
 set ndb_join_pushdown=true;
 
+##
+# In ps-protocol, server will adds calls to execute(Commit)
+#   (these are optimized away by ndbapi, since there is nothing to commit)
+#   and these generates a diff in execute-count
+# To not have to investigate problem futher, I simply set autocommit=off
+#   (and back further down where we don't track execute-count any longer)
+# It would probably be good to changes these tests to instead use frazers new
+#   ndbapi counters, and instead measure #round-trips
+set autocommit=off;
+
 explain extended
 select *
 from t1, t1 as t2
 where t1.a in (1,3,5)
   and t2.a = t1.b;
+--source suite/ndb/include/ndb_init_execute_count.inc
 --sorted_result
 select *
 from t1, t1 as t2
 where t1.a in (1,3,5)
   and t2.a = t1.b;
+--source suite/ndb/include/ndb_execute_count.inc
+--echo This should yield 1 execute (but inefficient since it's based on scan)
 
+set autocommit=on;
 
 ## Adding and 'order by ... desc' trigger the usage
 ## of QUICK_SELECT_DESC which somehow prepares a 
@@ -3837,6 +3866,40 @@ select count(*) from t1 as x1 join t1 as
 connection ddl;
 drop table t1;
 
+####################
+# Test pruned child scans using parameter values (known regression).
+####################
+create table t1 
+       (a int not null,
+       b int not null, 
+       c int not null,
+       d int not null,
+       primary key(a,b,c,d)) engine=ndb partition by key (b,c);
+
+connection spj;
+insert into t1 values (0x4f, 0x4f, 0x4f, 0x4f);
+
+# Prune key depends on parent row.
+explain select * from t1 as x1 
+	join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+	join t1 as x3 on x3.a=x2.d and x3.b=x1.d and x3.c=x2.c;
+
+select * from t1 as x1 
+	join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+	join t1 as x3 on x3.a=x2.c and x3.b=x1.d and x3.c=x2.c;
+
+# Prune key is fixed.
+explain select * from t1 as x1 
+	join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+	join t1 as x3 on x3.a=x2.d and x3.b=x1.d and x3.c=0x4f;
+
+select * from t1 as x1 
+	join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+	join t1 as x3 on x3.a=x2.c and x3.b=x1.d and x3.c=0x4f;
+
+connection ddl;
+drop table t1;
+
 ########################################
 # Verify DBSPJ counters for entire test:
 # Note: These tables are 'temporary' withing 'connection spj'

=== modified file 'mysql-test/suite/ndb_big/rqg_spj.test'
--- a/mysql-test/suite/ndb_big/rqg_spj.test	2011-10-14 11:24:08 +0000
+++ b/mysql-test/suite/ndb_big/rqg_spj.test	2011-10-20 07:57:41 +0000
@@ -20,7 +20,7 @@ create temporary table spj_counts_at_sta
 # Load simple.zz
 # -o => with "oj-extensions"
 --echo Calling: $LOAD_RQG -d simple.zz -o
---exec $LOAD_RQG -d simple.zz -o
+--system $LOAD_RQG -d simple.zz -o
 
 ##
 # run spj_test.yy for 3600 seconds
@@ -28,8 +28,19 @@ create temporary table spj_counts_at_sta
 # If you want to reproduce an "run"
 #   add -s <seed value> to command below
 #
---echo Calling: $RUN_RQG -g spj_test.yy -t 3600
---exec $RUN_RQG -g spj_test.yy -t 3600
+# NOTE: CluB get unhappy if a testcase doesnt print for 1800 seconds
+#       So call program several times instead
+#       (having --exec print as it goes would also be nice...instead of
+#        just at the end)
+#
+let $cmd = $RUN_RQG -g spj_test.yy -t 600;
+--echo Calling: $cmd (6 times)
+--system $cmd
+--system $cmd
+--system $cmd
+--system $cmd
+--system $cmd
+--system $cmd
 
 drop database spj_myisam;
 drop database spj_ndb;

=== added file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_not_null.result'
--- a/mysql-test/suite/rpl_ndb/r/rpl_ndb_not_null.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb/r/rpl_ndb_not_null.result	2011-10-20 12:31:31 +0000
@@ -0,0 +1,196 @@
+include/master-slave.inc
+[connection master]
+SET SQL_LOG_BIN= 0;
+CREATE TABLE t1(`a` INT, `b` DATE DEFAULT NULL,
+`c` INT DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t2(`a` INT, `b` DATE DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t3(`a` INT, `b` DATE DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t4(`a` INT, `b` DATE DEFAULT NULL,
+`c` INT DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+SET SQL_LOG_BIN= 1;
+CREATE TABLE t1(`a` INT, `b` DATE DEFAULT NULL,
+`c` INT DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t2(`a` INT, `b` DATE DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t3(`a` INT, `b` DATE DEFAULT '0000-00-00',
+`c` INT DEFAULT 500, 
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t4(`a` INT, `b` DATE DEFAULT '0000-00-00',
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+************* EXECUTION WITH INSERTS *************
+INSERT INTO t1(a,b,c) VALUES (1, null, 1);
+INSERT INTO t1(a,b,c) VALUES (2,'1111-11-11', 2);
+INSERT INTO t1(a,b) VALUES (3, null);
+INSERT INTO t1(a,c) VALUES (4, 4);
+INSERT INTO t1(a) VALUES (5);
+INSERT INTO t2(a,b) VALUES (1, null);
+INSERT INTO t2(a,b) VALUES (2,'1111-11-11');
+INSERT INTO t2(a) VALUES (3);
+INSERT INTO t3(a,b) VALUES (1, null);
+INSERT INTO t3(a,b) VALUES (2,'1111-11-11');
+INSERT INTO t3(a) VALUES (3);
+INSERT INTO t4(a,b,c) VALUES (1, null, 1);
+INSERT INTO t4(a,b,c) VALUES (2,'1111-11-11', 2);
+INSERT INTO t4(a,b) VALUES (3, null);
+INSERT INTO t4(a,c) VALUES (4, 4);
+INSERT INTO t4(a) VALUES (5);
+************* SHOWING THE RESULT SETS WITH INSERTS *************
+TABLES t1 and t2 must be equal otherwise an error will be thrown. 
+include/diff_tables.inc [master:t1, slave:t1]
+include/diff_tables.inc [master:t2, slave:t2]
+TABLES t2 and t3 must be different.
+SELECT * FROM t3 ORDER BY a;
+a	b
+1	NULL
+2	1111-11-11
+3	NULL
+SELECT * FROM t3 ORDER BY a;
+a	b	c
+1	NULL	500
+2	1111-11-11	500
+3	NULL	500
+SELECT * FROM t4 ORDER BY a;
+a	b	c
+1	NULL	1
+2	1111-11-11	2
+3	NULL	NULL
+4	NULL	4
+5	NULL	NULL
+SELECT * FROM t4 ORDER BY a;
+a	b
+1	NULL
+2	1111-11-11
+3	NULL
+4	NULL
+5	NULL
+************* EXECUTION WITH UPDATES and REPLACES *************
+DELETE FROM t1;
+INSERT INTO t1(a,b,c) VALUES (1,'1111-11-11', 1);
+REPLACE INTO t1(a,b,c) VALUES (2,'1111-11-11', 2);
+UPDATE t1 set b= NULL, c= 300 where a= 1;
+REPLACE INTO t1(a,b,c) VALUES (2, NULL, 300);
+************* SHOWING THE RESULT SETS WITH UPDATES and REPLACES *************
+TABLES t1 and t2 must be equal otherwise an error will be thrown. 
+include/diff_tables.inc [master:t1, slave:t1]
+************* CLEANING *************
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+DROP TABLE t4;
+SET SQL_LOG_BIN= 0;
+CREATE TABLE t1 (`a` INT, `b` BIT DEFAULT NULL, `c` BIT DEFAULT NULL, 
+PRIMARY KEY (`a`)) ENGINE= 'NDB';
+SET SQL_LOG_BIN= 1;
+CREATE TABLE t1 (`a` INT, `b` BIT DEFAULT b'01', `c` BIT DEFAULT NULL,
+PRIMARY KEY (`a`)) ENGINE= 'NDB';
+************* EXECUTION WITH INSERTS *************
+INSERT INTO t1(a,b,c) VALUES (1, null, b'01');
+INSERT INTO t1(a,b,c) VALUES (2,b'00', b'01');
+INSERT INTO t1(a,b) VALUES (3, null);
+INSERT INTO t1(a,c) VALUES (4, b'01');
+INSERT INTO t1(a) VALUES (5);
+************* SHOWING THE RESULT SETS WITH INSERTS *************
+TABLES t1 and t2 must be different.
+SELECT a,b+0,c+0 FROM t1 ORDER BY a;
+a	b+0	c+0
+1	NULL	1
+2	0	1
+3	NULL	NULL
+4	NULL	1
+5	NULL	NULL
+SELECT a,b+0,c+0 FROM t1 ORDER BY a;
+a	b+0	c+0
+1	NULL	1
+2	0	1
+3	NULL	NULL
+4	NULL	1
+5	NULL	NULL
+************* EXECUTION WITH UPDATES and REPLACES *************
+DELETE FROM t1;
+INSERT INTO t1(a,b,c) VALUES (1,b'00', b'01');
+REPLACE INTO t1(a,b,c) VALUES (2,b'00',b'01');
+UPDATE t1 set b= NULL, c= b'00' where a= 1;
+REPLACE INTO t1(a,b,c) VALUES (2, NULL, b'00');
+************* SHOWING THE RESULT SETS WITH UPDATES and REPLACES *************
+TABLES t1 and t2 must be equal otherwise an error will be thrown. 
+include/diff_tables.inc [master:t1, slave:t1]
+DROP TABLE t1;
+################################################################################
+#                       NULL ---> NOT NULL (STRICT MODE)
+#                    UNCOMMENT THIS AFTER FIXING BUG#43992
+################################################################################
+################################################################################
+#                       NULL ---> NOT NULL (NON-STRICT MODE)
+################################################################################
+SET SQL_LOG_BIN= 0;
+CREATE TABLE t1(`a` INT NOT NULL, `b` INT,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t2(`a` INT NOT NULL, `b` INT,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t3(`a` INT NOT NULL, `b` INT,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+SET SQL_LOG_BIN= 1;
+CREATE TABLE t1(`a` INT NOT NULL, `b` INT NOT NULL, 
+`c` INT NOT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t2(`a` INT NOT NULL, `b` INT NOT NULL,
+`c` INT, 
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t3(`a` INT NOT NULL, `b` INT NOT NULL,
+`c` INT DEFAULT 500, 
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+************* EXECUTION WITH INSERTS *************
+INSERT INTO t1(a) VALUES (1);
+INSERT INTO t1(a, b) VALUES (2, NULL);
+INSERT INTO t1(a, b) VALUES (3, 1);
+INSERT INTO t2(a) VALUES (1);
+INSERT INTO t2(a, b) VALUES (2, NULL);
+INSERT INTO t2(a, b) VALUES (3, 1);
+INSERT INTO t3(a) VALUES (1);
+INSERT INTO t3(a, b) VALUES (2, NULL);
+INSERT INTO t3(a, b) VALUES (3, 1);
+INSERT INTO t3(a, b) VALUES (4, 1);
+REPLACE INTO t3(a, b) VALUES (5, null);
+REPLACE INTO t3(a, b) VALUES (3, null);
+UPDATE t3 SET b = NULL where a = 4;
+************* SHOWING THE RESULT SETS *************
+SELECT * FROM t1 ORDER BY a;
+a	b
+1	NULL
+2	NULL
+3	1
+SELECT * FROM t1 ORDER BY a;
+a	b	c
+SELECT * FROM t2 ORDER BY a;
+a	b
+1	NULL
+2	NULL
+3	1
+SELECT * FROM t2 ORDER BY a;
+a	b	c
+1	0	NULL
+2	0	NULL
+3	1	NULL
+SELECT * FROM t3 ORDER BY a;
+a	b
+1	NULL
+2	NULL
+3	NULL
+4	NULL
+5	NULL
+SELECT * FROM t3 ORDER BY a;
+a	b	c
+1	0	500
+2	0	500
+3	0	500
+4	0	500
+5	0	500
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+include/rpl_end.inc

=== added file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_not_null.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_not_null.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_not_null.test	2011-10-20 12:31:31 +0000
@@ -0,0 +1,8 @@
+-- source include/have_binlog_format_row.inc
+-- source include/have_ndb.inc
+-- source include/master-slave.inc
+
+let $engine = 'NDB';
+-- source extra/rpl_tests/rpl_not_null.test
+
+--source include/rpl_end.inc

=== modified file 'mysql-test/t/information_schema.test'
--- a/mysql-test/t/information_schema.test	2011-09-23 10:55:10 +0000
+++ b/mysql-test/t/information_schema.test	2011-10-27 12:19:57 +0000
@@ -1013,7 +1013,8 @@ select t.table_name, group_concat(t.tabl
 from information_schema.tables t
 inner join information_schema.columns c1
 on t.table_schema = c1.table_schema AND t.table_name = c1.table_name
-where t.table_schema = 'information_schema' and
+where t.table_name not like 'ndb%' and
+      t.table_schema = 'information_schema' and
         c1.ordinal_position =
         (select isnull(c2.column_type) -
          isnull(group_concat(c2.table_schema, '.', c2.table_name)) +

=== modified file 'mysql-test/t/information_schema_db.test'
--- a/mysql-test/t/information_schema_db.test	2010-11-19 13:43:13 +0000
+++ b/mysql-test/t/information_schema_db.test	2011-10-17 18:13:57 +0000
@@ -16,7 +16,7 @@ drop function if exists f2;
 
 use INFORMATION_SCHEMA;
 --replace_result Tables_in_INFORMATION_SCHEMA Tables_in_information_schema
-show tables where Tables_in_INFORMATION_SCHEMA NOT LIKE 'Innodb%';
+show tables where Tables_in_INFORMATION_SCHEMA NOT LIKE 'Innodb%' and Tables_in_INFORMATION_SCHEMA NOT LIKE 'ndb%';
 --replace_result 'Tables_in_INFORMATION_SCHEMA (T%)' 'Tables_in_information_schema (T%)'
 show tables from INFORMATION_SCHEMA like 'T%';
 create database `inf%`;

=== modified file 'mysql-test/t/mysqlshow.test'
--- a/mysql-test/t/mysqlshow.test	2011-05-04 09:54:04 +0000
+++ b/mysql-test/t/mysqlshow.test	2011-10-27 12:19:57 +0000
@@ -5,6 +5,9 @@
 # Don't test when thread_pool active
 --source include/not_threadpool.inc
 
+# This test lists the tables in INFORMATION_SCHEMA, and NDB adds extra ones
+-- source include/not_ndb_is.inc
+
 --disable_warnings
 DROP TABLE IF EXISTS t1,t2,test1,test2;
 --enable_warnings

=== modified file 'sql/abstract_query_plan.cc'
--- a/sql/abstract_query_plan.cc	2011-10-05 07:24:39 +0000
+++ b/sql/abstract_query_plan.cc	2011-10-27 12:19:57 +0000
@@ -1,6 +1,5 @@
 /*
-   Copyright 2010 Sun Microsystems, Inc.
-    All rights reserved. Use is subject to license terms.
+   Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by

=== modified file 'sql/abstract_query_plan.h'
--- a/sql/abstract_query_plan.h	2011-09-28 10:55:58 +0000
+++ b/sql/abstract_query_plan.h	2011-10-20 19:52:11 +0000
@@ -1,6 +1,5 @@
 /*
-   Copyright 2010 Sun Microsystems, Inc.
-    All rights reserved. Use is subject to license terms.
+   Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by

=== modified file 'sql/ha_ndb_index_stat.cc'
--- a/sql/ha_ndb_index_stat.cc	2011-10-12 11:33:43 +0000
+++ b/sql/ha_ndb_index_stat.cc	2011-10-20 19:41:56 +0000
@@ -227,7 +227,7 @@ ndb_index_stat_opt2str(const Ndb_index_s
     const Ndb_index_stat_opt::Val& v= opt.val[i];
     ptr+= strlen(ptr);
     const char* sep= (ptr == buf ? "" : ",");
-    const uint sz= ptr < end ? end - ptr : 0;
+    const uint sz= ptr < end ? (uint)(end - ptr) : 0;
 
     switch (v.unit) {
     case Ndb_index_stat_opt::Ubool:

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2011-10-26 07:22:10 +0000
+++ b/sql/ha_ndbcluster.cc	2011-10-27 12:19:57 +0000
@@ -1410,8 +1410,7 @@ int ha_ndbcluster::ndb_err(NdbTransactio
       {
         const NDBINDEX *unique_index=
           (const NDBINDEX *) m_index[i].unique_index;
-        if (unique_index &&
-            (char *) unique_index->getObjectId() == error_data)
+        if (unique_index && UintPtr(unique_index->getObjectId()) == UintPtr(error_data))
         {
           dupkey= i;
           break;
@@ -1454,7 +1453,7 @@ bool ha_ndbcluster::get_error_message(in
 
   const NdbError err= ndb->getNdbError(error);
   bool temporary= err.status==NdbError::TemporaryError;
-  buf->set(err.message, strlen(err.message), &my_charset_bin);
+  buf->set(err.message, (uint32)strlen(err.message), &my_charset_bin);
   DBUG_PRINT("exit", ("message: %s, temporary: %d", buf->ptr(), temporary));
   DBUG_RETURN(temporary);
 }
@@ -2006,7 +2005,7 @@ void ha_ndbcluster::release_blobs_buffer
 */
 
 int cmp_frm(const NDBTAB *ndbtab, const void *pack_data,
-            uint pack_length)
+            size_t pack_length)
 {
   DBUG_ENTER("cmp_frm");
   /*
@@ -3957,7 +3956,7 @@ count_key_columns(const KEY *key_info, c
       break;
     length+= key_part->store_length;
   }
-  return key_part - first_key_part;
+  return (uint)(key_part - first_key_part);
 }
 
 /* Helper method to compute NDB index bounds. Note: does not set range_no. */
@@ -6536,7 +6535,7 @@ int ha_ndbcluster::ndb_update_row(const 
   uint blob_count= 0;
   if (uses_blob_value(table->write_set))
   {
-    int row_offset= new_data - table->record[0];
+    int row_offset= (int)(new_data - table->record[0]);
     int res= set_blob_values(op, row_offset, table->write_set, &blob_count,
                              (batch_allowed && !need_flush));
     if (res != 0)
@@ -8311,7 +8310,7 @@ static int ndbcluster_update_apply_statu
   // log_name
   char tmp_buf[FN_REFLEN];
   ndb_pack_varchar(ndbtab->getColumn(2u), tmp_buf,
-                   group_master_log_name, strlen(group_master_log_name));
+                   group_master_log_name, (int)strlen(group_master_log_name));
   r|= op->setValue(2u, tmp_buf);
   DBUG_ASSERT(r == 0);
   // start_pos
@@ -10028,7 +10027,7 @@ int ha_ndbcluster::create(const char *na
     if ((my_errno= write_ndb_file(name)))
       DBUG_RETURN(my_errno);
 
-    ndbcluster_create_binlog_setup(thd, ndb, name, strlen(name),
+    ndbcluster_create_binlog_setup(thd, ndb, name, (uint)strlen(name),
                                    m_dbname, m_tabname, form);
     DBUG_RETURN(my_errno);
   }
@@ -10930,7 +10929,7 @@ int ha_ndbcluster::rename_table(const ch
       this is a "real" rename table, i.e. not tied to an offline alter table
       - send new name == "to" in query field
     */
-    ndbcluster_log_schema_op(thd, to, strlen(to),
+    ndbcluster_log_schema_op(thd, to, (int)strlen(to),
                              old_dbname, m_tabname,
                              ndb_table_id, ndb_table_version,
                              SOT_RENAME_TABLE_PREPARE,
@@ -11119,10 +11118,10 @@ do_drop:
 /* static version which does not need a handler */
 
 int
-ha_ndbcluster::drop_table(THD *thd, ha_ndbcluster *h, Ndb *ndb,
-                          const char *path,
-                          const char *db,
-                          const char *table_name)
+ha_ndbcluster::drop_table_impl(THD *thd, ha_ndbcluster *h, Ndb *ndb,
+                               const char *path,
+                               const char *db,
+                               const char *table_name)
 {
   DBUG_ENTER("ha_ndbcluster::ndbcluster_delete_table");
   NDBDICT *dict= ndb->getDictionary();
@@ -11314,8 +11313,8 @@ int ha_ndbcluster::delete_table(const ch
     If it was already gone it might have been dropped
     remotely, give a warning and then drop .ndb file.
    */
-  if (!(error= drop_table(thd, this, ndb, name,
-                          m_dbname, m_tabname)) ||
+  if (!(error= drop_table_impl(thd, this, ndb, name,
+                               m_dbname, m_tabname)) ||
       error == HA_ERR_NO_SUCH_TABLE)
   {
     /* Call ancestor function to delete .ndb file */
@@ -11591,7 +11590,7 @@ int ha_ndbcluster::open(const char *name
                             name);
     }
     Ndb* ndb= check_ndb_in_thd(thd);
-    ndbcluster_create_binlog_setup(thd, ndb, name, strlen(name),
+    ndbcluster_create_binlog_setup(thd, ndb, name, (uint)strlen(name),
                                    m_dbname, m_tabname, table);
     if ((m_share=get_share(name, table, FALSE)) == 0)
     {
@@ -12120,8 +12119,8 @@ int ndbcluster_drop_database_impl(THD *t
   List_iterator_fast<char> it(drop_list);
   while ((tabname=it++))
   {
-    tablename_to_filename(tabname, tmp, FN_REFLEN - (tmp - full_path)-1);
-    if (ha_ndbcluster::drop_table(thd, 0, ndb, full_path, dbname, tabname))
+    tablename_to_filename(tabname, tmp, (uint)(FN_REFLEN - (tmp - full_path)-1));
+    if (ha_ndbcluster::drop_table_impl(thd, 0, ndb, full_path, dbname, tabname))
     {
       const NdbError err= dict->getNdbError();
       if (err.code != 709 && err.code != 723)
@@ -12259,7 +12258,7 @@ int ndbcluster_find_all_files(THD *thd)
       }
       /* finalize construction of path */
       end+= tablename_to_filename(elmt.name, end,
-                                  sizeof(key)-(end-key));
+                                  (uint)(sizeof(key)-(end-key)));
       uchar *data= 0, *pack_data= 0;
       size_t length, pack_length;
       int discover= 0;
@@ -12307,7 +12306,7 @@ int ndbcluster_find_all_files(THD *thd)
       else
       {
         /* set up replication for this table */
-        ndbcluster_create_binlog_setup(thd, ndb, key, end-key,
+        ndbcluster_create_binlog_setup(thd, ndb, key, (uint)(end-key),
                                        elmt.database, elmt.name,
                                        0);
       }
@@ -12475,8 +12474,8 @@ ndbcluster_find_files(handlerton *hton, 
     {
       file_name_str= (char*)my_hash_element(&ok_tables, i);
       end= end1 +
-        tablename_to_filename(file_name_str, end1, sizeof(name) - (end1 - name));
-      ndbcluster_create_binlog_setup(thd, ndb, name, end-name,
+        tablename_to_filename(file_name_str, end1, (uint)(sizeof(name) - (end1 - name)));
+      ndbcluster_create_binlog_setup(thd, ndb, name, (uint)(end-name),
                                      db, file_name_str, 0);
     }
   }
@@ -12550,7 +12549,7 @@ ndbcluster_find_files(handlerton *hton, 
     {
       LEX_STRING *tmp_file_name= 0;
       tmp_file_name= thd->make_lex_string(tmp_file_name, file_name_str,
-                                          strlen(file_name_str), TRUE);
+                                          (uint)strlen(file_name_str), TRUE);
       files->push_back(tmp_file_name); 
     }
   }
@@ -12991,7 +12990,7 @@ void ha_ndbcluster::set_dbname(const cha
   while (ptr >= path_name && *ptr != '\\' && *ptr != '/') {
     ptr--;
   }
-  uint name_len= end - ptr;
+  uint name_len= (uint)(end - ptr);
   memcpy(tmp_name, ptr + 1, name_len);
   tmp_name[name_len]= '\0';
   filename_to_tablename(tmp_name, dbname, sizeof(tmp_buff) - 1);
@@ -13023,7 +13022,7 @@ ha_ndbcluster::set_tabname(const char *p
   while (ptr >= path_name && *ptr != '\\' && *ptr != '/') {
     ptr--;
   }
-  uint name_len= end - ptr;
+  uint name_len= (uint)(end - ptr);
   memcpy(tmp_name, ptr + 1, end - ptr);
   tmp_name[name_len]= '\0';
   filename_to_tablename(tmp_name, tabname, sizeof(tmp_buff) - 1);
@@ -13788,7 +13787,7 @@ int handle_trailing_share(THD *thd, NDB_
       share->key_length= min_key_length;
     }
     share->key_length=
-      my_snprintf(share->key, min_key_length + 1, "#leak%lu",
+      (uint)my_snprintf(share->key, min_key_length + 1, "#leak%lu",
                   trailing_share_id++);
   }
   /* Keep it for possible the future trailing free */
@@ -15913,7 +15912,7 @@ ha_ndbcluster::update_table_comment(
         const char*     comment)/* in:  table comment defined by user */
 {
   THD *thd= current_thd;
-  uint length= strlen(comment);
+  uint length= (uint)strlen(comment);
   if (length > 64000 - 3)
   {
     return((char*)comment); /* string too long */
@@ -15934,7 +15933,7 @@ ha_ndbcluster::update_table_comment(
 
   char *str;
   const char *fmt="%s%snumber_of_replicas: %d";
-  const unsigned fmt_len_plus_extra= length + strlen(fmt);
+  const unsigned fmt_len_plus_extra= length + (uint)strlen(fmt);
   if ((str= (char*) my_malloc(fmt_len_plus_extra, MYF(0))) == NULL)
   {
     sql_print_error("ha_ndbcluster::update_table_comment: "
@@ -16330,7 +16329,7 @@ ndbcluster_show_status(handlerton *hton,
   else
     update_status_variables(NULL, &ns, g_ndb_cluster_connection);
 
-  buflen=
+  buflen= (uint)
     my_snprintf(buf, sizeof(buf),
                 "cluster_node_id=%ld, "
                 "connected_host=%s, "
@@ -16353,8 +16352,8 @@ ndbcluster_show_status(handlerton *hton,
     if (ns.transaction_hint_count[i] > 0 ||
         ns.transaction_no_hint_count[i] > 0)
     {
-      uint namelen= my_snprintf(name, sizeof(name), "node[%d]", i);
-      buflen= my_snprintf(buf, sizeof(buf),
+      uint namelen= (uint)my_snprintf(name, sizeof(name), "node[%d]", i);
+      buflen= (uint)my_snprintf(buf, sizeof(buf),
                           "transaction_hint=%ld, transaction_no_hint=%ld",
                           ns.transaction_hint_count[i],
                           ns.transaction_no_hint_count[i]);
@@ -16370,12 +16369,12 @@ ndbcluster_show_status(handlerton *hton,
     tmp.m_name= 0;
     while (ndb->get_free_list_usage(&tmp))
     {
-      buflen=
+      buflen= (uint)
         my_snprintf(buf, sizeof(buf),
                   "created=%u, free=%u, sizeof=%u",
                   tmp.m_created, tmp.m_free, tmp.m_sizeof);
       if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length,
-                     tmp.m_name, strlen(tmp.m_name), buf, buflen))
+                     tmp.m_name, (uint)strlen(tmp.m_name), buf, buflen))
         DBUG_RETURN(TRUE);
     }
   }
@@ -17896,19 +17895,19 @@ static int ndbcluster_fill_files_table(h
       }
 
       table->field[IS_FILES_FILE_NAME]->set_notnull();
-      table->field[IS_FILES_FILE_NAME]->store(elt.name, strlen(elt.name),
+      table->field[IS_FILES_FILE_NAME]->store(elt.name, (uint)strlen(elt.name),
                                               system_charset_info);
       table->field[IS_FILES_FILE_TYPE]->set_notnull();
       table->field[IS_FILES_FILE_TYPE]->store("DATAFILE",8,
                                               system_charset_info);
       table->field[IS_FILES_TABLESPACE_NAME]->set_notnull();
       table->field[IS_FILES_TABLESPACE_NAME]->store(df.getTablespace(),
-                                                    strlen(df.getTablespace()),
+                                                    (uint)strlen(df.getTablespace()),
                                                     system_charset_info);
       table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
       table->field[IS_FILES_LOGFILE_GROUP_NAME]->
         store(ts.getDefaultLogfileGroup(),
-              strlen(ts.getDefaultLogfileGroup()),
+              (uint)strlen(ts.getDefaultLogfileGroup()),
               system_charset_info);
       table->field[IS_FILES_ENGINE]->set_notnull();
       table->field[IS_FILES_ENGINE]->store(ndbcluster_hton_name,
@@ -17934,7 +17933,7 @@ static int ndbcluster_fill_files_table(h
       table->field[IS_FILES_ROW_FORMAT]->store("FIXED", 5, system_charset_info);
 
       char extra[30];
-      int len= my_snprintf(extra, sizeof(extra), "CLUSTER_NODE=%u", id);
+      int len= (int)my_snprintf(extra, sizeof(extra), "CLUSTER_NODE=%u", id);
       table->field[IS_FILES_EXTRA]->set_notnull();
       table->field[IS_FILES_EXTRA]->store(extra, len, system_charset_info);
       schema_table_store_record(thd, table);
@@ -17967,12 +17966,12 @@ static int ndbcluster_fill_files_table(h
 
     table->field[IS_FILES_TABLESPACE_NAME]->set_notnull();
     table->field[IS_FILES_TABLESPACE_NAME]->store(elt.name,
-                                                     strlen(elt.name),
+                                                     (uint)strlen(elt.name),
                                                      system_charset_info);
     table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
     table->field[IS_FILES_LOGFILE_GROUP_NAME]->
       store(ts.getDefaultLogfileGroup(),
-           strlen(ts.getDefaultLogfileGroup()),
+           (uint)strlen(ts.getDefaultLogfileGroup()),
            system_charset_info);
 
     table->field[IS_FILES_ENGINE]->set_notnull();
@@ -18027,7 +18026,7 @@ static int ndbcluster_fill_files_table(h
 
       init_fill_schema_files_row(table);
       table->field[IS_FILES_FILE_NAME]->set_notnull();
-      table->field[IS_FILES_FILE_NAME]->store(elt.name, strlen(elt.name),
+      table->field[IS_FILES_FILE_NAME]->store(elt.name, (uint)strlen(elt.name),
                                               system_charset_info);
       table->field[IS_FILES_FILE_TYPE]->set_notnull();
       table->field[IS_FILES_FILE_TYPE]->store("UNDO LOG", 8,
@@ -18036,7 +18035,7 @@ static int ndbcluster_fill_files_table(h
       uf.getLogfileGroupId(&objid);
       table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
       table->field[IS_FILES_LOGFILE_GROUP_NAME]->store(uf.getLogfileGroup(),
-                                                  strlen(uf.getLogfileGroup()),
+                                                  (uint)strlen(uf.getLogfileGroup()),
                                                        system_charset_info);
       table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->set_notnull();
       table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->store(objid.getObjectId(), true);
@@ -18059,7 +18058,7 @@ static int ndbcluster_fill_files_table(h
       table->field[IS_FILES_VERSION]->store(uf.getObjectVersion(), true);
 
       char extra[100];
-      int len= my_snprintf(extra,sizeof(extra),"CLUSTER_NODE=%u;UNDO_BUFFER_SIZE=%lu",
+      int len= (int)my_snprintf(extra,sizeof(extra),"CLUSTER_NODE=%u;UNDO_BUFFER_SIZE=%lu",
                            id, (ulong) lfg.getUndoBufferSize());
       table->field[IS_FILES_EXTRA]->set_notnull();
       table->field[IS_FILES_EXTRA]->store(extra, len, system_charset_info);
@@ -18094,7 +18093,7 @@ static int ndbcluster_fill_files_table(h
 
     table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
     table->field[IS_FILES_LOGFILE_GROUP_NAME]->store(elt.name,
-                                                     strlen(elt.name),
+                                                     (uint)strlen(elt.name),
                                                      system_charset_info);
     table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->set_notnull();
     table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->store(lfg.getObjectId(), true);
@@ -18112,7 +18111,7 @@ static int ndbcluster_fill_files_table(h
     table->field[IS_FILES_VERSION]->store(lfg.getObjectVersion(), true);
 
     char extra[100];
-    int len= my_snprintf(extra,sizeof(extra),
+    int len= (int)my_snprintf(extra,sizeof(extra),
                          "UNDO_BUFFER_SIZE=%lu",
                          (ulong) lfg.getUndoBufferSize());
     table->field[IS_FILES_EXTRA]->set_notnull();
@@ -18552,12 +18551,8 @@ struct st_mysql_storage_engine ndbcluste
 { MYSQL_HANDLERTON_INTERFACE_VERSION };
 
 
-#include "ha_ndbinfo.h"
-
-extern struct st_mysql_sys_var* ndbinfo_system_variables[];
-
-struct st_mysql_storage_engine ndbinfo_storage_engine=
-{ MYSQL_HANDLERTON_INTERFACE_VERSION };
+extern struct st_mysql_plugin i_s_ndb_transid_mysql_connection_map_plugin;
+extern struct st_mysql_plugin ndbinfo_plugin;
 
 mysql_declare_plugin(ndbcluster)
 {
@@ -18575,21 +18570,9 @@ mysql_declare_plugin(ndbcluster)
   NULL,                       /* config options */
   0                           /* flags */
 },
-{
-  MYSQL_STORAGE_ENGINE_PLUGIN,
-  &ndbinfo_storage_engine,
-  "ndbinfo",
-  "Sun Microsystems Inc.",
-  "MySQL Cluster system information storage engine",
-  PLUGIN_LICENSE_GPL,
-  ndbinfo_init,               /* plugin init */
-  ndbinfo_deinit,             /* plugin deinit */
-  0x0001,                     /* plugin version */
-  NULL,                       /* status variables */
-  ndbinfo_system_variables,   /* system variables */
-  NULL,                       /* config options */
-  0                           /* flags */
-}
+ndbinfo_plugin, /* ndbinfo plugin */
+/* IS plugin table which maps between mysql connection id and ndb trans-id */
+i_s_ndb_transid_mysql_connection_map_plugin
 mysql_declare_plugin_end;
 
 #endif

=== modified file 'sql/ha_ndbcluster.h'
--- a/sql/ha_ndbcluster.h	2011-10-18 10:43:35 +0000
+++ b/sql/ha_ndbcluster.h	2011-10-27 12:19:57 +0000
@@ -573,10 +573,10 @@ private:
                                       NDB_SHARE *share);
 
   void check_read_before_write_removal();
-  static int drop_table(THD *thd, ha_ndbcluster *h, Ndb *ndb,
-                        const char *path,
-                        const char *db,
-                        const char *table_name);
+  static int drop_table_impl(THD *thd, ha_ndbcluster *h, Ndb *ndb,
+                             const char *path,
+                             const char *db,
+                             const char *table_name);
 
   int add_index_impl(THD *thd, TABLE *table_arg,
                      KEY *key_info, uint num_of_keys);

=== modified file 'sql/ha_ndbcluster_binlog.cc'
--- a/sql/ha_ndbcluster_binlog.cc	2011-10-05 07:24:39 +0000
+++ b/sql/ha_ndbcluster_binlog.cc	2011-10-27 12:19:57 +0000
@@ -295,9 +295,9 @@ static void run_query(THD *thd, char *bu
 }
 
 static void
-ndbcluster_binlog_close_table(THD *thd, NDB_SHARE *share)
+ndb_binlog_close_shadow_table(THD *thd, NDB_SHARE *share)
 {
-  DBUG_ENTER("ndbcluster_binlog_close_table");
+  DBUG_ENTER("ndb_binlog_close_shadow_table");
   Ndb_event_data *event_data= share->event_data;
   if (event_data)
   {
@@ -1259,7 +1259,7 @@ static int ndbcluster_find_all_databases
             /* create missing database */
             sql_print_information("NDB: Discovered missing database '%s'", db);
             const int no_print_error[1]= {0};
-            name_len= my_snprintf(name, sizeof(name), "CREATE DATABASE %s", db);
+            name_len= (unsigned)my_snprintf(name, sizeof(name), "CREATE DATABASE %s", db);
             run_query(thd, name, name + name_len,
                       no_print_error);
             run_query(thd, query, query + query_length,
@@ -1591,12 +1591,12 @@ ndbcluster_update_slock(THD *thd,
       DBUG_ASSERT(r == 0);
     
       /* db */
-      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, strlen(db));
+      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, (int)strlen(db));
       r|= op->equal(SCHEMA_DB_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* name */
       ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, table_name,
-                       strlen(table_name));
+                       (int)strlen(table_name));
       r|= op->equal(SCHEMA_NAME_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* slock */
@@ -1634,12 +1634,12 @@ ndbcluster_update_slock(THD *thd,
       DBUG_ASSERT(r == 0);
 
       /* db */
-      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, strlen(db));
+      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, (int)strlen(db));
       r|= op->equal(SCHEMA_DB_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* name */
       ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, table_name,
-                       strlen(table_name));
+                       (int)strlen(table_name));
       r|= op->equal(SCHEMA_NAME_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* slock */
@@ -2010,12 +2010,12 @@ int ndbcluster_log_schema_op(THD *thd,
       DBUG_ASSERT(r == 0);
       
       /* db */
-      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, log_db, strlen(log_db));
+      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, log_db, (int)strlen(log_db));
       r|= op->equal(SCHEMA_DB_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* name */
       ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, log_tab,
-                       strlen(log_tab));
+                       (int)strlen(log_tab));
       r|= op->equal(SCHEMA_NAME_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* slock */
@@ -2251,6 +2251,7 @@ end:
   Handle _non_ data events from the storage nodes
 */
 
+static
 int
 ndb_handle_schema_change(THD *thd, Ndb *is_ndb, NdbEventOperation *pOp,
                          Ndb_event_data *event_data)
@@ -2416,6 +2417,23 @@ static void ndb_binlog_query(THD *thd, C
   thd->db= thd_db_save;
 }
 
+
+class Mutex_guard
+{
+public:
+  Mutex_guard(pthread_mutex_t &mutex) : m_mutex(mutex)
+  {
+    pthread_mutex_lock(&m_mutex);
+  };
+  ~Mutex_guard()
+  {
+    pthread_mutex_unlock(&m_mutex);
+  };
+private:
+  pthread_mutex_t &m_mutex;
+};
+
+
 static int
 ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *s_ndb,
                                       NdbEventOperation *pOp,
@@ -2469,7 +2487,14 @@ ndb_binlog_thread_handle_schema_event(TH
       }
 
       if ((schema->db[0] == 0) && (schema->name[0] == 0))
+      {
+        /**
+         * This happens if there is a schema event on a table (object)
+         *   that this mysqld does not know about.
+         *   E.g. it had a local table shadowing an NDB table...
+         */
         DBUG_RETURN(0);
+      }
       switch (schema_type)
       {
       case SOT_CLEAR_SLOCK:
@@ -2480,17 +2505,15 @@ ndb_binlog_thread_handle_schema_event(TH
         */
         post_epoch_log_list->push_back(schema, mem_root);
         DBUG_RETURN(0);
+
       case SOT_ALTER_TABLE_COMMIT:
-        // fall through
       case SOT_RENAME_TABLE_PREPARE:
-        // fall through
       case SOT_ONLINE_ALTER_TABLE_PREPARE:
-        // fall through
       case SOT_ONLINE_ALTER_TABLE_COMMIT:
         post_epoch_log_list->push_back(schema, mem_root);
         post_epoch_unlock_list->push_back(schema, mem_root);
         DBUG_RETURN(0);
-        break;
+
       default:
         break;
       }
@@ -2498,34 +2521,12 @@ ndb_binlog_thread_handle_schema_event(TH
       if (schema->node_id != node_id)
       {
         int log_query= 0, post_epoch_unlock= 0;
-        char errmsg[MYSQL_ERRMSG_SIZE];
-
+ 
         switch (schema_type)
         {
         case SOT_RENAME_TABLE:
-          // fall through
         case SOT_RENAME_TABLE_NEW:
-        {
-          uint end= my_snprintf(&errmsg[0], MYSQL_ERRMSG_SIZE,
-                                "NDB Binlog: Skipping renaming locally "
-                                "defined table '%s.%s' from binlog schema "
-                                "event '%s' from node %d. ",
-                                schema->db, schema->name, schema->query,
-                                schema->node_id);
-          errmsg[end]= '\0';
-        }
-        // fall through
         case SOT_DROP_TABLE:
-          if (schema_type == SOT_DROP_TABLE)
-          {
-            uint end= my_snprintf(&errmsg[0], MYSQL_ERRMSG_SIZE,
-                                  "NDB Binlog: Skipping dropping locally "
-                                  "defined table '%s.%s' from binlog schema "
-                                  "event '%s' from node %d. ",
-                                  schema->db, schema->name, schema->query,
-                                  schema->node_id);
-            errmsg[end]= '\0';
-          }
           if (! ndbcluster_check_if_local_table(schema->db, schema->name))
           {
             thd_ndb_options.set(TNO_NO_LOCK_SCHEMA_OP);
@@ -2541,9 +2542,15 @@ ndb_binlog_thread_handle_schema_event(TH
           }
           else
           {
-            /* Tables exists as a local table, leave it */
-            DBUG_PRINT("info", ("%s", errmsg));
-            sql_print_error("%s", errmsg);
+            /* Table exists as a local table; print an error and leave it */
+            DBUG_PRINT("info", ("Found local table '%s.%s', leaving it",
+                                schema->db, schema->name));
+            sql_print_error("NDB Binlog: Skipping %sing locally "
+                            "defined table '%s.%s' from binlog schema "
+                            "event '%s' from node %d. ",
+                            (schema_type == SOT_DROP_TABLE ? "dropp" : "renam"),
+                            schema->db, schema->name, schema->query,
+                            schema->node_id);
             log_query= 1;
           }
           // Fall through
@@ -2601,6 +2608,7 @@ ndb_binlog_thread_handle_schema_event(TH
           }
           log_query= 1;
           break;
+
         case SOT_DROP_DB:
           /* Drop the database locally if it only contains ndb tables */
           thd_ndb_options.set(TNO_NO_LOCK_SCHEMA_OP);
@@ -2625,11 +2633,8 @@ ndb_binlog_thread_handle_schema_event(TH
             log_query= 1;
           }
           break;
+
         case SOT_CREATE_DB:
-          if (opt_ndb_extra_logging > 9)
-            sql_print_information("SOT_CREATE_DB %s", schema->db);
-          
-          /* fall through */
         case SOT_ALTER_DB:
         {
           thd_ndb_options.set(TNO_NO_LOCK_SCHEMA_OP);
@@ -2640,6 +2645,7 @@ ndb_binlog_thread_handle_schema_event(TH
           log_query= 1;
           break;
         }
+
         case SOT_CREATE_USER:
         case SOT_DROP_USER:
         case SOT_RENAME_USER:
@@ -2660,16 +2666,22 @@ ndb_binlog_thread_handle_schema_event(TH
           log_query= 1;
 	  break;
         }
+
         case SOT_TABLESPACE:
         case SOT_LOGFILE_GROUP:
           log_query= 1;
           break;
+
         case SOT_ALTER_TABLE_COMMIT:
         case SOT_RENAME_TABLE_PREPARE:
         case SOT_ONLINE_ALTER_TABLE_PREPARE:
         case SOT_ONLINE_ALTER_TABLE_COMMIT:
         case SOT_CLEAR_SLOCK:
+          // Impossible to come here: the above types have already
+          // been handled and caused the function to return.
           abort();
+          break;
+
         }
         if (log_query && ndb_binlog_running)
           ndb_binlog_query(thd, schema);
@@ -3049,7 +3061,7 @@ ndb_binlog_thread_handle_schema_event_po
             sql_print_information("NDB Binlog: handeling online alter/rename");
 
           pthread_mutex_lock(&share->mutex);
-          ndbcluster_binlog_close_table(thd, share);
+          ndb_binlog_close_shadow_table(thd, share);
 
           if ((error= ndb_binlog_open_shadow_table(thd, share)))
             sql_print_error("NDB Binlog: Failed to re-open shadow table %s.%s",
@@ -3295,7 +3307,7 @@ ndb_binlog_index_table__write_rows(THD *
 
     ndb_binlog_index->field[0]->store(first->master_log_pos, true);
     ndb_binlog_index->field[1]->store(first->master_log_file,
-                                      strlen(first->master_log_file),
+                                      (uint)strlen(first->master_log_file),
                                       &my_charset_bin);
     ndb_binlog_index->field[2]->store(epoch= first->epoch, true);
     if (ndb_binlog_index->s->fields > 7)
@@ -4046,7 +4058,7 @@ parse_conflict_fn_spec(const char* confl
   {
     const st_conflict_fn_def &fn= conflict_fns[i];
 
-    uint len= strlen(fn.name);
+    uint len= (uint)strlen(fn.name);
     if (strncmp(ptr, fn.name, len))
       continue;
 
@@ -4118,7 +4130,7 @@ parse_conflict_fn_spec(const char* confl
         }
       }
 
-      uint len= end_arg - start_arg;
+      uint len= (uint)(end_arg - start_arg);
       args[no_args].type=    type;
       args[no_args].ptr=     start_arg;
       args[no_args].len=     len;
@@ -4447,9 +4459,9 @@ ndbcluster_read_replication_table(THD *t
       DBUG_PRINT("info", ("reading[%u]: %s,%s,%u", i, db, table_name, id));
       if ((_op= trans->getNdbOperation(reptab)) == NULL) abort();
       if (_op->readTuple(NdbOperation::LM_CommittedRead)) abort();
-      ndb_pack_varchar(col_db, tmp_buf, db, strlen(db));
+      ndb_pack_varchar(col_db, tmp_buf, db, (int)strlen(db));
       if (_op->equal(col_db->getColumnNo(), tmp_buf)) abort();
-      ndb_pack_varchar(col_table_name, tmp_buf, table_name, strlen(table_name));
+      ndb_pack_varchar(col_table_name, tmp_buf, table_name, (int)strlen(table_name));
       if (_op->equal(col_table_name->getColumnNo(), tmp_buf)) abort();
       if (_op->equal(col_server_id->getColumnNo(), id)) abort();
       if ((col_binlog_type_rec_attr[i]=
@@ -5205,7 +5217,7 @@ ndbcluster_create_event_ops(THD *thd, ND
   Ndb_event_data *event_data= share->event_data;
   int do_ndb_schema_share= 0, do_ndb_apply_status_share= 0;
 #ifdef HAVE_NDB_BINLOG
-  uint len= strlen(share->table_name);
+  uint len= (uint)strlen(share->table_name);
 #endif
   if (!ndb_schema_share && strcmp(share->db, NDB_REP_DB) == 0 &&
       strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0)
@@ -5709,8 +5721,7 @@ static void ndb_unpack_record(TABLE *tab
   Handle error states on events from the storage nodes
 */
 static int
-ndb_binlog_thread_handle_error(Ndb *ndb,
-                               NdbEventOperation *pOp)
+ndb_binlog_thread_handle_error(NdbEventOperation *pOp)
 {
   Ndb_event_data *event_data= (Ndb_event_data *) pOp->getCustomData();
   NDB_SHARE *share= event_data->share;
@@ -5812,13 +5823,13 @@ ndb_binlog_thread_handle_non_data_event(
                         share->key, (long) share, (long) pOp,
                         (long) share->op, (long) share->new_op));
     break;
+
   case NDBEVENT::TE_NODE_FAILURE:
-    /* fall through */
   case NDBEVENT::TE_SUBSCRIBE:
-    /* fall through */
   case NDBEVENT::TE_UNSUBSCRIBE:
     /* ignore */
     return 0;
+
   default:
     sql_print_error("NDB Binlog: unknown non data event %d for %s. "
                     "Ignoring...", (unsigned) type, share->key);
@@ -5930,10 +5941,10 @@ ndb_binlog_thread_handle_data_event(THD*
       switch(pOp->getEventType())
       {
       case NDBEVENT::TE_INSERT:
-        // fall through
       case NDBEVENT::TE_UPDATE:
         event_has_data = true;
         break;
+
       case NDBEVENT::TE_DELETE:
         break;
       default:
@@ -6579,7 +6590,7 @@ restart_cluster_failure:
     {
       LOG_INFO log_info;
       mysql_bin_log.get_current_log(&log_info);
-      int len=  strlen(log_info.log_file_name);
+      int len=  (int)strlen(log_info.log_file_name);
       uint no= 0;
       if ((sscanf(log_info.log_file_name + len - 6, "%u", &no) == 1) &&
           no == 1)
@@ -7088,7 +7099,7 @@ restart_cluster_failure:
           event_count++;
 #endif
           if (pOp->hasError() &&
-              ndb_binlog_thread_handle_error(i_ndb, pOp) < 0)
+              ndb_binlog_thread_handle_error(pOp) < 0)
             goto err;
 
 #ifndef DBUG_OFF
@@ -7438,7 +7449,7 @@ ndbcluster_show_status_binlog(THD* thd, 
     ndb_latest_epoch= injector_ndb->getLatestGCI();
     pthread_mutex_unlock(&injector_mutex);
 
-    buflen=
+    buflen= (uint)
       my_snprintf(buf, sizeof(buf),
                   "latest_epoch=%s, "
                   "latest_trans_epoch=%s, "
@@ -7451,7 +7462,7 @@ ndbcluster_show_status_binlog(THD* thd, 
                   llstr(ndb_latest_handled_binlog_epoch, buff4),
                   llstr(ndb_latest_applied_binlog_epoch, buff5));
     if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length,
-                   "binlog", strlen("binlog"),
+                   "binlog", (uint)strlen("binlog"),
                    buf, buflen))
       DBUG_RETURN(TRUE);
   }

=== modified file 'sql/ha_ndbcluster_binlog.h'
--- a/sql/ha_ndbcluster_binlog.h	2011-10-05 07:24:39 +0000
+++ b/sql/ha_ndbcluster_binlog.h	2011-10-27 12:19:57 +0000
@@ -101,23 +101,6 @@ static const char *ha_ndb_ext=".ndb";
 const uint error_conflict_fn_violation= 9999;
 #endif /* HAVE_NDB_BINLOG */
 
-
-class Mutex_guard
-{
-public:
-  Mutex_guard(pthread_mutex_t &mutex) : m_mutex(mutex)
-  {
-    pthread_mutex_lock(&m_mutex);
-  };
-  ~Mutex_guard()
-  {
-    pthread_mutex_unlock(&m_mutex);
-  };
-private:
-  pthread_mutex_t &m_mutex;
-};
-
-
 extern Ndb_cluster_connection* g_ndb_cluster_connection;
 
 extern unsigned char g_node_id_map[max_ndb_nodes];
@@ -228,7 +211,7 @@ ndbcluster_show_status_binlog(THD* thd, 
   the ndb binlog code
 */
 int cmp_frm(const NDBTAB *ndbtab, const void *pack_data,
-            uint pack_length);
+            size_t pack_length);
 int ndbcluster_find_all_files(THD *thd);
 
 char *ndb_pack_varchar(const NDBCOL *col, char *buf,

=== modified file 'sql/ha_ndbcluster_connection.cc'
--- a/sql/ha_ndbcluster_connection.cc	2011-10-05 07:24:39 +0000
+++ b/sql/ha_ndbcluster_connection.cc	2011-10-27 12:19:57 +0000
@@ -303,4 +303,120 @@ void ndb_get_connection_stats(Uint64* st
   }
 }
 
+static ST_FIELD_INFO ndb_transid_mysql_connection_map_fields_info[] =
+{
+  {
+    "mysql_connection_id",
+    MY_INT64_NUM_DECIMAL_DIGITS,
+    MYSQL_TYPE_LONGLONG,
+    0,
+    MY_I_S_UNSIGNED,
+    "",
+    SKIP_OPEN_TABLE
+  },
+
+  {
+    "node_id",
+    MY_INT64_NUM_DECIMAL_DIGITS,
+    MYSQL_TYPE_LONG,
+    0,
+    MY_I_S_UNSIGNED,
+    "",
+    SKIP_OPEN_TABLE
+  },
+  {
+    "ndb_transid",
+    MY_INT64_NUM_DECIMAL_DIGITS,
+    MYSQL_TYPE_LONGLONG,
+    0,
+    MY_I_S_UNSIGNED,
+    "",
+    SKIP_OPEN_TABLE
+  },
+
+  { 0, 0, MYSQL_TYPE_NULL, 0, 0, "", SKIP_OPEN_TABLE }
+};
+
+#include <mysql/innodb_priv.h>
+
+static
+int
+ndb_transid_mysql_connection_map_fill_table(THD* thd, TABLE_LIST* tables, Item* cond)
+{
+  DBUG_ENTER("ndb_transid_mysql_connection_map_fill_table");
+
+  const bool all = (check_global_access(thd, PROCESS_ACL) == 0);
+  const ulonglong self = thd_get_thread_id(thd);
+
+  TABLE* table= tables->table;
+  for (uint i = 0; i<g_pool_alloc; i++)
+  {
+    if (g_pool[i])
+    {
+      g_pool[i]->lock_ndb_objects();
+      const Ndb * p = g_pool[i]->get_next_ndb_object(0);
+      while (p)
+      {
+        Uint64 connection_id = p->getCustomData64();
+        if ((connection_id == self) || all)
+        {
+          table->field[0]->set_notnull();
+          table->field[0]->store(p->getCustomData64(), true);
+          table->field[1]->set_notnull();
+          table->field[1]->store(g_pool[i]->node_id());
+          table->field[2]->set_notnull();
+          table->field[2]->store(p->getNextTransactionId(), true);
+          schema_table_store_record(thd, table);
+        }
+        p = g_pool[i]->get_next_ndb_object(p);
+      }
+      g_pool[i]->unlock_ndb_objects();
+    }
+  }
+
+  DBUG_RETURN(0);
+}
+
+static
+int
+ndb_transid_mysql_connection_map_init(void *p)
+{
+  DBUG_ENTER("ndb_transid_mysql_connection_map_init");
+  ST_SCHEMA_TABLE* schema = reinterpret_cast<ST_SCHEMA_TABLE*>(p);
+  schema->fields_info = ndb_transid_mysql_connection_map_fields_info;
+  schema->fill_table = ndb_transid_mysql_connection_map_fill_table;
+  DBUG_RETURN(0);
+}
+
+static
+int
+ndb_transid_mysql_connection_map_deinit(void *p)
+{
+  DBUG_ENTER("ndb_transid_mysql_connection_map_deinit");
+  DBUG_RETURN(0);
+}
+
+#include <mysql/plugin.h>
+static struct st_mysql_information_schema i_s_info =
+{
+  MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION
+};
+
+struct st_mysql_plugin i_s_ndb_transid_mysql_connection_map_plugin =
+{
+  MYSQL_INFORMATION_SCHEMA_PLUGIN,
+  &i_s_info,
+  "ndb_transid_mysql_connection_map",
+  "Oracle Corporation",
+  "Map between mysql connection id and ndb transaction id",
+  PLUGIN_LICENSE_GPL,
+  ndb_transid_mysql_connection_map_init,
+  ndb_transid_mysql_connection_map_deinit,
+  0x0001,
+  NULL,
+  NULL,
+  NULL,
+  0
+};
+
 #endif /* WITH_NDBCLUSTER_STORAGE_ENGINE */

=== modified file 'sql/ha_ndbinfo.cc'
--- a/sql/ha_ndbinfo.cc	2011-09-07 10:08:09 +0000
+++ b/sql/ha_ndbinfo.cc	2011-10-27 12:19:57 +0000
@@ -246,7 +246,7 @@ bool ha_ndbinfo::get_error_message(int e
   if (!message)
     DBUG_RETURN(false);
 
-  buf->set(message, strlen(message), &my_charset_bin);
+  buf->set(message, (uint32)strlen(message), &my_charset_bin);
   DBUG_PRINT("exit", ("message: %s", buf->ptr()));
   DBUG_RETURN(false);
 }
@@ -737,7 +737,9 @@ ndbinfo_find_files(handlerton *hton, THD
 
 handlerton* ndbinfo_hton;
 
-int ndbinfo_init(void *plugin)
+static
+int
+ndbinfo_init(void *plugin)
 {
   DBUG_ENTER("ndbinfo_init");
 
@@ -782,7 +784,9 @@ int ndbinfo_init(void *plugin)
   DBUG_RETURN(0);
 }
 
-int ndbinfo_deinit(void *plugin)
+static
+int
+ndbinfo_deinit(void *plugin)
 {
   DBUG_ENTER("ndbinfo_deinit");
 
@@ -807,6 +811,28 @@ struct st_mysql_sys_var* ndbinfo_system_
   NULL
 };
 
+struct st_mysql_storage_engine ndbinfo_storage_engine=
+{
+  MYSQL_HANDLERTON_INTERFACE_VERSION
+};
+
+struct st_mysql_plugin ndbinfo_plugin =
+{
+  MYSQL_STORAGE_ENGINE_PLUGIN,
+  &ndbinfo_storage_engine,
+  "ndbinfo",
+  "Sun Microsystems Inc.",
+  "MySQL Cluster system information storage engine",
+  PLUGIN_LICENSE_GPL,
+  ndbinfo_init,               /* plugin init */
+  ndbinfo_deinit,             /* plugin deinit */
+  0x0001,                     /* plugin version */
+  NULL,                       /* status variables */
+  ndbinfo_system_variables,   /* system variables */
+  NULL,                       /* config options */
+  0
+};
+
 template class Vector<const NdbInfoRecAttr*>;
 
 #endif

=== modified file 'sql/ha_ndbinfo.h'
--- a/sql/ha_ndbinfo.h	2011-07-05 12:46:07 +0000
+++ b/sql/ha_ndbinfo.h	2011-10-17 18:13:57 +0000
@@ -21,9 +21,6 @@
 
 #include <mysql/plugin.h>
 
-int ndbinfo_init(void *plugin);
-int ndbinfo_deinit(void *plugin);
-
 class ha_ndbinfo: public handler
 {
 public:

=== modified file 'sql/ndb_local_connection.cc'
--- a/sql/ndb_local_connection.cc	2011-09-06 12:43:05 +0000
+++ b/sql/ndb_local_connection.cc	2011-10-27 12:19:57 +0000
@@ -192,11 +192,11 @@ Ndb_local_connection::truncate_table(con
   DBUG_PRINT("enter", ("db: '%s', table: '%s'", db, table));
 
   // Create the SQL string
-  String sql_text(db_length + table_length + 100);
+  String sql_text((uint32)(db_length + table_length + 100));
   sql_text.append(STRING_WITH_LEN("TRUNCATE TABLE "));
-  sql_text.append(db, db_length);
+  sql_text.append(db, (uint32)db_length);
   sql_text.append(STRING_WITH_LEN("."));
-  sql_text.append(table, table_length);
+  sql_text.append(table, (uint32)table_length);
 
   // Setup list of errors to ignore
   uint ignore_mysql_errors[2] = {0, 0};
@@ -217,11 +217,11 @@ Ndb_local_connection::flush_table(const 
   DBUG_PRINT("enter", ("db: '%s', table: '%s'", db, table));
 
   // Create the SQL string
-  String sql_text(db_length + table_length + 100);
+  String sql_text((uint32)(db_length + table_length + 100));
   sql_text.append(STRING_WITH_LEN("FLUSH TABLES "));
-  sql_text.append(db, db_length);
+  sql_text.append(db, (uint32)db_length);
   sql_text.append(STRING_WITH_LEN("."));
-  sql_text.append(table, table_length);
+  sql_text.append(table, (uint32)table_length);
 
   DBUG_RETURN(execute_query_iso(sql_text.lex_string(),
                                 NULL,
@@ -239,11 +239,11 @@ Ndb_local_connection::delete_rows(const 
   DBUG_PRINT("enter", ("db: '%s', table: '%s'", db, table));
 
   // Create the SQL string
-  String sql_text(db_length + table_length + 100);
+  String sql_text((uint32)(db_length + table_length + 100));
   sql_text.append(STRING_WITH_LEN("DELETE FROM "));
-  sql_text.append(db, db_length);
+  sql_text.append(db, (uint32)db_length);
   sql_text.append(STRING_WITH_LEN("."));
-  sql_text.append(table, table_length);
+  sql_text.append(table, (uint32)table_length);
   sql_text.append(" WHERE ");
 
   va_list args;
@@ -376,9 +376,9 @@ Ndb_local_connection::create_sys_table(c
 
   if (create_if_not_exists)
     sql_text.append(STRING_WITH_LEN("IF NOT EXISTS "));
-  sql_text.append(db, db_length);
+  sql_text.append(db, (uint32)db_length);
   sql_text.append(STRING_WITH_LEN("."));
-  sql_text.append(table, table_length);
+  sql_text.append(table, (uint32)table_length);
 
   sql_text.append(STRING_WITH_LEN(" ( "));
   sql_text.append(create_definitions);

=== modified file 'sql/ndb_thd_ndb.cc'
--- a/sql/ndb_thd_ndb.cc	2011-03-08 22:08:44 +0000
+++ b/sql/ndb_thd_ndb.cc	2011-10-17 18:13:57 +0000
@@ -43,6 +43,10 @@ Thd_ndb::seize(THD* thd)
     delete thd_ndb;
     thd_ndb= NULL;
   }
+  else
+  {
+    thd_ndb->ndb->setCustomData64(thd_get_thread_id(thd));
+  }
   DBUG_RETURN(thd_ndb);
 }
 
@@ -81,6 +85,10 @@ Thd_ndb::recycle_ndb(THD* thd)
                          ndb->getNdbError().message));
     DBUG_RETURN(false);
   }
+  else
+  {
+    ndb->setCustomData64(thd_get_thread_id(thd));
+  }
   DBUG_RETURN(true);
 }
 

=== modified file 'storage/ndb/CMakeLists.txt'
--- a/storage/ndb/CMakeLists.txt	2011-10-17 13:30:56 +0000
+++ b/storage/ndb/CMakeLists.txt	2011-10-27 12:19:57 +0000
@@ -82,7 +82,7 @@ SET(NDBCLUSTER_SOURCES
   ../../sql/ndb_conflict_trans.cc
 )
 
-# Inlude directories used when building ha_ndbcluster
+# Include directories used when building ha_ndbcluster
 INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/storage/ndb/include)
 
 IF(NOT MYSQL_CLUSTER_VERSION)
@@ -186,7 +186,7 @@ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} 
 # Check for Java and JDK needed by ndbjtie and clusterj
 INCLUDE(FindJava)
 INCLUDE(FindJNI)
-INCLUDE("${CMAKE_SOURCE_DIR}/storage/ndb/config/type_JAVA.cmake")
+INCLUDE("${NDB_SOURCE_DIR}/config/type_JAVA.cmake")
 
 IF(JAVA_COMPILE AND JAVA_ARCHIVE)
   MESSAGE(STATUS "Found Java")

=== modified file 'storage/ndb/VERSION'
--- a/storage/ndb/VERSION	2011-10-05 07:24:39 +0000
+++ b/storage/ndb/VERSION	2011-10-27 12:19:57 +0000
@@ -4,3 +4,6 @@ NDB_VERSION_MAJOR=7
 NDB_VERSION_MINOR=2
 NDB_VERSION_BUILD=2
 NDB_VERSION_STATUS=""
+NDB_SHARED_LIB_VERSION_MAJOR=6
+NDB_SHARED_LIB_VERSION_MINOR=0
+NDB_SHARED_LIB_VERSION_BUILD=0

=== modified file 'storage/ndb/clusterj/CMakeLists.txt'
--- a/storage/ndb/clusterj/CMakeLists.txt	2011-09-06 14:49:02 +0000
+++ b/storage/ndb/clusterj/CMakeLists.txt	2011-10-24 12:45:30 +0000
@@ -61,3 +61,6 @@ CREATE_JAR(clusterj
   MERGE_JARS ${JARS}
   MANIFEST ${CMAKE_CURRENT_SOURCE_DIR}/clusterj_manifest.mf
   DEPENDENCIES ${DEP} )
+
+INSTALL(FILES clusterj-${MYSQL_CLUSTER_VERSION}.jar
+        DESTINATION ${INSTALL_MYSQLSHAREDIR}/java COMPONENT ClusterJ)

=== modified file 'storage/ndb/clusterj/clusterj-api/CMakeLists.txt'
--- a/storage/ndb/clusterj/clusterj-api/CMakeLists.txt	2011-07-31 10:04:20 +0000
+++ b/storage/ndb/clusterj/clusterj-api/CMakeLists.txt	2011-10-24 12:45:30 +0000
@@ -30,3 +30,6 @@ CREATE_MANIFEST(manifest.mf ${CLUSTERJ_A
 CREATE_JAR(clusterj-api ${JAVA_SOURCES}
   CLASSPATH target/classes
   MANIFEST ${CMAKE_CURRENT_SOURCE_DIR}/manifest.mf)
+
+INSTALL(FILES clusterj-api-${MYSQL_CLUSTER_VERSION}.jar
+        DESTINATION ${INSTALL_MYSQLSHAREDIR}/java COMPONENT ClusterJ)

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java	2011-08-29 08:17:26 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java	2011-10-22 00:40:34 +0000
@@ -174,12 +174,9 @@ public class SessionImpl implements Sess
      */
     public <T> T find(Class<T> cls, Object key) {
         DomainTypeHandler<T> domainTypeHandler = getDomainTypeHandler(cls);
-        T instance = (T) factory.newInstance(cls, dictionary);
         ValueHandler keyHandler = domainTypeHandler.createKeyValueHandler(key);
-        ValueHandler instanceHandler = domainTypeHandler.getValueHandler(instance);
         // initialize from the database using the key
-        return (T) initializeFromDatabase(
-                domainTypeHandler, instance, instanceHandler, keyHandler);
+        return initializeFromDatabase(domainTypeHandler, null, null, keyHandler);
     }
 
     /** Initialize fields from the database. The keyHandler must

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/AndPredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/AndPredicateImpl.java	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/AndPredicateImpl.java	2011-10-20 19:41:56 +0000
@@ -61,11 +61,13 @@ public class AndPredicateImpl extends Pr
         }
     }
 
+    @Override
     public Predicate or(Predicate predicate) {
         throw new UnsupportedOperationException(
                 local.message("ERR_NotImplemented"));
     }
 
+    @Override
     public Predicate not() {
         throw new UnsupportedOperationException(
                 local.message("ERR_NotImplemented"));
@@ -109,6 +111,7 @@ public class AndPredicateImpl extends Pr
     /** Set the keys into the operation for each predicate.
      * Each predicate must be an equal predicate for a primary or unique key.
      */
+    @Override
     public void operationEqual(QueryExecutionContext context,
             Operation op) {
         for (PredicateImpl predicate: predicates) {
@@ -120,17 +123,6 @@ public class AndPredicateImpl extends Pr
         }
     }
 
-    /** Get the best index for the operation. Delegate to the method
-     * in the superclass, passing the array of predicates.
-     *
-     * @return the best index
-     */
-    @Override
-    public CandidateIndexImpl getBestCandidateIndex(QueryExecutionContext context) {
-        return getBestCandidateIndexFor(context, predicates.toArray(
-                new PredicateImpl[predicates.size()]));
-    }
-
     /** Get the number of conditions in the top level predicate.
      * This is used to determine whether a hash index can be used. If there
      * are exactly the number of conditions as index columns, then the
@@ -144,4 +136,14 @@ public class AndPredicateImpl extends Pr
     protected int getNumberOfConditionsInPredicate() {
         return predicates.size();
     }
+
+    /** Return an array of top level predicates that might be used with indices.
+     * 
+     * @return an array of top level predicates (defaults to {this}).
+     */
+    @Override
+    protected PredicateImpl[] getTopLevelPredicates() {
+        return predicates.toArray(new PredicateImpl[predicates.size()]);
+    }
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/BetweenPredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/BetweenPredicateImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/BetweenPredicateImpl.java	2011-10-18 22:54:36 +0000
@@ -46,11 +46,13 @@ public class BetweenPredicateImpl extend
         upper.setProperty(property);
     }
 
+    @Override
     public void markParameters() {
         lower.mark();
         upper.mark();
     }
 
+    @Override
     public void unmarkParameters() {
         lower.unmark();
         upper.unmark();
@@ -66,19 +68,36 @@ public class BetweenPredicateImpl extend
         property.markUpperBound(candidateIndices, this, false);
     }
 
+    @Override
+    public void markBoundsForCandidateIndices(CandidateIndexImpl[] candidateIndices) {
+        property.markLowerBound(candidateIndices, this, false);
+        property.markUpperBound(candidateIndices, this, false);
+    }
+
     /** Set the upper and lower bounds for the operation.
      * Delegate to the property to actually call the setBounds for each
      * of upper and lower bound.
      * @param context the query context that contains the parameter values
      * @param op the index scan operation on which to set bounds
+     * @return an indicator of which bound(s) were actually set
      */
     @Override
-    public void operationSetBounds(QueryExecutionContext context,
+    public int operationSetBounds(QueryExecutionContext context,
             IndexScanOperation op, boolean lastColumn) {
-        property.operationSetBounds(lower.getParameterValue(context),
-                IndexScanOperation.BoundType.BoundLE, op);
-        property.operationSetBounds(upper.getParameterValue(context),
-                IndexScanOperation.BoundType.BoundGE, op);
+        int result = NO_BOUND_SET;
+        Object lowerValue = lower.getParameterValue(context);
+        Object upperValue = upper.getParameterValue(context);
+        if (lowerValue != null) {
+            property.operationSetBounds(lowerValue,
+                    IndexScanOperation.BoundType.BoundLE, op);
+            result |= LOWER_BOUND_SET;
+        }
+        if (upperValue != null) {
+            property.operationSetBounds(upperValue,
+                    IndexScanOperation.BoundType.BoundGE, op);
+            result |= UPPER_BOUND_SET;
+        }
+        return result;
     }
 
     /** Set the upper bound for the operation.
@@ -88,10 +107,15 @@ public class BetweenPredicateImpl extend
      * @param op the index scan operation on which to set bounds
      */
     @Override
-    public void operationSetUpperBound(QueryExecutionContext context,
+    public int operationSetUpperBound(QueryExecutionContext context,
             IndexScanOperation op, boolean lastColumn) {
-        property.operationSetBounds(upper.getParameterValue(context),
-                IndexScanOperation.BoundType.BoundGE, op);
+        Object upperValue = upper.getParameterValue(context);
+        if (upperValue != null) {
+            property.operationSetBounds(upperValue,
+                    IndexScanOperation.BoundType.BoundGE, op);
+            return UPPER_BOUND_SET;
+        }
+        return NO_BOUND_SET;
     }
 
     /** Set the lower bound for the operation.
@@ -101,10 +125,15 @@ public class BetweenPredicateImpl extend
      * @param op the index scan operation on which to set bounds
      */
     @Override
-    public void operationSetLowerBound(QueryExecutionContext context,
+    public int operationSetLowerBound(QueryExecutionContext context,
             IndexScanOperation op, boolean lastColumn) {
-        property.operationSetBounds(lower.getParameterValue(context),
-                IndexScanOperation.BoundType.BoundLE, op);
+        Object lowerValue = lower.getParameterValue(context);
+        if (lowerValue != null) {
+            property.operationSetBounds(lowerValue,
+                    IndexScanOperation.BoundType.BoundLE, op);
+            return LOWER_BOUND_SET;
+        }
+        return NO_BOUND_SET;
     }
 
     /** Create a filter for the operation. Set the condition into the
@@ -140,4 +169,9 @@ public class BetweenPredicateImpl extend
                 ScanFilter.BinaryCondition.COND_LE, filter);
     }
 
+    @Override 
+    public boolean isUsable(QueryExecutionContext context) {
+        return !(lower.getParameterValue(context) == null || upper.getParameterValue(context) == null);
+    }
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/CandidateIndexImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/CandidateIndexImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/CandidateIndexImpl.java	2011-10-18 22:54:36 +0000
@@ -46,7 +46,7 @@ import java.util.List;
  * one for each index containing the column referenced by the query term.
  * 
  */
-public class CandidateIndexImpl {
+public final class CandidateIndexImpl {
 
     /** My message translator */
     static final I18NHelper local = I18NHelper.getInstance(CandidateIndexImpl.class);
@@ -63,6 +63,7 @@ public class CandidateIndexImpl {
     private CandidateColumnImpl[] candidateColumns = null;
     private ScanType scanType = PredicateImpl.ScanType.TABLE_SCAN;
     private int fieldScore = 1;
+    protected int score = 0;
 
     public CandidateIndexImpl(
             String className, Index storeIndex, boolean unique, AbstractDomainFieldHandlerImpl[] fields) {
@@ -114,7 +115,7 @@ public class CandidateIndexImpl {
 
     @Override
     public String toString() {
-        StringBuffer buffer = new StringBuffer();
+        StringBuilder buffer = new StringBuilder();
         buffer.append("CandidateIndexImpl for class: ");
         buffer.append(className);
         buffer.append(" index: ");
@@ -174,13 +175,12 @@ public class CandidateIndexImpl {
      * The last query term (candidate column) for each of the lower and upper bound is noted.
      * The method is synchronized because the method modifies the state of the instance,
      * which might be shared by multiple threads.
-     * @return the score of this index.
      */
-    synchronized int getScore() {
+    synchronized void score() {
+        score = 0;
         if (candidateColumns == null) {
-            return 0;
+            return;
         }
-        int result = 0;
         boolean lowerBoundDone = false;
         boolean upperBoundDone = false;
         if (unique) {
@@ -188,7 +188,7 @@ public class CandidateIndexImpl {
             for (CandidateColumnImpl column: candidateColumns) {
                 if (!(column.equalBound)) {
                     // not equal bound; can't use unique index
-                    return result;
+                    return;
                 }
             }
             if ("PRIMARY".equals(indexName)) {
@@ -196,7 +196,8 @@ public class CandidateIndexImpl {
             } else {
                 scanType = PredicateImpl.ScanType.UNIQUE_KEY;
             }
-            return 100;
+            score = 100;
+            return;
         } else {
             // range index
             // leading columns need any kind of bound
@@ -205,22 +206,22 @@ public class CandidateIndexImpl {
                 if ((candidateColumn.equalBound)) {
                     scanType = PredicateImpl.ScanType.INDEX_SCAN;
                     if (!lowerBoundDone) {
-                        result += fieldScore;
+                        score += fieldScore;
                         lastLowerBoundColumn = candidateColumn;
                     }
                     if (!upperBoundDone) {
-                        result += fieldScore;
+                        score += fieldScore;
                         lastUpperBoundColumn = candidateColumn;
                     }
                 } else if ((candidateColumn.inBound)) {
                     scanType = PredicateImpl.ScanType.INDEX_SCAN;
                     multiRange = true;
                     if (!lowerBoundDone) {
-                        result += fieldScore;
+                        score += fieldScore;
                         lastLowerBoundColumn = candidateColumn;
                     }
                     if (!upperBoundDone) {
-                        result += fieldScore;
+                        score += fieldScore;
                         lastUpperBoundColumn = candidateColumn;
                     }
                 } else if (!(lowerBoundDone && upperBoundDone)) {
@@ -233,7 +234,7 @@ public class CandidateIndexImpl {
                     }
                     if (!lowerBoundDone) {
                         if (hasLowerBound) {
-                            result += fieldScore;
+                            score += fieldScore;
                             lastLowerBoundColumn = candidateColumn;
                         } else {
                             lowerBoundDone = true;
@@ -241,7 +242,7 @@ public class CandidateIndexImpl {
                     }
                     if (!upperBoundDone) {
                         if (hasUpperBound) {
-                            result += fieldScore;
+                            score += fieldScore;
                             lastUpperBoundColumn = candidateColumn;
                         } else {
                             upperBoundDone = true;
@@ -259,7 +260,7 @@ public class CandidateIndexImpl {
                 lastUpperBoundColumn.markLastUpperBoundColumn();
             }
         }
-        return result;
+        return;
     }
 
     public ScanType getScanType() {
@@ -350,6 +351,7 @@ public class CandidateIndexImpl {
     class CandidateColumnImpl {
 
         protected AbstractDomainFieldHandlerImpl domainFieldHandler;
+        protected PredicateImpl predicate;
         protected PredicateImpl lowerBoundPredicate;
         protected PredicateImpl upperBoundPredicate;
         protected PredicateImpl equalPredicate;
@@ -375,7 +377,6 @@ public class CandidateIndexImpl {
         }
 
         public int getParameterSize(QueryExecutionContext context) {
-            // TODO Auto-generated method stub
             return inPredicate.getParameterSize(context);
         }
 
@@ -402,21 +403,25 @@ public class CandidateIndexImpl {
         private void markLowerBound(PredicateImpl predicate, boolean strict) {
             lowerBoundStrict = strict;
             this.lowerBoundPredicate = predicate;
+            this.predicate = predicate;
         }
 
         private void markUpperBound(PredicateImpl predicate, boolean strict) {
             upperBoundStrict = strict;
             this.upperBoundPredicate = predicate;
+            this.predicate = predicate;
         }
 
         private void markEqualBound(PredicateImpl predicate) {
             equalBound = true;
             this.equalPredicate = predicate;
+            this.predicate = predicate;
         }
 
         public void markInBound(InPredicateImpl predicate) {
             inBound = true;
             this.inPredicate = predicate;
+            this.predicate = predicate;
         }
 
         /** Set bounds into each predicate that has been defined.
@@ -428,6 +433,8 @@ public class CandidateIndexImpl {
         private int operationSetBounds(
                 QueryExecutionContext context, IndexScanOperation op, int index, int boundStatus) {
 
+            int boundSet = PredicateImpl.NO_BOUND_SET;
+
             if (logger.isDetailEnabled()) logger.detail("column: " + domainFieldHandler.getName() 
                     + " boundStatus: " + boundStatus
                     + " lastLowerBoundColumn: " + lastLowerBoundColumn
@@ -439,51 +446,53 @@ public class CandidateIndexImpl {
                 case BOUND_STATUS_NO_BOUND_DONE:
                     // can set either/both lower or upper bound
                     if (equalPredicate != null) {
-                        equalPredicate.operationSetBounds(context, op, true);
+                        boundSet |= equalPredicate.operationSetBounds(context, op, true);
                     }
                     if (inPredicate != null) {
-                        inPredicate.operationSetBound(context, op, index, true);
+                        boundSet |= inPredicate.operationSetBound(context, op, index, true);
                     }
                     if (lowerBoundPredicate != null) {
-                        lowerBoundPredicate.operationSetLowerBound(context, op, lastLowerBoundColumn);
+                        boundSet |= lowerBoundPredicate.operationSetLowerBound(context, op, lastLowerBoundColumn);
                     }
                     if (upperBoundPredicate != null) {
-                        upperBoundPredicate.operationSetUpperBound(context, op, lastUpperBoundColumn);
+                        boundSet |= upperBoundPredicate.operationSetUpperBound(context, op, lastUpperBoundColumn);
                     }
                     break;
                 case BOUND_STATUS_LOWER_BOUND_DONE:
                     // cannot set lower, only upper bound
                     if (equalPredicate != null) {
-                        equalPredicate.operationSetUpperBound(context, op, lastUpperBoundColumn);
+                        boundSet |= equalPredicate.operationSetUpperBound(context, op, lastUpperBoundColumn);
                     }
                     if (inPredicate != null) {
-                        inPredicate.operationSetUpperBound(context, op, index);
+                        boundSet |= inPredicate.operationSetUpperBound(context, op, index);
                     }
                     if (upperBoundPredicate != null) {
-                        upperBoundPredicate.operationSetUpperBound(context, op, lastUpperBoundColumn);
+                        boundSet |= upperBoundPredicate.operationSetUpperBound(context, op, lastUpperBoundColumn);
                     }
                     break;
                 case BOUND_STATUS_UPPER_BOUND_DONE:
                     // cannot set upper, only lower bound
                     if (equalPredicate != null) {
-                        equalPredicate.operationSetLowerBound(context, op, lastLowerBoundColumn);
+                        boundSet |= equalPredicate.operationSetLowerBound(context, op, lastLowerBoundColumn);
                     }
                     if (inPredicate != null) {
-                        inPredicate.operationSetLowerBound(context, op, index);
+                        boundSet |= inPredicate.operationSetLowerBound(context, op, index);
                     }
                     if (lowerBoundPredicate != null) {
-                        lowerBoundPredicate.operationSetLowerBound(context, op, lastLowerBoundColumn);
+                        boundSet |= lowerBoundPredicate.operationSetLowerBound(context, op, lastLowerBoundColumn);
                     }
                     break;
             }
-            if (!hasLowerBound()) {
-                // if this has no lower bound, set lower bound done
+            if (0 == (boundSet & PredicateImpl.LOWER_BOUND_SET)) {
+                // didn't set lower bound
                 boundStatus |= BOUND_STATUS_LOWER_BOUND_DONE;
             }
-            if (!hasUpperBound()) {
-                // if this has no upper bound, set upper bound done
+
+            if (0 == (boundSet & PredicateImpl.UPPER_BOUND_SET)) {
+                // didn't set upper bound
                 boundStatus |= BOUND_STATUS_UPPER_BOUND_DONE;
             }
+
             return boundStatus;
         }
 
@@ -511,8 +520,33 @@ public class CandidateIndexImpl {
         return storeIndex;
     }
 
+    public int getScore() {
+        return score;
+    }
+
     public boolean isMultiRange() {
         return multiRange;
     }
 
+    public boolean isUnique() {
+        return unique;
+    }
+
+    /** Is this index usable in the current context?
+     * If a primary or unique index, all parameters must be non-null.
+     * If a btree index, the parameter for the first comparison must be non-null
+     * @param context the query execution context
+     * @return true if all relevant parameters in the context are non-null
+     */
+    public boolean isUsable(QueryExecutionContext context) {
+        if (unique) {
+            return context.hasNoNullParameters();
+        } else {
+            // the first parameter must not be null
+            CandidateColumnImpl candidateColumn = candidateColumns[0];
+            PredicateImpl predicate = candidateColumn.predicate;
+            return predicate.isUsable(context);
+        }
+    }
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/ComparativePredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/ComparativePredicateImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/ComparativePredicateImpl.java	2011-10-18 22:54:36 +0000
@@ -46,10 +46,12 @@ public abstract class ComparativePredica
         param.setProperty(property);
     }
 
+    @Override
     public void markParameters() {
         param.mark();
     }
 
+    @Override
     public void unmarkParameters() {
         param.unmark();
     }
@@ -61,17 +63,27 @@ public abstract class ComparativePredica
     }
 
     @Override
-    public void operationSetLowerBound(QueryExecutionContext context,
+    public int operationSetLowerBound(QueryExecutionContext context,
             IndexScanOperation op, boolean lastColumn) {
         // delegate to setBounds for most operations
-        operationSetBounds(context, op, lastColumn);
+        return operationSetBounds(context, op, lastColumn);
     }
 
     @Override
-    public void operationSetUpperBound(QueryExecutionContext context,
+    public int operationSetUpperBound(QueryExecutionContext context,
             IndexScanOperation op, boolean lastColumn) {
         // delegate to setBounds for most operations
-        operationSetBounds(context, op, lastColumn);
+        return operationSetBounds(context, op, lastColumn);
+    }
+
+    @Override
+    public ParameterImpl getParameter() {
+        return param;
+    }
+
+    @Override 
+    public boolean isUsable(QueryExecutionContext context) {
+        return param.getParameterValue(context) != null;
     }
 
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/EqualPredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/EqualPredicateImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/EqualPredicateImpl.java	2011-10-18 22:54:36 +0000
@@ -40,21 +40,44 @@ public class EqualPredicateImpl extends 
     }
 
     @Override
-    public void operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
+    public void markBoundsForCandidateIndices(CandidateIndexImpl[] candidateIndices) {
+        property.markEqualBound(candidateIndices, this);
+    }
+
+    @Override
+    public int operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
         // can always set boundEQ
-        property.operationSetBounds(param.getParameterValue(context), IndexScanOperation.BoundType.BoundEQ, op);
+        Object value = param.getParameterValue(context);
+        if (value != null) {
+            property.operationSetBounds(value, IndexScanOperation.BoundType.BoundEQ, op);
+            return BOTH_BOUNDS_SET;
+        } else {
+            return NO_BOUND_SET;
+        }
     }
 
     @Override
-    public void operationSetLowerBound(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
+    public int operationSetLowerBound(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
         // only set lower bound
-        property.operationSetBounds(param.getParameterValue(context), IndexScanOperation.BoundType.BoundLE, op);
+        Object value = param.getParameterValue(context);
+        if (value != null) {
+            property.operationSetBounds(value, IndexScanOperation.BoundType.BoundLE, op);
+            return LOWER_BOUND_SET;
+        } else {
+            return NO_BOUND_SET;
+        }
     }
 
     @Override
-    public void operationSetUpperBound(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
+    public int operationSetUpperBound(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
         // only set upper bound
-        property.operationSetBounds(param.getParameterValue(context), IndexScanOperation.BoundType.BoundGE, op);
+        Object value = param.getParameterValue(context);
+        if (value != null) {
+            property.operationSetBounds(value, IndexScanOperation.BoundType.BoundGE, op);
+            return UPPER_BOUND_SET;
+        } else {
+            return NO_BOUND_SET;
+        }
     }
 
     @Override

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/GreaterEqualPredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/GreaterEqualPredicateImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/GreaterEqualPredicateImpl.java	2011-10-18 22:54:36 +0000
@@ -39,8 +39,19 @@ public class GreaterEqualPredicateImpl e
     }
 
     @Override
-    public void operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
-        property.operationSetBounds(param.getParameterValue(context), IndexScanOperation.BoundType.BoundLE, op);
+    public void markBoundsForCandidateIndices(CandidateIndexImpl[] candidateIndices) {
+        property.markLowerBound(candidateIndices, this, false);
+    }
+
+    @Override
+    public int operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
+        Object lowerBound = param.getParameterValue(context);
+        if (lowerBound != null) {
+            property.operationSetBounds(lowerBound, IndexScanOperation.BoundType.BoundLE, op);
+            return LOWER_BOUND_SET;
+        } else {
+            return NO_BOUND_SET;
+        }
     }
 
     /** Set the condition into the filter.

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/GreaterThanPredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/GreaterThanPredicateImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/GreaterThanPredicateImpl.java	2011-10-18 22:54:36 +0000
@@ -39,13 +39,24 @@ public class GreaterThanPredicateImpl ex
     }
 
     @Override
-    public void operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
-        if (lastColumn) {
-            // last column may be strict
-            property.operationSetBounds(param.getParameterValue(context), IndexScanOperation.BoundType.BoundLT, op);
+    public void markBoundsForCandidateIndices(CandidateIndexImpl[] candidateIndices) {
+        property.markLowerBound(candidateIndices, this, true);
+    }
+
+    @Override
+    public int operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
+        Object lowerValue = param.getParameterValue(context);
+        if (lowerValue != null) {
+            if (lastColumn) {
+                // last column may be strict
+                property.operationSetBounds(lowerValue, IndexScanOperation.BoundType.BoundLT, op);
+            } else {
+                // not-last column must not be strict
+                property.operationSetBounds(lowerValue, IndexScanOperation.BoundType.BoundLE, op);
+            }
+            return LOWER_BOUND_SET;
         } else {
-            // not-last column must not be strict
-            property.operationSetBounds(param.getParameterValue(context), IndexScanOperation.BoundType.BoundLE, op);
+            return NO_BOUND_SET;
         }
     }
 

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/InPredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/InPredicateImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/InPredicateImpl.java	2011-10-18 22:54:36 +0000
@@ -58,7 +58,7 @@ public class InPredicateImpl extends Pre
     }
 
     @Override
-    void markBoundsForCandidateIndices(QueryExecutionContext context,
+    public void markBoundsForCandidateIndices(QueryExecutionContext context,
             CandidateIndexImpl[] candidateIndices) {
         if (parameter.getParameterValue(context) == null) {
             // null parameters cannot be used with index scans
@@ -67,6 +67,11 @@ public class InPredicateImpl extends Pre
         property.markInBound(candidateIndices, this);
     }
 
+    @Override
+    public void markBoundsForCandidateIndices(CandidateIndexImpl[] candidateIndices) {
+        property.markInBound(candidateIndices, this);
+    }
+
     /** Set bound for the multi-valued parameter identified by the index.
      * 
      * @param context the query execution context
@@ -74,27 +79,27 @@ public class InPredicateImpl extends Pre
      * @param index the index into the parameter list
      * @param lastColumn if true, can set strict bound
      */
-    public void operationSetBound(
+    public int operationSetBound(
             QueryExecutionContext context, IndexScanOperation op, int index, boolean lastColumn) {
         if (lastColumn) {
             // last column can be strict
-            operationSetBound(context, op, index, BoundType.BoundEQ);
+            return operationSetBound(context, op, index, BoundType.BoundEQ);
         } else {
             // not last column cannot be strict
-            operationSetBound(context, op, index, BoundType.BoundLE);
-            operationSetBound(context, op, index, BoundType.BoundGE);
+            return operationSetBound(context, op, index, BoundType.BoundLE) |
+                    operationSetBound(context, op, index, BoundType.BoundGE);
         }
     }
 
-    public void operationSetUpperBound(QueryExecutionContext context, IndexScanOperation op, int index) {
-        operationSetBound(context, op, index, BoundType.BoundGE);
+    public int operationSetUpperBound(QueryExecutionContext context, IndexScanOperation op, int index) {
+        return operationSetBound(context, op, index, BoundType.BoundGE);
     }
 
-    public void operationSetLowerBound(QueryExecutionContext context, IndexScanOperation op, int index) {
-        operationSetBound(context, op, index, BoundType.BoundLE);
+    public int operationSetLowerBound(QueryExecutionContext context, IndexScanOperation op, int index) {
+        return operationSetBound(context, op, index, BoundType.BoundLE);
     }
 
-    private void operationSetBound(
+    private int operationSetBound(
             QueryExecutionContext context, IndexScanOperation op, int index, BoundType boundType) {
     Object parameterValue = parameter.getParameterValue(context);
         if (parameterValue == null) {
@@ -103,8 +108,8 @@ public class InPredicateImpl extends Pre
         } else if (parameterValue instanceof List<?>) {
             List<?> parameterList = (List<?>)parameterValue;
             Object value = parameterList.get(index);
-            property.operationSetBounds(value, boundType, op);
             if (logger.isDetailEnabled()) logger.detail("InPredicateImpl.operationSetBound for " + property.fmd.getName() + " List index: " + index + " value: " + value + " boundType: " + boundType);
+            property.operationSetBounds(value, boundType, op);
         } else if (parameterValue.getClass().isArray()) {
             Object[] parameterArray = (Object[])parameterValue;
             Object value = parameterArray[index];
@@ -115,6 +120,7 @@ public class InPredicateImpl extends Pre
                     local.message("ERR_Parameter_Wrong_Type", parameter.parameterName,
                             parameterValue.getClass().getName(), "List<?> or Object[]"));
         }
+        return BOTH_BOUNDS_SET;
     }
 
     /** Set bounds for the multi-valued parameter identified by the index.
@@ -157,6 +163,7 @@ public class InPredicateImpl extends Pre
      * @param context the query execution context with the parameter values
      * @param op the operation
      */
+    @Override
     public void filterCmpValue(QueryExecutionContext context,
             ScanOperation op) {
         try {
@@ -176,6 +183,7 @@ public class InPredicateImpl extends Pre
      * @param op the operation
      * @param filter the existing filter
      */
+    @Override
     public void filterCmpValue(QueryExecutionContext context, ScanOperation op, ScanFilter filter) {
         try {
             filter.begin(Group.GROUP_OR);
@@ -190,8 +198,8 @@ public class InPredicateImpl extends Pre
                 }
             } else if (parameterValue.getClass().isArray()) {
                 Object[] parameterArray = (Object[])parameterValue;
-                for (Object parameter: parameterArray) {
-                    property.filterCmpValue(parameter, BinaryCondition.COND_EQ, filter);
+                for (Object value: parameterArray) {
+                    property.filterCmpValue(value, BinaryCondition.COND_EQ, filter);
                 }
             } else {
                 throw new ClusterJUserException(
@@ -230,4 +238,9 @@ public class InPredicateImpl extends Pre
         return result;
     }
 
+    @Override 
+    public boolean isUsable(QueryExecutionContext context) {
+        return parameter.getParameterValue(context) != null;
+    }
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/LessEqualPredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/LessEqualPredicateImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/LessEqualPredicateImpl.java	2011-10-18 22:54:36 +0000
@@ -39,8 +39,19 @@ public class LessEqualPredicateImpl exte
     }
 
     @Override
-    public void operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
-        property.operationSetBounds(param.getParameterValue(context), IndexScanOperation.BoundType.BoundGE, op);
+    public void markBoundsForCandidateIndices(CandidateIndexImpl[] candidateIndices) {
+        property.markUpperBound(candidateIndices, this, false);
+    }
+
+    @Override
+    public int operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
+        Object upperValue = param.getParameterValue(context);
+        if (upperValue != null) {
+            property.operationSetBounds(upperValue, IndexScanOperation.BoundType.BoundGE, op);
+            return UPPER_BOUND_SET;
+        } else {
+            return NO_BOUND_SET;
+        }
     }
 
     /** Set the condition into the filter.

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/LessThanPredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/LessThanPredicateImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/LessThanPredicateImpl.java	2011-10-18 22:54:36 +0000
@@ -39,13 +39,24 @@ public class LessThanPredicateImpl exten
     }
 
     @Override
-    public void operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
-        if (lastColumn) {
-            // last column may be strict
-            property.operationSetBounds(param.getParameterValue(context), IndexScanOperation.BoundType.BoundGT, op);
+    public void markBoundsForCandidateIndices(CandidateIndexImpl[] candidateIndices) {
+        property.markUpperBound(candidateIndices, this, true);
+    }
+
+    @Override
+    public int operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
+        Object upperValue = param.getParameterValue(context);
+        if (upperValue != null) {
+            if (lastColumn) {
+                // last column may be strict
+                property.operationSetBounds(upperValue, IndexScanOperation.BoundType.BoundGT, op);
+            } else {
+                // not-last column must not be strict
+                property.operationSetBounds(upperValue, IndexScanOperation.BoundType.BoundGE, op);
+            }
+            return UPPER_BOUND_SET;
         } else {
-            // not-last column must not be strict
-            property.operationSetBounds(param.getParameterValue(context), IndexScanOperation.BoundType.BoundGE, op);
+            return NO_BOUND_SET;
         }
     }
 

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PredicateImpl.java	2011-10-02 21:20:50 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PredicateImpl.java	2011-10-18 22:54:36 +0000
@@ -34,6 +34,9 @@ import com.mysql.clusterj.core.util.Logg
 
 import com.mysql.clusterj.query.Predicate;
 
+import java.util.Comparator;
+import java.util.TreeSet;
+
 public abstract class PredicateImpl implements Predicate {
 
     /** My message translator */
@@ -45,6 +48,20 @@ public abstract class PredicateImpl impl
     /** My domain object. */
     protected QueryDomainTypeImpl<?> dobj;
 
+    /** The primary/unique index for this query if it exists */
+    CandidateIndexImpl uniqueIndex;
+
+    /** The comparator for candidate indices, ordered descending by score */
+    Comparator<CandidateIndexImpl> candidateIndexComparator = new Comparator<CandidateIndexImpl>() {
+        public int compare(CandidateIndexImpl o1, CandidateIndexImpl o2) {
+            return o1.score == o2.score ? o1.getIndexName().compareTo(o2.getIndexName()) : o2.score - o1.score;
+        }
+    };
+
+    /** The candidate indices ordered by score */
+    private TreeSet<CandidateIndexImpl> scoredCandidateIndices =
+        new TreeSet<CandidateIndexImpl>(candidateIndexComparator);
+
     /** Scan types. */
     protected enum ScanType {
         INDEX_SCAN,
@@ -53,6 +70,15 @@ public abstract class PredicateImpl impl
         PRIMARY_KEY
     }
 
+    /** Indicates no bound set while setting bounds on index operations */
+    public static final int NO_BOUND_SET = 0;
+    /** Indicates lower bound set while setting bounds on index operations */
+    public static final int LOWER_BOUND_SET = 1;
+    /** Indicates upper bound set while setting bounds on index operations */
+    public static final int UPPER_BOUND_SET = 2;
+    /** Indicates both bounds set while setting bounds on index operations */
+    public static final int BOTH_BOUNDS_SET = 3;
+
     public PredicateImpl(QueryDomainTypeImpl<?> dobj) {
         this.dobj = dobj;
     }
@@ -84,19 +110,19 @@ public abstract class PredicateImpl impl
         // default is nothing to do
     }
 
-    public void operationSetBounds(QueryExecutionContext context,
+    public int operationSetBounds(QueryExecutionContext context,
             IndexScanOperation op, boolean lastColumn) {
         throw new ClusterJFatalInternalException(
                 local.message("ERR_Implementation_Should_Not_Occur"));
     }
 
-    public void operationSetLowerBound(QueryExecutionContext context,
+    public int operationSetLowerBound(QueryExecutionContext context,
             IndexScanOperation op, boolean lastColumn) {
         throw new ClusterJFatalInternalException(
                 local.message("ERR_Implementation_Should_Not_Occur"));
     }
 
-    public void operationSetUpperBound(QueryExecutionContext context,
+    public int operationSetUpperBound(QueryExecutionContext context,
             IndexScanOperation op, boolean lastColumn){
         throw new ClusterJFatalInternalException(
                 local.message("ERR_Implementation_Should_Not_Occur"));
@@ -181,60 +207,106 @@ public abstract class PredicateImpl impl
     }
 
     public CandidateIndexImpl getBestCandidateIndex(QueryExecutionContext context) {
-        return getBestCandidateIndexFor(context, this);
+        return getBestCandidateIndexFor(context, getTopLevelPredicates());
     }
 
     /** Get the best candidate index for the query, considering all indices
-     * defined and all predicates in the query.
+     * defined and all predicates in the query. If a unique index is usable
+     * (no null parameters) then return it. Otherwise, simply choose the
+     * first index for which there is at least one leading non-null parameter.
      * @param predicates the predicates
      * @return the best index for the query
      */
     protected CandidateIndexImpl getBestCandidateIndexFor(QueryExecutionContext context,
             PredicateImpl... predicates) {
-        // Create CandidateIndexImpl to decide how to scan.
+        // if there is a primary/unique index, see if it can be used in the current context
+        if (uniqueIndex != null && uniqueIndex.isUsable(context)) {
+            if (logger.isDebugEnabled()) logger.debug("usable unique index: " + uniqueIndex.getIndexName());
+            return uniqueIndex;
+        }
+        // find the best candidate index by returning the highest scoring index that is usable
+        // in the current context; i.e. has non-null parameters
+        // TODO: it might be better to score indexes again considering the current context
+        for (CandidateIndexImpl index: scoredCandidateIndices) {
+            if (index.isUsable(context)) {
+                if (logger.isDebugEnabled()) logger.debug("usable ordered index: " + index.getIndexName());
+                return index;
+            }
+        }
+        // there is no index that is usable in the current context
+        return CandidateIndexImpl.getIndexForNullWhereClause();
+
+    }
+
+    /** Get the number of conditions in the top level predicate.
+     * This is used to determine whether a hash index can be used. If there
+     * are exactly the number of conditions as index columns, then the
+     * hash index might be used.
+     * By default (for equal, greaterThan, lessThan, greaterEqual, lessEqual)
+     * there is one condition.
+     * AndPredicateImpl overrides this method.
+     * @return the number of conditions
+     */
+    protected int getNumberOfConditionsInPredicate() {
+        return 1;
+    }
+
+    /** Analyze this predicate to determine whether a primary key, unique key, or ordered index
+     * might be used. The result will be used during query execution once the actual parameters
+     * are known.
+     */
+    public void prepare() {
+        // Create CandidateIndexImpls
         CandidateIndexImpl[] candidateIndices = dobj.createCandidateIndexes();
         // Iterate over predicates and have each one register with
         // candidate indexes.
-        for (PredicateImpl predicateImpl : predicates) {
-            predicateImpl.markBoundsForCandidateIndices(context, candidateIndices);
+        for (PredicateImpl predicateImpl : getTopLevelPredicates()) {
+            predicateImpl.markBoundsForCandidateIndices(candidateIndices);
         }
-        // Iterate over candidate indices to find one that is usable.
-        int highScore = 0;
-        // Holder for the best index; default to the index for null where clause
-        CandidateIndexImpl bestCandidateIndexImpl = 
-                CandidateIndexImpl.getIndexForNullWhereClause();
+        // Iterate over candidate indices to find those that are usable.
         // Hash index operations require the predicates to have no extra conditions
         // beyond the index columns.
+        // Btree index operations are ranked by the number of usable conditions
         int numberOfConditions = getNumberOfConditionsInPredicate();
         for (CandidateIndexImpl candidateIndex : candidateIndices) {
             if (candidateIndex.supportsConditionsOfLength(numberOfConditions)) {
-                // opportunity for a user-defined plugin to evaluate indices
+                candidateIndex.score();
                 int score = candidateIndex.getScore();
-                if (logger.isDetailEnabled()) {
-                    logger.detail("Score: " + score + " from " + candidateIndex);
+                if (score != 0) {
+                    if (candidateIndex.isUnique()) {
+                        // there can be only one unique index for a given predicate
+                        uniqueIndex = candidateIndex;
+                    } else {
+                        // add possible indices to ordered map
+                        scoredCandidateIndices.add(candidateIndex);
+                    }
                 }
-                if (score > highScore) {
-                    bestCandidateIndexImpl = candidateIndex;
-                    highScore = score;
+                if (logger.isDetailEnabled()) {
+                    logger.detail("Score: " + score + " from " + candidateIndex.getIndexName());
                 }
             }
         }
-        if (logger.isDetailEnabled()) logger.detail("High score: " + highScore
-                + " from " + bestCandidateIndexImpl.getIndexName());
-        return bestCandidateIndexImpl;
     }
 
-    /** Get the number of conditions in the top level predicate.
-     * This is used to determine whether a hash index can be used. If there
-     * are exactly the number of conditions as index columns, then the
-     * hash index might be used.
-     * By default (for equal, greaterThan, lessThan, greaterEqual, lessEqual)
-     * there is one condition.
-     * AndPredicateImpl overrides this method.
-     * @return the number of conditions
+    protected void markBoundsForCandidateIndices(CandidateIndexImpl[] candidateIndices) {
+        // default is nothing to do
+    }
+
+    /** Return an array of top level predicates that might be used with indices.
+     * 
+     * @return an array of top level predicates (defaults to {this}).
      */
-    protected int getNumberOfConditionsInPredicate() {
-        return 1;
+    protected PredicateImpl[] getTopLevelPredicates() {
+        return new PredicateImpl[] {this};
+    }
+
+    public ParameterImpl getParameter() {
+        // default is there is no parameter for this predicate
+        return null;
+    }
+
+    public boolean isUsable(QueryExecutionContext context) {
+        return false;
     }
 
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PropertyImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PropertyImpl.java	2011-10-02 21:20:50 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PropertyImpl.java	2011-10-18 22:54:36 +0000
@@ -54,6 +54,11 @@ public class PropertyImpl implements Pre
         this.fmd = fmd;
     }
 
+    @Override
+    public String toString() {
+        return fmd.getName();
+    }
+
     public void setComplexParameter() {
         complexParameter = true;
     }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java	2011-10-20 19:41:56 +0000
@@ -118,6 +118,12 @@ public class QueryDomainTypeImpl<T> impl
         }
         this.where = (PredicateImpl)predicate;
         where.markParameters();
+        // statically analyze the where clause, looking for:
+        // primary keys all specified with equal
+        // unique keys all specified with equal
+        // btree index keys partly specified with ranges
+        // none of the above
+        where.prepare();
         return this;
     }
 
@@ -183,12 +189,13 @@ public class QueryDomainTypeImpl<T> impl
      * @throws ClusterJUserException if not all parameters are bound
      */
     public ResultData getResultData(QueryExecutionContext context) {
-	SessionSPI session = context.getSession();
+        SessionSPI session = context.getSession();
         // execute query based on what kind of scan is needed
         // if no where clause, scan the entire table
         CandidateIndexImpl index = where==null?
-            CandidateIndexImpl.getIndexForNullWhereClause():
-            where.getBestCandidateIndex(context);
+                CandidateIndexImpl.getIndexForNullWhereClause():
+                where.getBestCandidateIndex(context);
+
         ScanType scanType = index.getScanType();
         Map<String, Object> explain = newExplain(index, scanType);
         context.setExplain(explain);

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryExecutionContextImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryExecutionContextImpl.java	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryExecutionContextImpl.java	2011-10-20 19:41:56 +0000
@@ -338,4 +338,13 @@ public class QueryExecutionContextImpl i
         return boundParameters.get(index);
     }
 
+    public boolean hasNoNullParameters() {
+        for (Object value: boundParameters.values()) {
+            if (value == null) {
+                return false;
+            }
+        }
+        return true;
+    }
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/spi/QueryExecutionContext.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/spi/QueryExecutionContext.java	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/spi/QueryExecutionContext.java	2011-10-20 19:41:56 +0000
@@ -52,4 +52,6 @@ public interface QueryExecutionContext {
 
     void deleteFilters();
 
+    boolean hasNoNullParameters();
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-jpatest/CMakeLists.txt'
--- a/storage/ndb/clusterj/clusterj-jpatest/CMakeLists.txt	2011-09-16 12:58:08 +0000
+++ b/storage/ndb/clusterj/clusterj-jpatest/CMakeLists.txt	2011-10-24 12:45:30 +0000
@@ -42,3 +42,6 @@ CREATE_JAR(clusterj-jpatest ${JAVA_SOURC
   DEPENDENCIES clusterjpa.jar
   EXTRA_FILES src/main/resources/META-INF
   BROKEN_JAVAC ${BROKEN_JAVAC})
+
+INSTALL(FILES clusterj-jpatest-${MYSQL_CLUSTER_VERSION}.jar
+        DESTINATION ${INSTALL_MYSQLSHAREDIR}/java COMPONENT ClusterJ)

=== modified file 'storage/ndb/clusterj/clusterj-openjpa/CMakeLists.txt'
--- a/storage/ndb/clusterj/clusterj-openjpa/CMakeLists.txt	2011-09-06 14:49:02 +0000
+++ b/storage/ndb/clusterj/clusterj-openjpa/CMakeLists.txt	2011-10-24 12:45:30 +0000
@@ -43,3 +43,5 @@ CREATE_JAR(clusterjpa ${JAVA_SOURCES}
   DEPENDENCIES ${DEP}
   MERGE_JARS ${JARS})
 
+INSTALL(FILES clusterjpa-${MYSQL_CLUSTER_VERSION}.jar
+        DESTINATION ${INSTALL_MYSQLSHAREDIR}/java COMPONENT ClusterJ)

=== modified file 'storage/ndb/clusterj/clusterj-test/CMakeLists.txt'
--- a/storage/ndb/clusterj/clusterj-test/CMakeLists.txt	2011-10-04 05:22:45 +0000
+++ b/storage/ndb/clusterj/clusterj-test/CMakeLists.txt	2011-10-24 12:45:30 +0000
@@ -45,3 +45,6 @@ CREATE_JAR(clusterj-test ${JAVA_SOURCES}
   EXTRA_FILES src/main/resources/META-INF
               src/main/resources/schema.sql
   BROKEN_JAVAC ${BROKEN_JAVAC})
+
+INSTALL(FILES clusterj-test-${MYSQL_CLUSTER_VERSION}.jar
+        DESTINATION ${INSTALL_MYSQLSHAREDIR}/java COMPONENT ClusterJ)

=== added file 'storage/ndb/cmake/ndb_get_config_value.cmake'
--- a/storage/ndb/cmake/ndb_get_config_value.cmake	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/cmake/ndb_get_config_value.cmake	2011-10-24 12:45:30 +0000
@@ -0,0 +1,14 @@
+MACRO(NDB_GET_CONFIG_VALUE keyword var)
+ IF(NOT ${var})
+   # Read the line which contains the keyword
+   FILE (STRINGS ${NDB_SOURCE_DIR}/VERSION str
+         REGEX "^[ ]*${keyword}=")
+   IF(str)
+     # Remove the keyword=
+     STRING(REPLACE "${keyword}=" "" str ${str})
+     # Remove whitespace
+     STRING(REGEX REPLACE "[ ].*" "" str "${str}")
+     SET(${var} ${str})
+   ENDIF()
+ ENDIF()
+ENDMACRO()

=== modified file 'storage/ndb/include/CMakeLists.txt'
--- a/storage/ndb/include/CMakeLists.txt	2011-10-03 11:34:40 +0000
+++ b/storage/ndb/include/CMakeLists.txt	2011-10-24 12:45:30 +0000
@@ -28,27 +28,9 @@ CONFIGURE_FILE(ndb_types.h.in
 LIST(APPEND CPACK_SOURCE_IGNORE_FILES include/ndb_types\\\\.h$)
 
 #
-# Read a value for variable from ndb_configure.m4
-#
-MACRO(NDB_GET_CONFIG_VALUE keyword var)
- IF(NOT ${var})
-   # Read the line which contains the keyword
-   FILE (STRINGS ${NDB_SOURCE_DIR}/VERSION str
-         REGEX "^[ ]*${keyword}=")
-   IF(str)
-     # Remove the keyword=
-     STRING(REPLACE "${keyword}=" "" str ${str})
-     # Remove whitespace
-     STRING(REGEX REPLACE "[ ].*" "" str "${str}")
-     SET(${var} ${str})
-   ENDIF()
- ENDIF()
-ENDMACRO()
-
-#
 # Read ndb_configure.m4 and extract the NDB_VERSION_XX=YY variables
 #
-
+INCLUDE("${NDB_SOURCE_DIR}/cmake/ndb_get_config_value.cmake")
 NDB_GET_CONFIG_VALUE(NDB_VERSION_MAJOR major)
 SET(NDB_VERSION_MAJOR "${major}" CACHE INTERNAL "NDB Major Version" FORCE)
 
@@ -91,3 +73,55 @@ CONFIGURE_FILE(ndb_version.h.in
 # Exclude ndb_version.h from "make dist"
 LIST(APPEND CPACK_SOURCE_IGNORE_FILES include/ndb_version\\\\.h$)
 
+
+# Install public headers
+SET(NDB_GENERAL_HEADERS
+    ndb_constants.h
+    ndb_init.h
+    ndb_types.h
+    ndb_version.h)
+
+
+SET(NDB_NDBAPI_HEADERS
+    ndbapi/ndbapi_limits.h
+    ndbapi/ndb_opt_defaults.h
+    ndbapi/Ndb.hpp
+    ndbapi/NdbApi.hpp
+    ndbapi/NdbTransaction.hpp
+    ndbapi/NdbDictionary.hpp
+    ndbapi/NdbError.hpp
+    ndbapi/NdbEventOperation.hpp
+    ndbapi/NdbIndexOperation.hpp
+    ndbapi/NdbOperation.hpp
+    ndbapi/ndb_cluster_connection.hpp
+    ndbapi/NdbBlob.hpp
+    ndbapi/NdbPool.hpp
+    ndbapi/NdbRecAttr.hpp
+    ndbapi/NdbReceiver.hpp
+    ndbapi/NdbScanFilter.hpp
+    ndbapi/NdbScanOperation.hpp
+    ndbapi/NdbIndexScanOperation.hpp
+    ndbapi/NdbIndexStat.hpp
+    ndbapi/ndberror.h
+    ndbapi/NdbInterpretedCode.hpp)
+
+SET(NDB_MGMAPI_HEADERS
+    mgmapi/mgmapi.h
+    mgmapi/mgmapi_error.h
+    mgmapi/mgmapi_debug.h
+    mgmapi/mgmapi_config_parameters.h
+    mgmapi/mgmapi_config_parameters_debug.h
+    mgmapi/ndb_logevent.h
+    mgmapi/ndbd_exit_codes.h)
+
+INSTALL(FILES ${NDB_GENERAL_HEADERS}
+        DESTINATION ${INSTALL_INCLUDEDIR}/storage/ndb
+        COMPONENT Developement)
+
+INSTALL(FILES ${NDB_NDBAPI_HEADERS}
+        DESTINATION ${INSTALL_INCLUDEDIR}/storage/ndb/ndbapi
+        COMPONENT Developement)
+
+INSTALL(FILES ${NDB_MGMAPI_HEADERS}
+        DESTINATION ${INSTALL_INCLUDEDIR}/storage/ndb/mgmapi
+        COMPONENT Developement)

=== modified file 'storage/ndb/include/kernel/signaldata/DiGetNodes.hpp'
--- a/storage/ndb/include/kernel/signaldata/DiGetNodes.hpp	2011-09-02 09:16:56 +0000
+++ b/storage/ndb/include/kernel/signaldata/DiGetNodes.hpp	2011-10-23 08:38:06 +0000
@@ -64,7 +64,10 @@ private:
   Uint32 hashValue;
   Uint32 distr_key_indicator;
   Uint32 unused;
-  Uint32 jamBuffer[2];
+  union {
+    void * jamBufferPtr;
+    Uint32 jamBufferStorage[2];
+  };
 };
 
 #endif

=== modified file 'storage/ndb/include/ndb_global.h'
--- a/storage/ndb/include/ndb_global.h	2011-09-06 12:43:05 +0000
+++ b/storage/ndb/include/ndb_global.h	2011-10-27 12:19:57 +0000
@@ -176,7 +176,7 @@ extern "C" {
  * Zero length array not allowed in C
  * Add use of array to avoid compiler warning
  */
-#define STATIC_ASSERT(expr) { char static_assert[(expr)? 1 : 0] = {'\0'}; if (static_assert[0]) {}; }
+#define STATIC_ASSERT(expr) { char a_static_assert[(expr)? 1 : 0] = {'\0'}; if (a_static_assert[0]) {}; }
 #else
 #define STATIC_ASSERT(expr)
 #endif

=== modified file 'storage/ndb/include/ndbapi/Ndb.hpp'
--- a/storage/ndb/include/ndbapi/Ndb.hpp	2011-09-29 09:23:04 +0000
+++ b/storage/ndb/include/ndbapi/Ndb.hpp	2011-10-20 19:52:11 +0000
@@ -1762,7 +1762,19 @@ public:
   /* Get/Set per-Ndb custom data pointer */
   void setCustomData(void*);
   void* getCustomData() const;
-  
+
+  /* Get/Set per-Ndb custom data as a 64-bit value */
+  /* NOTE: shares storage with the void* custom data pointer,
+   * i.e. it can not be used together with setCustomData()
+   */
+  void setCustomData64(Uint64);
+  Uint64 getCustomData64() const;
+
+  /**
+   * Get the transaction id that the next startTransaction() on this Ndb object will use
+   */
+  Uint64 getNextTransactionId() const;
+
   /* Some client behaviour counters to assist
    * optimisation
    */

=== modified file 'storage/ndb/include/ndbapi/NdbScanOperation.hpp'
--- a/storage/ndb/include/ndbapi/NdbScanOperation.hpp	2011-09-29 09:23:04 +0000
+++ b/storage/ndb/include/ndbapi/NdbScanOperation.hpp	2011-10-19 12:19:45 +0000
@@ -252,12 +252,8 @@ public:
 #endif
   
 #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
-  NdbBlob* getBlobHandle(const char* anAttrName);
-  NdbBlob* getBlobHandle(Uint32 anAttrId);
-  /* Const variants not overloaded - underlying 
-   * const NdbOperation::getBlobHandle implementation
-   * only returns existing Blob operations 
-   */
+  virtual NdbBlob* getBlobHandle(const char* anAttrName);
+  virtual NdbBlob* getBlobHandle(Uint32 anAttrId);
 
   /** 
    * setInterpretedCode
@@ -530,12 +526,13 @@ protected:
 
   // Overloaded private methods from NdbOperation
   int init(const NdbTableImpl* tab, NdbTransaction*);
-  int prepareSend(Uint32  TC_ConnectPtr, Uint64  TransactionId);
+  int prepareSend(Uint32  TC_ConnectPtr, Uint64  TransactionId,
+                  NdbOperation::AbortOption);
   int doSend(int ProcessorId);
   virtual void setReadLockMode(LockMode lockMode);
 
-  virtual void setErrorCode(int aErrorCode);
-  virtual void setErrorCodeAbort(int aErrorCode);
+  virtual void setErrorCode(int aErrorCode) const;
+  virtual void setErrorCodeAbort(int aErrorCode) const;
   
   /* This is the transaction which defined this scan
    *   The transaction(connection) used for the scan is
@@ -678,6 +675,17 @@ protected:
 private:
   NdbScanOperation(const NdbScanOperation&); // Not impl.
   NdbScanOperation&operator=(const NdbScanOperation&);
+
+  /**
+   * Const variants overloaded...calling NdbOperation::getBlobHandle()
+   *  (const NdbOperation::getBlobHandle implementation
+   *   only returns existing Blob operations)
+   *
+   * There do not appear to be any users of these const variants,
+   * so they are made private.
+   */
+  virtual NdbBlob* getBlobHandle(const char* anAttrName) const;
+  virtual NdbBlob* getBlobHandle(Uint32 anAttrId) const;
 };
 
 inline

=== modified file 'storage/ndb/include/util/OutputStream.hpp'
--- a/storage/ndb/include/util/OutputStream.hpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/include/util/OutputStream.hpp	2011-10-24 13:14:28 +0000
@@ -33,6 +33,7 @@ public:
     ATTRIBUTE_FORMAT(printf, 2, 3) = 0;
   virtual int println(const char * fmt, ...)
     ATTRIBUTE_FORMAT(printf, 2, 3) = 0;
+  virtual int write(const void * buf, size_t len) = 0;
   virtual void flush() {};
   virtual void reset_timeout() {};
 };
@@ -48,6 +49,7 @@ public:
     ATTRIBUTE_FORMAT(printf, 2, 3);
   int println(const char * fmt, ...)
     ATTRIBUTE_FORMAT(printf, 2, 3);
+  int write(const void * buf, size_t len);
   void flush() { fflush(f); }
 };
 
@@ -67,6 +69,7 @@ public:
     ATTRIBUTE_FORMAT(printf, 2, 3);
   int println(const char * fmt, ...)
     ATTRIBUTE_FORMAT(printf, 2, 3);
+  int write(const void * buf, size_t len);
 };
 
 
@@ -82,6 +85,7 @@ public:
   int println(const char * fmt, ...)
     ATTRIBUTE_FORMAT(printf, 2, 3);
 
+  int write(const void * buf, size_t len);
   void flush();
 };
 
@@ -92,6 +96,7 @@ public:
   virtual ~NullOutputStream() {}
   int print(const char * /* unused */, ...) { return 1;}
   int println(const char * /* unused */, ...) { return 1;}
+  int write(const void * buf, size_t len) { return 1;}
 };
 
 #endif

=== modified file 'storage/ndb/src/CMakeLists.txt'
--- a/storage/ndb/src/CMakeLists.txt	2011-09-16 14:31:26 +0000
+++ b/storage/ndb/src/CMakeLists.txt	2011-10-24 12:45:30 +0000
@@ -64,6 +64,22 @@ IF(HAVE_JDK AND HAVE_JAVA)
   SET(EXTRA_SRC ${CMAKE_SOURCE_DIR}/storage/ndb/src/ndbjtie/ndbjtie_lib.cpp)
 ENDIF()
 
+#
+# Version
+#
+INCLUDE("${NDB_SOURCE_DIR}/cmake/ndb_get_config_value.cmake")
+NDB_GET_CONFIG_VALUE(NDB_SHARED_LIB_VERSION_MAJOR major)
+NDB_GET_CONFIG_VALUE(NDB_SHARED_LIB_VERSION_MINOR minor)
+NDB_GET_CONFIG_VALUE(NDB_SHARED_LIB_VERSION_BUILD build)
+SET(NDB_SHARED_LIB_VERSION "${major}.${minor}.${build}")
+
+#
+# libndbclient.so
+#
 ADD_LIBRARY(ndbclient_so SHARED ${EXTRA_SRC})
 TARGET_LINK_LIBRARIES(ndbclient_so ${NDBCLIENT_SO_LIBS})
-SET_TARGET_PROPERTIES(ndbclient_so PROPERTIES OUTPUT_NAME "ndbclient")
+SET_TARGET_PROPERTIES(ndbclient_so PROPERTIES
+                      OUTPUT_NAME "ndbclient"
+                      SOVERSION ${NDB_SHARED_LIB_VERSION})
+MYSQL_INSTALL_TARGETS(ndbclient_so DESTINATION "${INSTALL_LIBDIR}" COMPONENT Developement)
+

=== modified file 'storage/ndb/src/common/debugger/EventLogger.cpp'
--- a/storage/ndb/src/common/debugger/EventLogger.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/common/debugger/EventLogger.cpp	2011-10-21 12:36:44 +0000
@@ -1524,7 +1524,7 @@ EventLogger::getText(char * dst, size_t 
   if (nodeId != 0)
   {
     BaseString::snprintf(dst, dst_len, "Node %u: ", nodeId);
-    pos= strlen(dst);
+    pos= (int)strlen(dst);
   }
   if (dst_len-pos > 0)
     textF(dst+pos, dst_len-pos, theData, len);

=== modified file 'storage/ndb/src/common/debugger/SignalLoggerManager.cpp'
--- a/storage/ndb/src/common/debugger/SignalLoggerManager.cpp	2011-02-02 15:16:35 +0000
+++ b/storage/ndb/src/common/debugger/SignalLoggerManager.cpp	2011-10-21 12:36:44 +0000
@@ -126,7 +126,7 @@ getParameter(char *blocks[NO_OF_BLOCKS],
   char * tmp = copy;
   bool done = false;
   while(!done){
-    int len = strcspn(tmp, ", ;:\0");
+    int len = (int)strcspn(tmp, ", ;:\0");
     if(len == 0)
       done = true;
     else {

=== modified file 'storage/ndb/src/common/logger/LogHandler.cpp'
--- a/storage/ndb/src/common/logger/LogHandler.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/common/logger/LogHandler.cpp	2011-10-21 12:36:44 +0000
@@ -175,7 +175,7 @@ LogHandler::parseParams(const BaseString
   bool ret = true;
 
   _params.split(v_args, ",");
-  for(size_t i=0; i < v_args.size(); i++) {
+  for(unsigned i=0; i < v_args.size(); i++) {
     Vector<BaseString> v_param_value;
     if(v_args[i].split(v_param_value, "=", 2) != 2)
     {

=== modified file 'storage/ndb/src/common/logger/Logger.cpp'
--- a/storage/ndb/src/common/logger/Logger.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/logger/Logger.cpp	2011-10-21 12:36:44 +0000
@@ -212,13 +212,12 @@ Logger::addHandler(LogHandler* pHandler)
 
 bool
 Logger::addHandler(const BaseString &logstring, int *err, int len, char* errStr) {
-  size_t i;
   Vector<BaseString> logdest;
   DBUG_ENTER("Logger::addHandler");
 
   logstring.split(logdest, ";");
 
-  for(i = 0; i < logdest.size(); i++) {
+  for(unsigned i = 0; i < logdest.size(); i++) {
     DBUG_PRINT("info",("adding: %s",logdest[i].c_str()));
 
     Vector<BaseString> v_type_args;

=== modified file 'storage/ndb/src/common/portlib/NdbConfig.c'
--- a/storage/ndb/src/common/portlib/NdbConfig.c	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/portlib/NdbConfig.c	2011-10-21 12:36:44 +0000
@@ -31,14 +31,14 @@ NdbConfig_get_path(int *_len)
   const char *path= NdbEnv_GetEnv("NDB_HOME", 0, 0);
   int path_len= 0;
   if (path)
-    path_len= strlen(path);
+    path_len= (int)strlen(path);
   if (path_len == 0 && datadir_path) {
     path= datadir_path;
-    path_len= strlen(path);
+    path_len= (int)strlen(path);
   }
   if (path_len == 0) {
     path= ".";
-    path_len= strlen(path);
+    path_len= (int)strlen(path);
   }
   if (_len)
     *_len= path_len;
@@ -68,7 +68,7 @@ NdbConfig_NdbCfgName(int with_ndb_home){
 
   if (with_ndb_home) {
     buf= NdbConfig_AllocHomePath(PATH_MAX);
-    len= strlen(buf);
+    len= (int)strlen(buf);
   } else
     buf= NdbMem_Allocate(PATH_MAX);
   basestring_snprintf(buf+len, PATH_MAX, "Ndb.cfg");
@@ -87,7 +87,7 @@ char *get_prefix_buf(int len, int node_i
                         NdbHost_GetProcessId());
   tmp_buf[sizeof(tmp_buf)-1]= 0;
 
-  buf= NdbConfig_AllocHomePath(len+strlen(tmp_buf));
+  buf= NdbConfig_AllocHomePath(len+(int)strlen(tmp_buf));
   strcat(buf, tmp_buf);
   return buf;
 }
@@ -95,7 +95,7 @@ char *get_prefix_buf(int len, int node_i
 char* 
 NdbConfig_ErrorFileName(int node_id){
   char *buf= get_prefix_buf(PATH_MAX, node_id);
-  int len= strlen(buf);
+  int len= (int)strlen(buf);
   basestring_snprintf(buf+len, PATH_MAX, "_error.log");
   return buf;
 }
@@ -103,7 +103,7 @@ NdbConfig_ErrorFileName(int node_id){
 char*
 NdbConfig_ClusterLogFileName(int node_id){
   char *buf= get_prefix_buf(PATH_MAX, node_id);
-  int len= strlen(buf);
+  int len= (int)strlen(buf);
   basestring_snprintf(buf+len, PATH_MAX, "_cluster.log");
   return buf;
 }
@@ -111,7 +111,7 @@ NdbConfig_ClusterLogFileName(int node_id
 char*
 NdbConfig_SignalLogFileName(int node_id){
   char *buf= get_prefix_buf(PATH_MAX, node_id);
-  int len= strlen(buf);
+  int len= (int)strlen(buf);
   basestring_snprintf(buf+len, PATH_MAX, "_signal.log");
   return buf;
 }
@@ -119,7 +119,7 @@ NdbConfig_SignalLogFileName(int node_id)
 char*
 NdbConfig_TraceFileName(int node_id, int file_no){
   char *buf= get_prefix_buf(PATH_MAX, node_id);
-  int len= strlen(buf);
+  int len= (int)strlen(buf);
   basestring_snprintf(buf+len, PATH_MAX, "_trace.log.%u", file_no);
   return buf;
 }
@@ -127,7 +127,7 @@ NdbConfig_TraceFileName(int node_id, int
 char*
 NdbConfig_NextTraceFileName(int node_id){
   char *buf= get_prefix_buf(PATH_MAX, node_id);
-  int len= strlen(buf);
+  int len= (int)strlen(buf);
   basestring_snprintf(buf+len, PATH_MAX, "_trace.log.next");
   return buf;
 }
@@ -135,7 +135,7 @@ NdbConfig_NextTraceFileName(int node_id)
 char*
 NdbConfig_PidFileName(int node_id){
   char *buf= get_prefix_buf(PATH_MAX, node_id);
-  int len= strlen(buf);
+  int len= (int)strlen(buf);
   basestring_snprintf(buf+len, PATH_MAX, ".pid");
   return buf;
 }
@@ -143,7 +143,7 @@ NdbConfig_PidFileName(int node_id){
 char*
 NdbConfig_StdoutFileName(int node_id){
   char *buf= get_prefix_buf(PATH_MAX, node_id);
-  int len= strlen(buf);
+  int len= (int)strlen(buf);
   basestring_snprintf(buf+len, PATH_MAX, "_out.log");
   return buf;
 }

=== modified file 'storage/ndb/src/common/portlib/NdbDir.cpp'
--- a/storage/ndb/src/common/portlib/NdbDir.cpp	2011-01-30 23:13:49 +0000
+++ b/storage/ndb/src/common/portlib/NdbDir.cpp	2011-10-21 12:36:44 +0000
@@ -280,7 +280,7 @@ NdbDir::remove_recursive(const char* dir
     fprintf(stderr, "Too long path to remove: '%s'\n", dir);
     return false;
   }
-  int start_len = strlen(path);
+  int start_len = (int)strlen(path);
 
   const char* name;
   NdbDir::Iterator iter;
@@ -298,7 +298,7 @@ loop:
       if ((strcmp(".", name) == 0) || (strcmp("..", name) == 0))
         continue;
 
-      int end_len, len = strlen(path);
+      int end_len, len = (int)strlen(path);
       if ((end_len = basestring_snprintf(path + len, sizeof(path) - len,
                                          "%s", name)) < 0)
       {
@@ -329,7 +329,7 @@ loop:
     }
     iter.close();
 
-    int len = strlen(path);
+    int len = (int)strlen(path);
     path[len - 1] = 0; // remove ending slash
 
     char * prev_slash = strrchr(path, IF_WIN('\\', '/'));

=== modified file 'storage/ndb/src/common/portlib/NdbThread.c'
--- a/storage/ndb/src/common/portlib/NdbThread.c	2011-10-07 07:37:47 +0000
+++ b/storage/ndb/src/common/portlib/NdbThread.c	2011-10-21 12:36:44 +0000
@@ -253,7 +253,11 @@ NdbThread_Create(NDB_THREAD_FUNC *p_thre
     thread_stack_size = PTHREAD_STACK_MIN;
 #endif
   DBUG_PRINT("info", ("stack_size: %llu", (ulonglong)thread_stack_size));
+#ifndef _WIN32
   pthread_attr_setstacksize(&thread_attr, thread_stack_size);
+#else
+  pthread_attr_setstacksize(&thread_attr, (DWORD)thread_stack_size);
+#endif
 #ifdef USE_PTHREAD_EXTRAS
   /* Guard stack overflow with a 2k databuffer */
   pthread_attr_setguardsize(&thread_attr, 2048);

=== modified file 'storage/ndb/src/common/portlib/ndb_daemon.cc'
--- a/storage/ndb/src/common/portlib/ndb_daemon.cc	2011-01-30 23:13:49 +0000
+++ b/storage/ndb/src/common/portlib/ndb_daemon.cc	2011-10-20 16:18:28 +0000
@@ -315,7 +315,7 @@ do_files(const char *pidfile_name, const
                 pidfile_name, errno);
 
   char buf[32];
-  int length = my_snprintf(buf, sizeof(buf), "%ld",
+  int length = (int)my_snprintf(buf, sizeof(buf), "%ld",
                            (long)NdbHost_GetProcessId());
   if (write(pidfd, buf, length) != length)
     return ERR1("Failed to write pid to pidfile '%s', errno: %d",

=== modified file 'storage/ndb/src/common/transporter/TransporterRegistry.cpp'
--- a/storage/ndb/src/common/transporter/TransporterRegistry.cpp	2011-09-09 13:09:02 +0000
+++ b/storage/ndb/src/common/transporter/TransporterRegistry.cpp	2011-10-21 12:36:44 +0000
@@ -2418,7 +2418,7 @@ TransporterRegistry::print_transporters(
 
   out << "<<" << endl;
 
-  for (size_t i= 0; i < m_transporter_interface.size(); i++){
+  for (unsigned i= 0; i < m_transporter_interface.size(); i++){
     Transporter_interface tf= m_transporter_interface[i];
 
     out << i

=== modified file 'storage/ndb/src/common/util/BaseString.cpp'
--- a/storage/ndb/src/common/util/BaseString.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/util/BaseString.cpp	2011-10-21 12:36:44 +0000
@@ -50,7 +50,7 @@ BaseString::BaseString(const char* s)
       return;
     }
     memcpy(m_chr, s, n + 1);
-    m_len = n;
+    m_len = (unsigned)n;
 }
 
 BaseString::BaseString(const char * s, size_t n)
@@ -70,7 +70,7 @@ BaseString::BaseString(const char * s, s
   }
   memcpy(m_chr, s, n);
   m_chr[n] = 0;
-  m_len = n;
+  m_len = (unsigned)n;
 }
 
 BaseString::BaseString(const BaseString& str)
@@ -93,7 +93,7 @@ BaseString::BaseString(const BaseString&
     }
     memcpy(t, s, n + 1);
     m_chr = t;
-    m_len = n;
+    m_len = (unsigned)n;
 }
 
 BaseString::~BaseString()
@@ -125,7 +125,7 @@ BaseString::assign(const char* s)
     }
     delete[] m_chr;
     m_chr = t;
-    m_len = n;
+    m_len = (unsigned)n;
     return *this;
 }
 
@@ -145,7 +145,7 @@ BaseString::assign(const char* s, size_t
     }
     delete[] m_chr;
     m_chr = t;
-    m_len = n;
+    m_len = (unsigned)n;
     return *this;
 }
 
@@ -178,7 +178,7 @@ BaseString::append(const char* s)
     }
     delete[] m_chr;
     m_chr = t;
-    m_len += n;
+    m_len += (unsigned)n;
     return *this;
 }
 
@@ -196,7 +196,7 @@ BaseString::append(const BaseString& str
 BaseString&
 BaseString::append(const Vector<BaseString> &vector,
 		   const BaseString &separator) {
-    for(size_t i=0;i<vector.size(); i++) {
+    for(unsigned i=0;i<vector.size(); i++) {
 	append(vector[i]);
 	if(i<vector.size()-1)
 	    append(separator);
@@ -232,7 +232,7 @@ BaseString::assfmt(const char *fmt, ...)
     l = basestring_vsnprintf(m_chr, l, fmt, ap);
     assert(l == (int)strlen(m_chr));
     va_end(ap);
-    m_len = strlen(m_chr);
+    m_len = (unsigned)strlen(m_chr);
     return *this;
 }
 
@@ -279,7 +279,7 @@ BaseString::split(Vector<BaseString> &v,
 		  int maxSize) const {
     char *str = strdup(m_chr);
     int i, start, len, num = 0;
-    len = strlen(str);
+    len = (int)strlen(str);
     for(start = i = 0;
 	(i <= len) && ( (maxSize<0) || ((int)v.size()<=maxSize-1) );
 	i++) {
@@ -360,7 +360,7 @@ BaseString::argify(const char *argv0, co
     char *tmp = new char[strlen(src)+1];
     if (tmp == NULL)
     {
-      for(size_t i = 0; i < vargv.size(); i++)
+      for(unsigned i = 0; i < vargv.size(); i++)
         free(vargv[i]);
       errno = ENOMEM;
       return NULL;
@@ -413,7 +413,7 @@ BaseString::argify(const char *argv0, co
           if (t == NULL)
           {
             delete[] tmp;
-            for(size_t i = 0; i < vargv.size(); i++)
+            for(unsigned i = 0; i < vargv.size(); i++)
               free(vargv[i]);
             errno = ENOMEM;
             return NULL;
@@ -422,7 +422,7 @@ BaseString::argify(const char *argv0, co
           {
             free(t);
             delete[] tmp;
-            for(size_t i = 0; i < vargv.size(); i++)
+            for(unsigned i = 0; i < vargv.size(); i++)
               free(vargv[i]);
             return NULL;
           }
@@ -433,7 +433,7 @@ BaseString::argify(const char *argv0, co
     delete[] tmp;
     if (vargv.push_back(NULL))
     {
-      for(size_t i = 0; i < vargv.size(); i++)
+      for(unsigned i = 0; i < vargv.size(); i++)
         free(vargv[i]);
       return NULL;
     }
@@ -444,13 +444,13 @@ BaseString::argify(const char *argv0, co
     char **argv = (char **)malloc(sizeof(*argv) * (vargv.size()));
     if(argv == NULL)
     {
-        for(size_t i = 0; i < vargv.size(); i++)
+        for(unsigned i = 0; i < vargv.size(); i++)
           free(vargv[i]);
         errno = ENOMEM;
 	return NULL;
     }
     
-    for(size_t i = 0; i < vargv.size(); i++){
+    for(unsigned i = 0; i < vargv.size(); i++){
 	argv[i] = vargv[i];
     }
     
@@ -460,13 +460,13 @@ BaseString::argify(const char *argv0, co
 BaseString&
 BaseString::trim(const char * delim){
     trim(m_chr, delim);
-    m_len = strlen(m_chr);
+    m_len = (unsigned)strlen(m_chr);
     return * this;
 }
 
 char*
 BaseString::trim(char * str, const char * delim){
-    int len = strlen(str) - 1;
+    int len = (int)strlen(str) - 1;
     for(; len > 0 && strchr(delim, str[len]); len--)
       ;
 

=== modified file 'storage/ndb/src/common/util/ConfigValues.cpp'
--- a/storage/ndb/src/common/util/ConfigValues.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/common/util/ConfigValues.cpp	2011-10-21 12:36:44 +0000
@@ -593,7 +593,7 @@ ConfigValues::getPackedSize() const {
 	break;
       case StringType:
 	size += 8; // key + len
-	size += mod4(strlen(* getString(m_values[i+1])) + 1);
+	size += mod4((unsigned)strlen(* getString(m_values[i+1])) + 1);
 	break;
       case InvalidType:
       default:

=== modified file 'storage/ndb/src/common/util/File.cpp'
--- a/storage/ndb/src/common/util/File.cpp	2011-10-05 07:24:39 +0000
+++ b/storage/ndb/src/common/util/File.cpp	2011-10-27 12:19:57 +0000
@@ -148,13 +148,13 @@ File_class::close()
 int 
 File_class::read(void* buf, size_t itemSize, size_t nitems) const
 {
-  return ::fread(buf, itemSize,  nitems, m_file);
+  return (int)::fread(buf, itemSize,  nitems, m_file);
 }
 
 int 
 File_class::readChar(char* buf, long start, long length) const
 {
-  return ::fread((void*)&buf[start], 1, length, m_file);
+  return (int)::fread((void*)&buf[start], 1, length, m_file);
 }
 
 int 
@@ -166,13 +166,13 @@ File_class::readChar(char* buf)
 int 
 File_class::write(const void* buf, size_t size_arg, size_t nitems)
 {
-  return ::fwrite(buf, size_arg, nitems, m_file);
+  return (int)::fwrite(buf, size_arg, nitems, m_file);
 }
  
 int
 File_class::writeChar(const char* buf, long start, long length)
 {
-  return ::fwrite((const void*)&buf[start], sizeof(char), length, m_file);
+  return (int)::fwrite((const void*)&buf[start], sizeof(char), length, m_file);
 }
 
 int 

=== modified file 'storage/ndb/src/common/util/InputStream.cpp'
--- a/storage/ndb/src/common/util/InputStream.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/util/InputStream.cpp	2011-10-21 12:36:44 +0000
@@ -57,7 +57,7 @@ SocketInputStream::gets(char * buf, int 
     m_startover= false;
   }
   else
-    offset= strlen(buf);
+    offset= (int)strlen(buf);
 
   int time= 0;
   int res = readln_socket(m_socket, m_timeout_remain, &time,

=== modified file 'storage/ndb/src/common/util/NdbSqlUtil.cpp'
--- a/storage/ndb/src/common/util/NdbSqlUtil.cpp	2011-07-08 09:34:07 +0000
+++ b/storage/ndb/src/common/util/NdbSqlUtil.cpp	2011-10-27 12:19:57 +0000
@@ -693,7 +693,7 @@ NdbSqlUtil::likeChar(const void* info, c
   const char* v2 = (const char*)p2;
   CHARSET_INFO* cs = (CHARSET_INFO*)(info);
   // strip end spaces to match (incorrect) MySQL behaviour
-  n1 = (*cs->cset->lengthsp)(cs, v1, n1);
+  n1 = (unsigned)(*cs->cset->lengthsp)(cs, v1, n1);
   int k = (*cs->coll->wildcmp)(cs, v1, v1 + n1, v2, v2 + n2, ndb_wild_prefix, ndb_wild_one, ndb_wild_many);
   return k == 0 ? 0 : +1;
 }
@@ -980,13 +980,13 @@ NdbSqlUtil::strnxfrm_bug7284(CHARSET_INF
   if (n1 <= 0)
     return -1;
   // strxfrm to binary
-  int n2 = ndb_strnxfrm(cs, xsp, sizeof(xsp), nsp, n1);
+  int n2 = (int)ndb_strnxfrm(cs, xsp, sizeof(xsp), nsp, n1);
   if (n2 <= 0)
     return -1;
   // XXX bug workaround - strnxfrm may not write full string
   memset(dst, 0x0, dstLen);
   // strxfrm argument string - returns no error indication
-  int n3 = ndb_strnxfrm(cs, dst, dstLen, src, srcLen);
+  int n3 = (int)ndb_strnxfrm(cs, dst, dstLen, src, srcLen);
   // pad with strxfrm-ed space chars
   int n4 = n3;
   while (n4 < (int)dstLen) {

=== modified file 'storage/ndb/src/common/util/OutputStream.cpp'
--- a/storage/ndb/src/common/util/OutputStream.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/common/util/OutputStream.cpp	2011-10-24 07:44:52 +0000
@@ -43,6 +43,12 @@ FileOutputStream::println(const char * f
   return ret + fprintf(f, "\n");
 }
 
+int
+FileOutputStream::write(const void * buf, size_t len)
+{
+  return (int)fwrite(buf, len, 1, f);
+}
+
 SocketOutputStream::SocketOutputStream(NDB_SOCKET_TYPE socket,
 				       unsigned write_timeout_ms) :
   m_socket(socket),
@@ -97,6 +103,28 @@ SocketOutputStream::println(const char *
   return ret;
 }
 
+int
+SocketOutputStream::write(const void * buf, size_t len)
+{
+  if (timedout())
+    return -1;
+
+  int time = 0;
+  int ret = write_socket(m_socket, m_timeout_ms, &time,
+                         (const char*)buf, (int)len);
+  if (ret >= 0)
+  {
+    m_timeout_remain -= time;
+  }
+
+  if ((ret < 0 && errno == SOCKET_ETIMEDOUT) || m_timeout_remain <= 0)
+  {
+    m_timedout = true;
+    ret= -1;
+  }
+  return ret;
+}
+
 #include <UtilBuffer.hpp>
 #include <BaseString.hpp>
 
@@ -172,6 +200,12 @@ BufferedSockOutputStream::println(const 
   return 0;
 }
 
+int
+BufferedSockOutputStream::write(const void * buf, size_t len)
+{
+  return m_buffer.append(buf, len);
+}
+
 void BufferedSockOutputStream::flush(){
   int elapsed = 0;
   if (write_socket(m_socket, m_timeout_ms, &elapsed,

=== modified file 'storage/ndb/src/common/util/Parser.cpp'
--- a/storage/ndb/src/common/util/Parser.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/util/Parser.cpp	2011-10-21 12:36:44 +0000
@@ -78,7 +78,7 @@ bool
 Empty(const char * str){
   if(str == 0)
     return true;
-  const int len = strlen(str);
+  const int len = (int)strlen(str);
   if(len == 0)
     return false;
   for(int i = 0; i<len; i++)
@@ -96,7 +96,7 @@ void
 trim(char * str){
   if(str == NULL)
     return;
-  int len = strlen(str);
+  int len = (int)strlen(str);
   for(len--; str[len] == '\n' || str[len] == ' ' || str[len] == '\t'; len--)
     str[len] = 0;
   
@@ -156,7 +156,7 @@ ParserImpl::run(Context * ctx, const cla
     return false;
   }
 
-  int last= strlen(ctx->m_currentToken);
+  int last= (int)strlen(ctx->m_currentToken);
   if(last>0)
     last--;
 

=== modified file 'storage/ndb/src/common/util/Properties.cpp'
--- a/storage/ndb/src/common/util/Properties.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/common/util/Properties.cpp	2011-10-21 12:36:44 +0000
@@ -662,10 +662,10 @@ PropertiesImpl::getPackedSize(Uint32 pLe
       sz += 4; // Type
       sz += 4; // Name Len
       sz += 4; // Value Len
-      sz += mod4(pLen + strlen(content[i]->name)); // Name
+      sz += mod4(pLen + (unsigned)strlen(content[i]->name)); // Name
       switch(content[i]->valueType){
       case PropertiesType_char:
-	sz += mod4(strlen((char *)content[i]->value));
+	sz += mod4((unsigned)strlen((char *)content[i]->value));
 	break;
       case PropertiesType_Uint32:
 	sz += mod4(4);
@@ -734,7 +734,7 @@ PropertiesImpl::pack(Uint32 *& buf, cons
   CharBuf charBuf;
   
   for(unsigned int i = 0; i<items; i++){
-    const int strLenName      = strlen(content[i]->name);
+    const int strLenName      = (int)strlen(content[i]->name);
     
     if(content[i]->valueType == PropertiesType_Properties){
       charBuf.clear();

=== modified file 'storage/ndb/src/common/util/ndb_init.cpp'
--- a/storage/ndb/src/common/util/ndb_init.cpp	2011-09-27 17:28:13 +0000
+++ b/storage/ndb/src/common/util/ndb_init.cpp	2011-10-20 16:18:28 +0000
@@ -56,7 +56,7 @@ ndb_init_internal()
   {
     {
       const char* err = "ndb_init() failed - exit\n";
-      int res = write(2, err, strlen(err));
+      int res = (int)write(2, err, (unsigned)strlen(err));
       (void)res;
       exit(1);
     }
@@ -79,7 +79,7 @@ ndb_init()
     if (my_init())
     {
       const char* err = "my_init() failed - exit\n";
-      int res = write(2, err, strlen(err));
+      int res = (int)write(2, err, (unsigned)strlen(err));
       (void)res;
       exit(1);
     }

=== modified file 'storage/ndb/src/common/util/ndbzio.c'
--- a/storage/ndb/src/common/util/ndbzio.c	2011-09-29 05:44:30 +0000
+++ b/storage/ndb/src/common/util/ndbzio.c	2011-10-20 19:41:56 +0000
@@ -428,7 +428,7 @@ int read_buffer(ndbzio_stream *s)
   my_errno= 0;
   if (s->stream.avail_in == 0)
   {
-    s->stream.avail_in = my_read(s->file, (uchar *)s->inbuf, AZ_BUFSIZE_READ, MYF(0));
+    s->stream.avail_in = (uInt)my_read(s->file, (uchar *)s->inbuf, AZ_BUFSIZE_READ, MYF(0));
     if(s->stream.avail_in > 0)
       my_errno= 0;
     if (s->stream.avail_in == 0)
@@ -681,7 +681,7 @@ unsigned int ZEXPORT ndbzread ( ndbzio_s
         bytes_read= my_read(s->file, (uchar *)next_out, s->stream.avail_out,
                             MYF(0));
         if(bytes_read>0)
-          s->stream.avail_out -= bytes_read;
+          s->stream.avail_out -= (uInt)bytes_read;
         if (bytes_read == 0)
         {
           s->z_eof = 1;

=== modified file 'storage/ndb/src/common/util/socket_io.cpp'
--- a/storage/ndb/src/common/util/socket_io.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/common/util/socket_io.cpp	2011-10-21 12:36:44 +0000
@@ -123,7 +123,7 @@ readln_socket(NDB_SOCKET_TYPE socket, in
         *time = 0;
 
 	ptr[0]= 0;
-	return ptr - buf;
+	return (int)(ptr - buf);
       }
     }
     
@@ -226,7 +226,7 @@ vprint_socket(NDB_SOCKET_TYPE socket, in
   } else
     return 0;
 
-  int ret = write_socket(socket, timeout_millis, time, buf2, size);
+  int ret = write_socket(socket, timeout_millis, time, buf2, (int)size);
   if(buf2 != buf)
     free(buf2);
   return ret;
@@ -254,7 +254,7 @@ vprintln_socket(NDB_SOCKET_TYPE socket, 
   }
   buf2[size-1]='\n';
 
-  int ret = write_socket(socket, timeout_millis, time, buf2, size);
+  int ret = write_socket(socket, timeout_millis, time, buf2, (int)size);
   if(buf2 != buf)
     free(buf2);
   return ret;

=== modified file 'storage/ndb/src/cw/cpcd/APIService.cpp'
--- a/storage/ndb/src/cw/cpcd/APIService.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/cw/cpcd/APIService.cpp	2011-10-21 12:36:44 +0000
@@ -182,7 +182,7 @@ CPCDAPISession::runSession(){
 
     switch(ctx.m_status){
     case Parser_t::Ok:
-      for(size_t i = 0; i<ctx.m_aliasUsed.size(); i++)
+      for(unsigned i = 0; i<ctx.m_aliasUsed.size(); i++)
 	ndbout_c("Used alias: %s -> %s", 
 		 ctx.m_aliasUsed[i]->name, ctx.m_aliasUsed[i]->realName);
       break;
@@ -199,7 +199,7 @@ CPCDAPISession::runSession(){
 void
 CPCDAPISession::stopSession(){
   CPCD::RequestStatus rs;
-  for(size_t i = 0; i<m_temporaryProcesses.size(); i++){
+  for(unsigned i = 0; i<m_temporaryProcesses.size(); i++){
     Uint32 id = m_temporaryProcesses[i];
     m_cpcd.undefineProcess(&rs, id);
   }
@@ -215,7 +215,7 @@ CPCDAPISession::loadFile(){
 
     switch(ctx.m_status){
     case Parser_t::Ok:
-      for(size_t i = 0; i<ctx.m_aliasUsed.size(); i++)
+      for(unsigned i = 0; i<ctx.m_aliasUsed.size(); i++)
 	ndbout_c("Used alias: %s -> %s", 
 		 ctx.m_aliasUsed[i]->name, ctx.m_aliasUsed[i]->realName);
       break;
@@ -348,7 +348,7 @@ CPCDAPISession::listProcesses(Parser_t::
   m_output->println("%s", "");
   
 
-  for(size_t i = 0; i < proclist->size(); i++) {
+  for(unsigned i = 0; i < proclist->size(); i++) {
     CPCD::Process *p = (*proclist)[i];
 
     m_output->println("process");

=== modified file 'storage/ndb/src/cw/cpcd/CPCD.cpp'
--- a/storage/ndb/src/cw/cpcd/CPCD.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/cw/cpcd/CPCD.cpp	2011-10-21 12:36:44 +0000
@@ -60,7 +60,7 @@ CPCD::findUniqueId() {
     if(id == 0)
       ok = false;
 
-    for(size_t i = 0; i<m_processes.size(); i++) {
+    for(unsigned i = 0; i<m_processes.size(); i++) {
       if(m_processes[i]->m_id == id)
 	ok = false;
     }
@@ -76,7 +76,7 @@ CPCD::defineProcess(RequestStatus * rs, 
 
   Guard tmp(m_processes);
 
-  for(size_t i = 0; i<m_processes.size(); i++) {
+  for(unsigned i = 0; i<m_processes.size(); i++) {
     Process * proc = m_processes[i];
     
     if((strcmp(arg->m_name.c_str(), proc->m_name.c_str()) == 0) && 
@@ -106,7 +106,7 @@ CPCD::undefineProcess(CPCD::RequestStatu
   Guard tmp(m_processes);
 
   Process * proc = 0;
-  size_t i;
+  unsigned i;
   for(i = 0; i < m_processes.size(); i++) {
     if(m_processes[i]->m_id == id) {
       proc = m_processes[i];
@@ -142,7 +142,7 @@ CPCD::startProcess(CPCD::RequestStatus *
 
     Guard tmp(m_processes);
     
-    for(size_t i = 0; i < m_processes.size(); i++) {
+    for(unsigned i = 0; i < m_processes.size(); i++) {
       if(m_processes[i]->m_id == id) {
 	proc = m_processes[i];
 	break;
@@ -185,7 +185,7 @@ CPCD::stopProcess(CPCD::RequestStatus *r
   Guard tmp(m_processes);
 
   Process * proc = 0;
-  for(size_t i = 0; i < m_processes.size(); i++) {
+  for(unsigned i = 0; i < m_processes.size(); i++) {
     if(m_processes[i]->m_id == id) {
       proc = m_processes[i];
       break;
@@ -264,7 +264,7 @@ CPCD::saveProcessList(){
     return false;
   }
 
-  for(size_t i = 0; i<m_processes.size(); i++){
+  for(unsigned i = 0; i<m_processes.size(); i++){
     m_processes[i]->print(f);
     fprintf(f, "\n");
 
@@ -367,7 +367,7 @@ CPCD::loadProcessList(){
   sess.loadFile();
   loadingProcessList = false;
 
-  size_t i;
+  unsigned i;
   Vector<int> temporary;
   for(i = 0; i<m_processes.size(); i++){
     Process * proc = m_processes[i];

=== modified file 'storage/ndb/src/cw/cpcd/Monitor.cpp'
--- a/storage/ndb/src/cw/cpcd/Monitor.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/cw/cpcd/Monitor.cpp	2011-10-21 12:36:44 +0000
@@ -63,7 +63,7 @@ CPCD::Monitor::run() {
 
     proc.lock();
 
-    for(size_t i = 0; i < proc.size(); i++) {
+    for(unsigned i = 0; i < proc.size(); i++) {
       proc[i]->monitor();
     }
 

=== modified file 'storage/ndb/src/cw/cpcd/Process.cpp'
--- a/storage/ndb/src/cw/cpcd/Process.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/cw/cpcd/Process.cpp	2011-10-21 12:36:44 +0000
@@ -334,7 +334,7 @@ save_environment(const char *env, Vector
 
 void
 CPCD::Process::do_exec() {
-  size_t i;
+  unsigned i;
 
 #ifdef _WIN32
   Vector<BaseString> saved;
@@ -411,7 +411,7 @@ CPCD::Process::do_exec() {
     }
     int f = fds[i]= open(redirects[i]->c_str(), flags, mode);
     if(f == -1){
-      logger.error("Cannot redirect %ld to/from '%s' : %s\n", i,
+      logger.error("Cannot redirect %u to/from '%s' : %s\n", i,
 		   redirects[i]->c_str(), strerror(errno));
       _exit(1);
     }

=== modified file 'storage/ndb/src/kernel/blocks/backup/read.cpp'
--- a/storage/ndb/src/kernel/blocks/backup/read.cpp	2011-09-07 10:08:09 +0000
+++ b/storage/ndb/src/kernel/blocks/backup/read.cpp	2011-10-27 12:19:57 +0000
@@ -229,7 +229,7 @@ size_t
 aread(void * buf, size_t sz, size_t n, ndbzio_stream* f)
 {
   int error = 0;
-  unsigned r = ndbzread(f, buf, (sz * n), &error);
+  unsigned r = ndbzread(f, buf, (unsigned)(sz * n), &error);
   if (error || r != (sz * n))
   {
     printf("Failed to read!!");

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-10-17 13:30:56 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-10-27 12:19:57 +0000
@@ -15406,7 +15406,7 @@ Dbdict::createEvent_RT_USER_CREATE(Signa
   }
   r0.getString(evntRecPtr.p->m_eventRec.NAME);
   {
-    int len = strlen(evntRecPtr.p->m_eventRec.NAME);
+    int len = (int)strlen(evntRecPtr.p->m_eventRec.NAME);
     memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len);
 #ifdef EVENT_DEBUG
     printf("CreateEvntReq::RT_USER_CREATE; EventName %s, len %u\n",
@@ -15434,7 +15434,7 @@ sendref:
   }
   r0.getString(evntRecPtr.p->m_eventRec.TABLE_NAME);
   {
-    int len = strlen(evntRecPtr.p->m_eventRec.TABLE_NAME);
+    int len = (int)strlen(evntRecPtr.p->m_eventRec.TABLE_NAME);
     memset(evntRecPtr.p->m_eventRec.TABLE_NAME+len, 0, MAX_TAB_NAME_SIZE-len);
   }
 
@@ -16059,7 +16059,7 @@ Dbdict::createEvent_RT_USER_GET(Signal* 
   }
 
   r0.getString(evntRecPtr.p->m_eventRec.NAME);
-  int len = strlen(evntRecPtr.p->m_eventRec.NAME);
+  int len = (int)strlen(evntRecPtr.p->m_eventRec.NAME);
   memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len);
   
   releaseSections(handle);
@@ -17122,7 +17122,7 @@ Dbdict::execDROP_EVNT_REQ(Signal* signal
   }
   r0.getString(evntRecPtr.p->m_eventRec.NAME);
   {
-    int len = strlen(evntRecPtr.p->m_eventRec.NAME);
+    int len = (int)strlen(evntRecPtr.p->m_eventRec.NAME);
     memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len);
 #ifdef EVENT_DEBUG
     printf("DropEvntReq; EventName %s, len %u\n",
@@ -20680,31 +20680,6 @@ Dbdict::createFile_parse(Signal* signal,
     return;
   }
 
-  /**
-   * auto-connect
-   */
-  if (f.FilegroupId == RNIL && f.FilegroupVersion == RNIL)
-  {
-    jam();
-    Filegroup_hash::Iterator it;
-    c_filegroup_hash.first(it);
-    while (!it.isNull())
-    {
-      jam();
-      if ((f.FileType == DictTabInfo::Undofile &&
-           it.curr.p->m_type == DictTabInfo::LogfileGroup) ||
-          (f.FileType == DictTabInfo::Datafile &&
-           it.curr.p->m_type == DictTabInfo::Tablespace))
-      {
-        jam();
-        f.FilegroupId = it.curr.p->key;
-        f.FilegroupVersion = it.curr.p->m_version;
-        break;
-      }
-      c_filegroup_hash.next(it);
-    }
-  }
-
   // Get Filegroup
   FilegroupPtr fg_ptr;
   if(!c_filegroup_hash.find(fg_ptr, f.FilegroupId))
@@ -21433,21 +21408,6 @@ Dbdict::createFilegroup_parse(Signal* si
       setError(error, CreateFilegroupRef::InvalidExtentSize, __LINE__);
       return;
     }
-
-    /**
-     * auto-connect
-     */
-    if (fg.TS_LogfileGroupId == RNIL && fg.TS_LogfileGroupVersion == RNIL)
-    {
-      jam();
-      Filegroup_hash::Iterator it;
-      if (c_filegroup_hash.first(it))
-      {
-        jam();
-        fg.TS_LogfileGroupId = it.curr.p->key;
-        fg.TS_LogfileGroupVersion = it.curr.p->m_version;
-      }
-    }
   }
   else if(fg.FilegroupType == DictTabInfo::LogfileGroup)
   {

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-10-05 07:24:39 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-10-27 12:19:57 +0000
@@ -9023,7 +9023,7 @@ void Dbdih::execDIGETNODESREQ(Signal* si
   Uint32 fragId, newFragId = RNIL;
   DiGetNodesConf * const conf = (DiGetNodesConf *)&signal->theData[0];
   TabRecord* regTabDesc = tabRecord;
-  EmulatedJamBuffer * jambuf = * (EmulatedJamBuffer**)(req->jamBuffer);
+  EmulatedJamBuffer * jambuf = (EmulatedJamBuffer*)req->jamBufferPtr;
   thrjamEntry(jambuf);
   ptrCheckGuard(tabPtr, ttabFileSize, regTabDesc);
 

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/printSysfile.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/printSysfile.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/printSysfile.cpp	2011-10-21 12:36:44 +0000
@@ -58,7 +58,7 @@ char * getNSString(Uint32 ns){
 
 void
 fill(const char * buf, int mod){
-  int len = strlen(buf)+1;
+  int len = (int)(strlen(buf)+1);
   ndbout << buf << " ";
   while((len % mod) != 0){
     ndbout << " ";

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-10-17 13:30:56 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-10-27 12:19:57 +0000
@@ -23447,12 +23447,8 @@ Dblqh::ndbinfo_write_op(Ndbinfo::Row & r
   row.write_uint32(tcPtr.p->tcBlockref); // tcref
   row.write_uint32(tcPtr.p->applRef);    // apiref
 
-  char transid[64];
-  BaseString::snprintf(transid, sizeof(transid),
-                       "%.8x.%.8x",
-                       tcPtr.p->transid[0],
-                       tcPtr.p->transid[1]);
-  row.write_string(transid);
+  row.write_uint32(tcPtr.p->transid[0]);
+  row.write_uint32(tcPtr.p->transid[1]);
   row.write_uint32(tcPtr.p->tableref);
   row.write_uint32(tcPtr.p->fragmentid);
 

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp	2011-10-21 12:36:44 +0000
@@ -85,7 +85,7 @@ NDB_COMMAND(redoLogFileReader,  "redoLog
   {
     MY_STAT buf;
     my_stat(fileName, &buf, MYF(0));
-    NO_MBYTE_IN_FILE = buf.st_size / (1024 * 1024);
+    NO_MBYTE_IN_FILE = (unsigned)(buf.st_size / (1024 * 1024));
     if (NO_MBYTE_IN_FILE != 16)
     {
       ndbout_c("Detected %umb files", NO_MBYTE_IN_FILE);

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-10-03 08:02:28 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-10-23 08:38:06 +0000
@@ -3601,6 +3601,7 @@ Dbspj::computeHash(Signal* signal,
     (MAX_KEY_SIZE_IN_WORDS + 1) / 2;
   Uint64 tmp64[MAX_KEY_SIZE_IN_LONG_WORDS];
   Uint32 *tmp32 = (Uint32*)tmp64;
+  ndbassert(ptr.sz <= MAX_KEY_SIZE_IN_WORDS);
   copy(tmp32, ptr);
 
   const KeyDescriptor* desc = g_key_descriptor_pool.getPtr(tableId);
@@ -3639,6 +3640,7 @@ Dbspj::computePartitionHash(Signal* sign
   Uint64 *tmp64 = _space;
   Uint32 *tmp32 = (Uint32*)tmp64;
   Uint32 sz = ptr.sz;
+  ndbassert(ptr.sz <= MAX_KEY_SIZE_IN_WORDS);
   copy(tmp32, ptr);
 
   const KeyDescriptor* desc = g_key_descriptor_pool.getPtr(tableId);
@@ -3681,7 +3683,7 @@ Dbspj::getNodes(Signal* signal, BuildKey
   req->tableId = tableId;
   req->hashValue = dst.hashInfo[1];
   req->distr_key_indicator = 0; // userDefinedPartitioning not supported!
-  * (EmulatedJamBuffer**)req->jamBuffer = jamBuffer();
+  req->jamBufferPtr = jamBuffer();
 
 #if 1
   EXECUTE_DIRECT(DBDIH, GSN_DIGETNODESREQ, signal,
@@ -4456,6 +4458,12 @@ Dbspj::parseScanIndex(Build_context& ctx
     data.m_firstExecution = true;
     data.m_batch_chunks = 0;
 
+    /**
+     * We will need to look at the parameters again if the scan is pruned and the prune
+     * key uses parameter values. Therefore, we keep a reference to the start of the
+     * parameter buffer.
+     */
+    DABuffer origParam = param;
     err = parseDA(ctx, requestPtr, treeNodePtr,
                   tree, treeBits, param, paramBits);
     if (unlikely(err != 0))
@@ -4482,7 +4490,7 @@ Dbspj::parseScanIndex(Build_context& ctx
         /**
          * Expand pattern into a new pattern (with linked values)
          */
-        err = expand(pattern, treeNodePtr, tree, len, param, cnt);
+        err = expand(pattern, treeNodePtr, tree, len, origParam, cnt);
         if (unlikely(err != 0))
           break;
 
@@ -4501,7 +4509,7 @@ Dbspj::parseScanIndex(Build_context& ctx
          */
         Uint32 prunePtrI = RNIL;
         bool hasNull;
-        err = expand(prunePtrI, tree, len, param, cnt, hasNull);
+        err = expand(prunePtrI, tree, len, origParam, cnt, hasNull);
         if (unlikely(err != 0))
           break;
 
@@ -5078,7 +5086,8 @@ Dbspj::scanIndex_parent_batch_complete(S
       parallelism = (data.m_fragCount - data.m_frags_complete) / roundTrips;
     }
 
-    ndbassert(parallelism <= data.m_fragCount - data.m_frags_complete);
+    ndbassert(parallelism >= 1);
+    ndbassert((Uint32)parallelism + data.m_frags_complete <= data.m_fragCount);
     data.m_parallelism = static_cast<Uint32>(parallelism);
 
 #ifdef DEBUG_SCAN_FRAGREQ
@@ -6189,6 +6198,7 @@ Uint32
 Dbspj::appendToPattern(Local_pattern_store & pattern,
                        DABuffer & tree, Uint32 len)
 {
+  jam();
   if (unlikely(tree.ptr + len > tree.end))
     return DbspjErr::InvalidTreeNodeSpecification;
 
@@ -6203,6 +6213,7 @@ Uint32
 Dbspj::appendParamToPattern(Local_pattern_store& dst,
                             const RowPtr::Linear & row, Uint32 col)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6218,6 +6229,7 @@ Uint32
 Dbspj::appendParamHeadToPattern(Local_pattern_store& dst,
                                 const RowPtr::Linear & row, Uint32 col)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6235,6 +6247,7 @@ Dbspj::appendTreeToSection(Uint32 & ptrI
   /**
    * TODO handle errors
    */
+  jam();
   Uint32 SZ = 16;
   Uint32 tmp[16];
   while (len > SZ)
@@ -6293,6 +6306,7 @@ Uint32
 Dbspj::appendColToSection(Uint32 & dst, const RowPtr::Section & row,
                           Uint32 col, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6316,6 +6330,7 @@ Uint32
 Dbspj::appendColToSection(Uint32 & dst, const RowPtr::Linear & row,
                           Uint32 col, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6335,6 +6350,7 @@ Uint32
 Dbspj::appendAttrinfoToSection(Uint32 & dst, const RowPtr::Linear & row,
                                Uint32 col, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6353,6 +6369,7 @@ Uint32
 Dbspj::appendAttrinfoToSection(Uint32 & dst, const RowPtr::Section & row,
                                Uint32 col, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6378,6 +6395,7 @@ Dbspj::appendAttrinfoToSection(Uint32 & 
 Uint32
 Dbspj::appendPkColToSection(Uint32 & dst, const RowPtr::Section & row, Uint32 col)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6400,6 +6418,7 @@ Dbspj::appendPkColToSection(Uint32 & dst
 Uint32
 Dbspj::appendPkColToSection(Uint32 & dst, const RowPtr::Linear & row, Uint32 col)
 {
+  jam();
   Uint32 offset = row.m_header->m_offset[col];
   Uint32 tmp = row.m_data[offset];
   Uint32 len = AttributeHeader::getDataSize(tmp);
@@ -6413,6 +6432,7 @@ Dbspj::appendFromParent(Uint32 & dst, Lo
                         Uint32 levels, const RowPtr & rowptr,
                         bool& hasNull)
 {
+  jam();
   Ptr<TreeNode> treeNodePtr;
   m_treenode_pool.getPtr(treeNodePtr, rowptr.m_src_node_ptrI);
   Uint32 corrVal = rowptr.m_src_correlation;
@@ -6527,6 +6547,7 @@ Dbspj::appendDataToSection(Uint32 & ptrI
                            Local_pattern_store::ConstDataBufferIterator& it,
                            Uint32 len, bool& hasNull)
 {
+  jam();
   if (unlikely(len==0))
   {
     jam();
@@ -6732,6 +6753,7 @@ Uint32
 Dbspj::expand(Uint32 & ptrI, DABuffer& pattern, Uint32 len,
               DABuffer& param, Uint32 paramCnt, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle error
    */
@@ -6816,6 +6838,7 @@ Dbspj::expand(Local_pattern_store& dst, 
               DABuffer& pattern, Uint32 len,
               DABuffer& param, Uint32 paramCnt)
 {
+  jam();
   /**
    * TODO handle error
    */

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2011-10-13 17:13:02 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2011-10-23 08:38:06 +0000
@@ -3226,7 +3226,7 @@ void Dbtc::tckeyreq050Lab(Signal* signal
   req->tableId = Ttableref;
   req->hashValue = TdistrHashValue;
   req->distr_key_indicator = regCachePtr->distributionKeyIndicator;
-  * (EmulatedJamBuffer**)req->jamBuffer = jamBuffer();
+  req->jamBufferPtr = jamBuffer();
 
   /*-------------------------------------------------------------*/
   /* FOR EFFICIENCY REASONS WE AVOID THE SIGNAL SENDING HERE AND */
@@ -10926,7 +10926,7 @@ void Dbtc::execDIH_SCAN_TAB_CONF(Signal*
     req->tableId = tabPtr.i;
     req->hashValue = cachePtr.p->distributionKey;
     req->distr_key_indicator = tabPtr.p->get_user_defined_partitioning();
-    * (EmulatedJamBuffer**)req->jamBuffer = jamBuffer();
+    req->jamBufferPtr = jamBuffer();
     EXECUTE_DIRECT(DBDIH, GSN_DIGETNODESREQ, signal,
                    DiGetNodesReq::SignalLength, 0);
     UintR TerrorIndicator = signal->theData[0];
@@ -13343,17 +13343,12 @@ Dbtc::ndbinfo_write_trans(Ndbinfo::Row &
     return false;
   }
 
-  char transid[64];
-  BaseString::snprintf(transid, sizeof(transid),
-                       "%.8x.%.8x",
-                       transPtr.p->transid[0],
-                       transPtr.p->transid[1]);
-
   row.write_uint32(getOwnNodeId());
   row.write_uint32(instance());   // block instance
   row.write_uint32(transPtr.i);
   row.write_uint32(transPtr.p->ndbapiBlockref);
-  row.write_string(transid);
+  row.write_uint32(transPtr.p->transid[0]);
+  row.write_uint32(transPtr.p->transid[1]);
   row.write_uint32(conState);
   row.write_uint32(transPtr.p->m_flags);
   row.write_uint32(transPtr.p->lqhkeyreqrec);

=== modified file 'storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp'
--- a/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp	2011-10-17 18:13:57 +0000
@@ -149,8 +149,9 @@ public:
   // schema trans
   Uint32 c_schemaTransId;
   Uint32 c_schemaTransKey;
-  Uint32 c_hashMapId;
-  Uint32 c_hashMapVersion;
+  // intersignal transient store of: hash_map, logfilegroup, tablespace
+  Uint32 c_objectId;
+  Uint32 c_objectVersion;
 
 public:
   Ndbcntr(Block_context&);

=== modified file 'storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp	2011-09-02 09:16:56 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp	2011-10-20 19:52:11 +0000
@@ -2204,8 +2204,8 @@ Ndbcntr::execCREATE_HASH_MAP_CONF(Signal
   if (conf->senderData == 0)
   {
     jam();
-    c_hashMapId = conf->objectId;
-    c_hashMapVersion = conf->objectVersion;
+    c_objectId = conf->objectId;
+    c_objectVersion = conf->objectVersion;
   }
 
   createSystableLab(signal, 0);
@@ -2274,8 +2274,8 @@ Ndbcntr::createDDObjects(Signal * signal
     {
       jam();
       fg.TS_ExtentSize = Uint32(entry->size);
-      fg.TS_LogfileGroupId = RNIL;
-      fg.TS_LogfileGroupVersion = RNIL;
+      fg.TS_LogfileGroupId = c_objectId;
+      fg.TS_LogfileGroupVersion = c_objectVersion;
     }
 
     SimpleProperties::UnpackStatus s;
@@ -2310,8 +2310,8 @@ Ndbcntr::createDDObjects(Signal * signal
     DictFilegroupInfo::File f; f.init();
     BaseString::snprintf(f.FileName, sizeof(f.FileName), "%s", entry->name);
     f.FileType = entry->type;
-    f.FilegroupId = RNIL;
-    f.FilegroupVersion = RNIL;
+    f.FilegroupId = c_objectId;
+    f.FilegroupVersion = c_objectVersion;
     f.FileSizeHi = Uint32(entry->size >> 32);
     f.FileSizeLo = Uint32(entry->size);
 
@@ -2371,6 +2371,8 @@ Ndbcntr::execCREATE_FILEGROUP_CONF(Signa
 {
   jamEntry();
   CreateFilegroupConf* conf = (CreateFilegroupConf*)signal->getDataPtr();
+  c_objectId = conf->filegroupId;
+  c_objectVersion = conf->filegroupVersion;
   createDDObjects(signal, conf->senderData + 1);
 }
 
@@ -2433,8 +2435,8 @@ void Ndbcntr::createSystableLab(Signal* 
   //w.add(DictTabInfo::KeyLength, 1);
   w.add(DictTabInfo::TableTypeVal, (Uint32)table.tableType);
   w.add(DictTabInfo::SingleUserMode, (Uint32)NDB_SUM_READ_WRITE);
-  w.add(DictTabInfo::HashMapObjectId, c_hashMapId);
-  w.add(DictTabInfo::HashMapVersion, c_hashMapVersion);
+  w.add(DictTabInfo::HashMapObjectId, c_objectId);
+  w.add(DictTabInfo::HashMapVersion, c_objectVersion);
 
   for (unsigned i = 0; i < table.columnCount; i++) {
     const SysColumn& column = table.columnList[i];

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp	2011-09-07 10:08:09 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp	2011-10-27 12:19:57 +0000
@@ -110,7 +110,7 @@ AsyncFile::writeReq(Request * request)
     bool write_not_complete = true;
 
     while(write_not_complete) {
-      int totsize = 0;
+      size_t totsize = 0;
       off_t offset = request->par.readWrite.pages[page_num].offset;
       char* bufptr = theWriteBuffer;
 
@@ -128,7 +128,7 @@ AsyncFile::writeReq(Request * request)
           if (((i + 1) < request->par.readWrite.numberOfPages)) {
             // There are more pages to write
             // Check that offsets are consequtive
-            off_t tmp = page_offset + request->par.readWrite.pages[i].size;
+            off_t tmp=(off_t)(page_offset+request->par.readWrite.pages[i].size);
             if (tmp != request->par.readWrite.pages[i+1].offset) {
               // Next page is not aligned with previous, not allowed
               DEBUG(ndbout_c("Page offsets are not aligned"));
@@ -143,7 +143,7 @@ AsyncFile::writeReq(Request * request)
               break;
             }
           }
-          page_offset += request->par.readWrite.pages[i].size;
+          page_offset += (off_t)request->par.readWrite.pages[i].size;
         }
         bufptr = theWriteBuffer;
       } else {

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp	2011-10-20 13:01:37 +0000
@@ -197,7 +197,7 @@ Filename::set(Ndbfs* fs,
   strcat(theName, fileExtension[type]);
   
   if(dir == true){
-    for(int l = strlen(theName) - 1; l >= 0; l--){
+    for(int l = (int)strlen(theName) - 1; l >= 0; l--){
       if(theName[l] == DIR_SEPARATOR[0]){
 	theName[l] = 0;
 	break;

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/Win32AsyncFile.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/Win32AsyncFile.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Win32AsyncFile.cpp	2011-10-20 18:36:21 +0000
@@ -217,7 +217,7 @@ Win32AsyncFile::readBuffer(Request* req,
     DWORD dwBytesRead;
     BOOL bRead = ReadFile(hFile,
                           buf,
-                          size,
+                          (DWORD)size,
                           &dwBytesRead,
                           &ov);
     if(!bRead){
@@ -248,7 +248,7 @@ Win32AsyncFile::readBuffer(Request* req,
 
     buf += bytes_read;
     size -= bytes_read;
-    offset += bytes_read;
+    offset += (off_t)bytes_read;
   }
   return 0;
 }
@@ -277,7 +277,7 @@ Win32AsyncFile::writeBuffer(const char *
     size_t bytes_written = 0;
 
     DWORD dwWritten;
-    BOOL bWrite = WriteFile(hFile, buf, bytes_to_write, &dwWritten, &ov);
+    BOOL bWrite = WriteFile(hFile, buf, (DWORD)bytes_to_write, &dwWritten, &ov);
     if(!bWrite) {
       return GetLastError();
     }
@@ -288,7 +288,7 @@ Win32AsyncFile::writeBuffer(const char *
 
     buf += bytes_written;
     size -= bytes_written;
-    offset += bytes_written;
+    offset += (off_t)bytes_written;
   }
   return 0;
 }
@@ -393,7 +393,7 @@ loop:
   do {
     if (0 != strcmp(".", ffd.cFileName) && 0 != strcmp("..", ffd.cFileName))
     {
-      int len = strlen(path);
+      int len = (int)strlen(path);
       strcat(path, ffd.cFileName);
       if(DeleteFile(path) || RemoveDirectory(path)) 
       {

=== modified file 'storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-09-14 13:56:17 +0000
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-10-20 19:52:11 +0000
@@ -470,31 +470,34 @@ void Qmgr::setCCDelay(UintR aCCDelay)
 void Qmgr::execCONNECT_REP(Signal* signal)
 {
   jamEntry();
-  const Uint32 nodeId = signal->theData[0];
+  const Uint32 connectedNodeId = signal->theData[0];
 
   if (ERROR_INSERTED(931))
   {
     jam();
-    ndbout_c("Discarding CONNECT_REP(%d)", nodeId);
-    infoEvent("Discarding CONNECT_REP(%d)", nodeId);
+    ndbout_c("Discarding CONNECT_REP(%d)", connectedNodeId);
+    infoEvent("Discarding CONNECT_REP(%d)", connectedNodeId);
     return;
   }
 
-  c_connectedNodes.set(nodeId);
+  c_connectedNodes.set(connectedNodeId);
 
-  NodeRecPtr nodePtr;
-  nodePtr.i = nodeId;
-  ptrCheckGuard(nodePtr, MAX_NODES, nodeRec);
-  nodePtr.p->m_secret = 0;
+  {
+    NodeRecPtr connectedNodePtr;
+    connectedNodePtr.i = connectedNodeId;
+    ptrCheckGuard(connectedNodePtr, MAX_NODES, nodeRec);
+    connectedNodePtr.p->m_secret = 0;
+  }
 
-  nodePtr.i = getOwnNodeId();
-  ptrCheckGuard(nodePtr, MAX_NODES, nodeRec);
-  NodeInfo nodeInfo = getNodeInfo(nodeId);
-  switch(nodePtr.p->phase){
+  NodeRecPtr myNodePtr;
+  myNodePtr.i = getOwnNodeId();
+  ptrCheckGuard(myNodePtr, MAX_NODES, nodeRec);
+  NodeInfo connectedNodeInfo = getNodeInfo(connectedNodeId);
+  switch(myNodePtr.p->phase){
   case ZRUNNING:
-    if (nodeInfo.getType() == NodeInfo::DB)
+    if (connectedNodeInfo.getType() == NodeInfo::DB)
     {
-      ndbrequire(!c_clusterNodes.get(nodeId));
+      ndbrequire(!c_clusterNodes.get(connectedNodeId));
     }
   case ZSTARTING:
     jam();
@@ -504,16 +507,17 @@ void Qmgr::execCONNECT_REP(Signal* signa
     jam();
     return;
   case ZAPI_ACTIVE:
+    ndbrequire(false);
   case ZAPI_INACTIVE:
-    return;
+    ndbrequire(false);
   case ZINIT:
-    ndbrequire(getNodeInfo(nodeId).m_type == NodeInfo::MGM);
+    ndbrequire(getNodeInfo(connectedNodeId).m_type == NodeInfo::MGM);
     break;
   default:
     ndbrequire(false);
   }
 
-  if (nodeInfo.getType() != NodeInfo::DB)
+  if (connectedNodeInfo.getType() != NodeInfo::DB)
   {
     jam();
     return;
@@ -522,24 +526,24 @@ void Qmgr::execCONNECT_REP(Signal* signa
   switch(c_start.m_gsn){
   case GSN_CM_REGREQ:
     jam();
-    sendCmRegReq(signal, nodeId);
+    sendCmRegReq(signal, connectedNodeId);
 
     /**
      * We're waiting for CM_REGCONF c_start.m_nodes contains all configured
      *   nodes
      */
-    ndbrequire(nodePtr.p->phase == ZSTARTING);
-    ndbrequire(c_start.m_nodes.isWaitingFor(nodeId));
+    ndbrequire(myNodePtr.p->phase == ZSTARTING);
+    ndbrequire(c_start.m_nodes.isWaitingFor(connectedNodeId));
     return;
   case GSN_CM_NODEINFOREQ:
     jam();
     
-    if (c_start.m_nodes.isWaitingFor(nodeId))
+    if (c_start.m_nodes.isWaitingFor(connectedNodeId))
     {
       jam();
       ndbrequire(getOwnNodeId() != cpresident);
-      ndbrequire(nodePtr.p->phase == ZSTARTING);
-      sendCmNodeInfoReq(signal, nodeId, nodePtr.p);
+      ndbrequire(myNodePtr.p->phase == ZSTARTING);
+      sendCmNodeInfoReq(signal, connectedNodeId, myNodePtr.p);
       return;
     }
     return;
@@ -547,17 +551,17 @@ void Qmgr::execCONNECT_REP(Signal* signa
     jam();
     
     ndbrequire(getOwnNodeId() != cpresident);
-    ndbrequire(nodePtr.p->phase == ZRUNNING);
-    if (c_start.m_nodes.isWaitingFor(nodeId))
+    ndbrequire(myNodePtr.p->phase == ZRUNNING);
+    if (c_start.m_nodes.isWaitingFor(connectedNodeId))
     {
       jam();
-      c_start.m_nodes.clearWaitingFor(nodeId);
+      c_start.m_nodes.clearWaitingFor(connectedNodeId);
       c_start.m_gsn = RNIL;
       
       NodeRecPtr addNodePtr;
-      addNodePtr.i = nodeId;
+      addNodePtr.i = connectedNodeId;
       ptrCheckGuard(addNodePtr, MAX_NDB_NODES, nodeRec);
-      cmAddPrepare(signal, addNodePtr, nodePtr.p);
+      cmAddPrepare(signal, addNodePtr, myNodePtr.p);
       return;
     }
   }
@@ -565,11 +569,11 @@ void Qmgr::execCONNECT_REP(Signal* signa
     (void)1;
   }
   
-  ndbrequire(!c_start.m_nodes.isWaitingFor(nodeId));
-  ndbrequire(!c_readnodes_nodes.get(nodeId));
-  c_readnodes_nodes.set(nodeId);
+  ndbrequire(!c_start.m_nodes.isWaitingFor(connectedNodeId));
+  ndbrequire(!c_readnodes_nodes.get(connectedNodeId));
+  c_readnodes_nodes.set(connectedNodeId);
   signal->theData[0] = reference();
-  sendSignal(calcQmgrBlockRef(nodeId), GSN_READ_NODESREQ, signal, 1, JBA);
+  sendSignal(calcQmgrBlockRef(connectedNodeId), GSN_READ_NODESREQ, signal, 1, JBA);
   return;
 }//Qmgr::execCONNECT_REP()
 
@@ -4788,7 +4792,9 @@ void Qmgr::failReport(Signal* signal,
     if (ERROR_INSERTED(938))
     {
       nodeFailCount++;
-      ndbout_c("QMGR : execFAIL_REP : %u nodes have failed", nodeFailCount);
+      ndbout_c("QMGR : execFAIL_REP(Failed : %u Source : %u  Cause : %u) : "
+               "%u nodes have failed", 
+               aFailedNode, sourceNode, aFailCause, nodeFailCount);
       /* Count DB nodes */
       Uint32 nodeCount = 0;
       for (Uint32 i = 1; i < MAX_NDB_NODES; i++)
@@ -6877,6 +6883,12 @@ Qmgr::execNODE_PINGCONF(Signal* signal)
     return;
   }
 
+  if (ERROR_INSERTED(938))
+  {
+    ndbout_c("QMGR : execNODE_PING_CONF() from %u in tick %u",
+             sendersNodeId, m_connectivity_check.m_tick);
+  }
+
   /* Node must have been pinged, we must be waiting for the response,
    * or the node must have already failed
    */

=== modified file 'storage/ndb/src/kernel/error/ErrorReporter.cpp'
--- a/storage/ndb/src/kernel/error/ErrorReporter.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/kernel/error/ErrorReporter.cpp	2011-10-21 12:36:44 +0000
@@ -163,7 +163,7 @@ ErrorReporter::formatMessage(int thr_no,
   {
     for (Uint32 i = 1 ; i < num_threads; i++)
     {
-      sofar = strlen(messptr);
+      sofar = (int)strlen(messptr);
       if(sofar < MESSAGE_LENGTH)
       {
 	BaseString::snprintf(messptr + sofar, MESSAGE_LENGTH - sofar,
@@ -172,7 +172,7 @@ ErrorReporter::formatMessage(int thr_no,
     }
   }
 
-  sofar = strlen(messptr);
+  sofar = (int)strlen(messptr);
   if(sofar < MESSAGE_LENGTH)
   {
     BaseString::snprintf(messptr + sofar, MESSAGE_LENGTH - sofar,

=== modified file 'storage/ndb/src/kernel/error/ndbd_exit_codes.c'
--- a/storage/ndb/src/kernel/error/ndbd_exit_codes.c	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/kernel/error/ndbd_exit_codes.c	2011-10-20 13:01:37 +0000
@@ -267,7 +267,7 @@ const char *ndbd_exit_status_message(ndb
 
 int ndbd_exit_string(int err_no, char *str, unsigned int size)
 {
-  unsigned int len;
+  size_t len;
 
   ndbd_exit_classification cl;
   ndbd_exit_status st;
@@ -279,8 +279,8 @@ int ndbd_exit_string(int err_no, char *s
 
     len = my_snprintf(str, size-1, "%s: %s: %s", msg, st_msg, cl_msg);
     str[size-1]= '\0';
-  
-    return len;
+
+    return (int)len;
   }
   return -1;
 }

=== modified file 'storage/ndb/src/kernel/vm/NdbinfoTables.cpp'
--- a/storage/ndb/src/kernel/vm/NdbinfoTables.cpp	2011-10-13 17:13:02 +0000
+++ b/storage/ndb/src/kernel/vm/NdbinfoTables.cpp	2011-10-17 18:13:57 +0000
@@ -204,14 +204,15 @@ DECLARE_NDBINFO_TABLE(THREADSTAT, 18) =
   }
 };
 
-DECLARE_NDBINFO_TABLE(TRANSACTIONS, 10) =
-{ { "transactions", 10, 0, "transactions" },
+DECLARE_NDBINFO_TABLE(TRANSACTIONS, 11) =
+{ { "transactions", 11, 0, "transactions" },
   {
     {"node_id",             Ndbinfo::Number, "node id"},
     {"block_instance",      Ndbinfo::Number, "TC instance no"},
     {"objid",               Ndbinfo::Number, "Object id of transaction object"},
     {"apiref",              Ndbinfo::Number, "API reference"},
-    {"transid",             Ndbinfo::String, "Transaction id"},
+    {"transid0",            Ndbinfo::Number, "Transaction id"},
+    {"transid1",            Ndbinfo::Number, "Transaction id"},
     {"state",               Ndbinfo::Number, "Transaction state"},
     {"flags",               Ndbinfo::Number, "Transaction flags"},
     {"c_ops",               Ndbinfo::Number, "No of operations in transaction" },
@@ -220,15 +221,16 @@ DECLARE_NDBINFO_TABLE(TRANSACTIONS, 10) 
   }
 };
 
-DECLARE_NDBINFO_TABLE(OPERATIONS, 11) =
-{ { "operations", 11, 0, "operations" },
+DECLARE_NDBINFO_TABLE(OPERATIONS, 12) =
+{ { "operations", 12, 0, "operations" },
   {
     {"node_id",             Ndbinfo::Number, "node id"},
     {"block_instance",      Ndbinfo::Number, "LQH instance no"},
     {"objid",               Ndbinfo::Number, "Object id of operation object"},
     {"tcref",               Ndbinfo::Number, "TC reference"},
     {"apiref",              Ndbinfo::Number, "API reference"},
-    {"transid",             Ndbinfo::String, "Transaction id"},
+    {"transid0",            Ndbinfo::Number, "Transaction id"},
+    {"transid1",            Ndbinfo::Number, "Transaction id"},
     {"tableid",             Ndbinfo::Number, "Table id"},
     {"fragmentid",          Ndbinfo::Number, "Fragment id"},
     {"op",                  Ndbinfo::Number, "Operation type"},

=== modified file 'storage/ndb/src/kernel/vm/SimulatedBlock.cpp'
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp	2011-09-02 09:16:56 +0000
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp	2011-10-20 19:41:56 +0000
@@ -1827,7 +1827,7 @@ SimulatedBlock::infoEvent(const char * m
   BaseString::vsnprintf(buf, 96, msg, ap); // 96 = 100 - 4
   va_end(ap);
   
-  int len = strlen(buf) + 1;
+  size_t len = strlen(buf) + 1;
   if(len > 96){
     len = 96;
     buf[95] = 0;
@@ -1847,7 +1847,7 @@ SimulatedBlock::infoEvent(const char * m
   signalT.header.theSendersBlockRef      = reference();
   signalT.header.theTrace                = tTrace;
   signalT.header.theSignalId             = tSignalId;
-  signalT.header.theLength               = ((len+3)/4)+1;
+  signalT.header.theLength               = (Uint32)((len+3)/4)+1;
   
 #ifdef NDBD_MULTITHREADED
   sendlocal(m_threadId,
@@ -1872,7 +1872,7 @@ SimulatedBlock::warningEvent(const char 
   BaseString::vsnprintf(buf, 96, msg, ap); // 96 = 100 - 4
   va_end(ap);
   
-  int len = strlen(buf) + 1;
+  size_t len = strlen(buf) + 1;
   if(len > 96){
     len = 96;
     buf[95] = 0;
@@ -1892,7 +1892,7 @@ SimulatedBlock::warningEvent(const char 
   signalT.header.theSendersBlockRef      = reference();
   signalT.header.theTrace                = tTrace;
   signalT.header.theSignalId             = tSignalId;
-  signalT.header.theLength               = ((len+3)/4)+1;
+  signalT.header.theLength               = (Uint32)((len+3)/4)+1;
 
 #ifdef NDBD_MULTITHREADED
   sendlocal(m_threadId,

=== modified file 'storage/ndb/src/mgmapi/mgmapi.cpp'
--- a/storage/ndb/src/mgmapi/mgmapi.cpp	2011-10-03 12:34:35 +0000
+++ b/storage/ndb/src/mgmapi/mgmapi.cpp	2011-10-27 12:19:57 +0000
@@ -499,7 +499,10 @@ ndb_mgm_call(NdbMgmHandle handle,
   out.println("%s", "");
 
   if (cmd_bulk)
-    out.println(cmd_bulk);
+  {
+    out.write(cmd_bulk, strlen(cmd_bulk));
+    out.write("\n", 1);
+  }
 
   CHECK_TIMEDOUT_RET(handle, in, out, NULL);
 
@@ -2039,7 +2042,7 @@ ndb_mgm_dump_state(NdbMgmHandle handle, 
   char buf[256];
   buf[0] = 0;
   for (int i = 0; i < _num_args; i++){
-    unsigned n = strlen(buf);
+    unsigned n = (unsigned)strlen(buf);
     if (n + 20 > sizeof(buf)) {
       SET_ERROR(handle, NDB_MGM_USAGE_ERROR, "arguments too long");
       DBUG_RETURN(-1);
@@ -2562,7 +2565,7 @@ ndb_mgm_get_configuration2(NdbMgmHandle 
     size_t start = 0;
     do {
       if((read = read_socket(handle->socket, handle->timeout,
-			     &buf64[start], len-start)) < 1){
+			     &buf64[start], (int)(len-start))) < 1){
 	delete[] buf64;
 	buf64 = 0;
         if(read==0)

=== modified file 'storage/ndb/src/mgmapi/ndb_logevent.cpp'
--- a/storage/ndb/src/mgmapi/ndb_logevent.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/mgmapi/ndb_logevent.cpp	2011-10-20 19:41:56 +0000
@@ -616,7 +616,7 @@ int ndb_logevent_get_next(const NdbLogEv
     BaseString tmp(val);
     Vector<BaseString> list;
     tmp.split(list);
-    for (size_t j = 0; j<list.size(); j++)
+    for (unsigned j = 0; j<list.size(); j++)
     {
       dst->Data[j] = atoi(list[j].c_str());
     }

=== modified file 'storage/ndb/src/mgmclient/CommandInterpreter.cpp'
--- a/storage/ndb/src/mgmclient/CommandInterpreter.cpp	2011-02-02 09:20:32 +0000
+++ b/storage/ndb/src/mgmclient/CommandInterpreter.cpp	2011-10-21 12:36:44 +0000
@@ -1154,7 +1154,7 @@ CommandInterpreter::execute_impl(const c
     }
     // for mysql client compatability remove trailing ';'
     {
-      unsigned last= strlen(line)-1;
+      unsigned last= (unsigned)(strlen(line)-1);
       if (line[last] == ';')
       {
 	line[last]= 0;
@@ -2431,7 +2431,7 @@ CommandInterpreter::executeDumpState(int
     return -1;
   }
 
-  for (size_t i = 0; i < args.size(); i++)
+  for (unsigned i = 0; i < args.size(); i++)
   {
     const char* arg = args[i].c_str();
 

=== modified file 'storage/ndb/src/mgmsrv/Defragger.hpp'
--- a/storage/ndb/src/mgmsrv/Defragger.hpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/mgmsrv/Defragger.hpp	2011-10-20 19:41:56 +0000
@@ -36,7 +36,7 @@ class Defragger {
   Vector<DefragBuffer*> m_buffers;
 
   DefragBuffer* find_buffer(NodeId nodeId, Uint32 fragId){
-    for (size_t i = 0; i < m_buffers.size(); i++)
+    for (unsigned i = 0; i < m_buffers.size(); i++)
     {
       DefragBuffer* dbuf = m_buffers[i];
       if (dbuf->m_node_id == nodeId &&
@@ -47,7 +47,7 @@ class Defragger {
   }
 
   void erase_buffer(const DefragBuffer* dbuf){
-    for (size_t i = 0; i < m_buffers.size(); i++)
+    for (unsigned i = 0; i < m_buffers.size(); i++)
     {
       if (m_buffers[i] == dbuf)
       {
@@ -63,7 +63,7 @@ public:
   Defragger() {};
   ~Defragger()
   {
-    for (size_t i = m_buffers.size(); i > 0; --i)
+    for (unsigned i = m_buffers.size(); i > 0; --i)
     {
       delete m_buffers[i-1]; // free the memory of the fragment
     }
@@ -120,7 +120,7 @@ public:
     clear any unassembled signal buffers from node
   */
   void node_failed(NodeId nodeId) {
-    for (size_t i = m_buffers.size(); i > 0; --i)
+    for (unsigned i = m_buffers.size(); i > 0; --i)
     {
       if (m_buffers[i-1]->m_node_id == nodeId)
       {

=== modified file 'storage/ndb/src/mgmsrv/InitConfigFileParser.cpp'
--- a/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp	2011-09-07 10:08:09 +0000
+++ b/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp	2011-10-27 12:19:57 +0000
@@ -186,7 +186,7 @@ InitConfigFileParser::run_config_rules(C
 						      ConfigInfo::m_ConfigRules[i].m_ruleData))
       return 0;
 
-    for(size_t j = 0; j<tmp.size(); j++){
+    for(unsigned j = 0; j<tmp.size(); j++){
       BaseString::snprintf(ctx.fname, sizeof(ctx.fname),
                            "%s", tmp[j].m_sectionType.c_str());
       ctx.type             = InitConfigFileParser::Section;
@@ -478,7 +478,7 @@ bool InitConfigFileParser::convertString
 //****************************************************************************
 static void
 trim(char * str){
-  int len = strlen(str);
+  int len = (int)strlen(str);
   for(len--;
       (str[len] == '\r' || str[len] == '\n' || 
        str[len] == ' ' || str[len] == '\t') && 
@@ -581,7 +581,7 @@ bool
 InitConfigFileParser::storeSection(Context& ctx){
   if(ctx.m_currentSection == NULL)
     return true;
-  for(int i = strlen(ctx.fname) - 1; i>=0; i--){
+  for(int i = (int)strlen(ctx.fname) - 1; i>=0; i--){
     ctx.fname[i] = toupper(ctx.fname[i]);
   }
   BaseString::snprintf(ctx.pname, sizeof(ctx.pname), "%s", ctx.fname);

=== modified file 'storage/ndb/src/mgmsrv/MgmtSrvr.cpp'
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2011-09-28 10:18:35 +0000
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2011-10-21 12:36:44 +0000
@@ -3576,7 +3576,7 @@ MgmtSrvr::alloc_node_id_impl(NodeId& nod
     return false;
 
   // Print list of possible nodes
-  for (size_t i = 0; i < nodes.size(); i++)
+  for (unsigned i = 0; i < nodes.size(); i++)
   {
     const PossibleNode& node = nodes[i];
     g_eventLogger->debug(" [%u]: %u, '%s', %d",

=== modified file 'storage/ndb/src/mgmsrv/Services.cpp'
--- a/storage/ndb/src/mgmsrv/Services.cpp	2011-10-03 10:43:39 +0000
+++ b/storage/ndb/src/mgmsrv/Services.cpp	2011-10-27 12:19:57 +0000
@@ -599,15 +599,17 @@ MgmApiSession::getConfig(Parser_t::Conte
   m_output->println("Content-Transfer-Encoding: base64");
   m_output->print("\n");
 
+  unsigned len = (unsigned)strlen(pack64.c_str());
   if(ERROR_INSERTED(3))
   {
     // Return only half the packed config
     BaseString half64 = pack64.substr(0, pack64.length());
-    m_output->println(half64.c_str());
+    m_output->write(half64.c_str(), (unsigned)strlen(half64.c_str()));
+    m_output->write("\n", 1);
     return;
   }
-  m_output->println(pack64.c_str());
-  m_output->print("\n");
+  m_output->write(pack64.c_str(), len);
+  m_output->write("\n\n", 2);
   return;
 }
 
@@ -1370,12 +1372,12 @@ logevent2str(BaseString& str, int eventT
       str.appfmt("%s=%d\n",ndb_logevent_body[i].token, val);
       if(strcmp(ndb_logevent_body[i].token,"error") == 0)
       {
-        int pretty_text_len= strlen(pretty_text);
+        int pretty_text_len= (int)strlen(pretty_text);
         if(pretty_text_size-pretty_text_len-3 > 0)
         {
           BaseString::snprintf(pretty_text+pretty_text_len, 4 , " - ");
           ndb_error_string(val, pretty_text+(pretty_text_len+3),
-                           pretty_text_size-pretty_text_len-3);
+                           (int)(pretty_text_size-pretty_text_len-3));
         }
       }
     } while (ndb_logevent_body[++i].type == eventType);
@@ -1430,9 +1432,20 @@ Ndb_mgmd_event_service::log(int eventTyp
 
       int r;
       if (m_clients[i].m_parsable)
-        r= out.println(str.c_str());
+      {
+        unsigned len = str.length();
+        r= out.write(str.c_str(), len);
+      }
       else
-        r= out.println(pretty_text);
+      {
+        unsigned len = (unsigned)strlen(pretty_text);
+        r= out.write(pretty_text, len);
+      }
+
+      if (! (r < 0))
+      {
+        r = out.write("\n", 1);
+      }
 
       if (r<0)
       {
@@ -1631,7 +1644,7 @@ MgmApiSession::listen_event(Parser<MgmAp
   Vector<BaseString> list;
   param.trim();
   param.split(list, " ,");
-  for(size_t i = 0; i<list.size(); i++){
+  for(unsigned i = 0; i<list.size(); i++){
     Vector<BaseString> spec;
     list[i].trim();
     list[i].split(spec, "=:");
@@ -1852,7 +1865,7 @@ MgmApiSession::list_session(SocketServer
   lister->m_output->println("session.%llu.m_stop: %d",id,s->m_stop);
   if(s->m_ctx)
   {
-    int l= strlen(s->m_ctx->m_tokenBuffer);
+    int l= (int)strlen(s->m_ctx->m_tokenBuffer);
     char *buf= (char*) malloc(2*l+1);
     char *b= buf;
     for(int i=0; i<l;i++)
@@ -1922,7 +1935,7 @@ MgmApiSession::get_session(SocketServer:
   p->l->m_output->println("m_stop: %d",s->m_stop);
   if(s->m_ctx)
   {
-    int l= strlen(s->m_ctx->m_tokenBuffer);
+    int l= (int)strlen(s->m_ctx->m_tokenBuffer);
     p->l->m_output->println("parser_buffer_len: %u",l);
     p->l->m_output->println("parser_status: %d",s->m_ctx->m_status);
   }
@@ -2019,7 +2032,7 @@ void MgmApiSession::setConfig(Parser_t::
       if((r= read_socket(m_socket,
                          SOCKET_TIMEOUT,
                          &buf64[start],
-                         len64-start)) < 1)
+                         (int)(len64-start))) < 1)
       {
         delete[] buf64;
         result.assfmt("read_socket failed, errno: %d", errno);

=== modified file 'storage/ndb/src/ndbapi/Ndb.cpp'
--- a/storage/ndb/src/ndbapi/Ndb.cpp	2011-09-09 09:30:43 +0000
+++ b/storage/ndb/src/ndbapi/Ndb.cpp	2011-10-17 18:13:57 +0000
@@ -2254,13 +2254,31 @@ Ndb::getNdbErrorDetail(const NdbError& e
 void
 Ndb::setCustomData(void* _customDataPtr)
 {
-  theImpl->customDataPtr = _customDataPtr;
+  theImpl->customData = Uint64(_customDataPtr);
 }
 
 void*
 Ndb::getCustomData() const
 {
-  return theImpl->customDataPtr;
+  return (void*)theImpl->customData;
+}
+
+void
+Ndb::setCustomData64(Uint64 _customData)
+{
+  theImpl->customData = _customData;
+}
+
+Uint64
+Ndb::getCustomData64() const
+{
+  return theImpl->customData;
+}
+
+Uint64
+Ndb::getNextTransactionId() const
+{
+  return theFirstTransId;
 }
 
 Uint32

=== modified file 'storage/ndb/src/ndbapi/NdbBlob.cpp'
--- a/storage/ndb/src/ndbapi/NdbBlob.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/ndbapi/NdbBlob.cpp	2011-10-21 12:36:44 +0000
@@ -696,7 +696,7 @@ NdbBlob::copyKeyFromRow(const NdbRecord 
     unpacked+= unpacked_len;
   }
 
-  packedBuf.size= packed - packedBuf.data;
+  packedBuf.size= (Uint32)(packed - packedBuf.data);
   packedBuf.zerorest();
   assert(unpacked == unpackedBuf.data + unpackedBuf.size);
   DBUG_RETURN(0);

=== modified file 'storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2011-06-07 12:31:16 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2011-10-21 12:36:44 +0000
@@ -1260,7 +1260,7 @@ NdbTableImpl::buildColumnHash(){
       Uint32 bucket = hv & m_columnHashMask;
       bucket = (bucket < size ? bucket : bucket - size);
       m_columnHash[bucket] = (sz << 16) | (((size - bucket) + pos) << 1);
-      for(size_t j = 0; j<sz; j++, pos++){
+      for(unsigned j = 0; j<sz; j++, pos++){
 	Uint32 col = chains[i][j];	
 	Uint32 hv = hashValues[col];
 	if (m_columnHash.push_back((col << 16) | hv))

=== modified file 'storage/ndb/src/ndbapi/NdbImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbImpl.hpp	2011-09-09 13:33:52 +0000
+++ b/storage/ndb/src/ndbapi/NdbImpl.hpp	2011-10-20 19:52:11 +0000
@@ -129,7 +129,7 @@ public:
 
   BaseString m_systemPrefix; // Buffer for preformatted for <sys>/<def>/
   
-  void* customDataPtr;
+  Uint64 customData;
 
   Uint64 clientStats[ Ndb::NumClientStatistics ];
   

=== modified file 'storage/ndb/src/ndbapi/NdbOperationExec.cpp'
--- a/storage/ndb/src/ndbapi/NdbOperationExec.cpp	2011-09-02 09:16:56 +0000
+++ b/storage/ndb/src/ndbapi/NdbOperationExec.cpp	2011-10-23 07:47:05 +0000
@@ -1744,7 +1744,7 @@ NdbOperation::receiveTCKEYREF(const NdbA
   if (aSignal->getLength() == TcKeyRef::SignalLength)
   {
     // Signal may contain additional error data
-    theError.details = (char *) aSignal->readData(5);
+    theError.details = (char *)UintPtr(aSignal->readData(5));
   }
 
   theStatus = Finished;

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilder.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-10-03 08:02:28 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-10-20 19:52:11 +0000
@@ -1661,7 +1661,8 @@ NdbQueryIndexScanOperationDefImpl::NdbQu
                            int& error)
   : NdbQueryScanOperationDefImpl(table,options,ident,ix,id,error),
   m_interface(*this), 
-  m_index(index)
+  m_index(index),
+  m_paramInPruneKey(false)
 {
   memset(&m_bound, 0, sizeof m_bound);
   if (bound!=NULL) {
@@ -2316,7 +2317,7 @@ NdbQueryLookupOperationDefImpl::appendKe
 
 
 Uint32
-NdbQueryIndexScanOperationDefImpl::appendPrunePattern(Uint32Buffer& serializedDef) const
+NdbQueryIndexScanOperationDefImpl::appendPrunePattern(Uint32Buffer& serializedDef)
 {
   Uint32 appendedPattern = 0;
 
@@ -2408,6 +2409,7 @@ NdbQueryIndexScanOperationDefImpl::appen
           }
           case NdbQueryOperandImpl::Param:
             appendedPattern |= QN_ScanIndexNode::SI_PRUNE_PARAMS;
+            m_paramInPruneKey = true;
             serializedDef.append(QueryPattern::param(paramCnt++));
             break;
           default:

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-10-03 08:02:28 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-10-20 19:52:11 +0000
@@ -382,6 +382,15 @@ public:
   virtual const IndexBound* getBounds() const
   { return NULL; } 
 
+  /** 
+   * True if this is a prunable scan and there are NdbQueryParamOperands in the
+   * distribution key.
+   */
+  virtual bool hasParamInPruneKey() const
+  {
+    return false;
+  }
+
   // Return 'true' is query type is a multi-row scan
   virtual bool isScanOperation() const = 0;
 
@@ -523,7 +532,7 @@ protected:
   virtual Uint32 appendBoundPattern(Uint32Buffer& serializedDef) const
   { return 0; }
 
-  virtual Uint32 appendPrunePattern(Uint32Buffer& serializedDef) const
+  virtual Uint32 appendPrunePattern(Uint32Buffer& serializedDef)
   { return 0; }
 
 }; // class NdbQueryScanOperationDefImpl
@@ -553,11 +562,16 @@ public:
   virtual const IndexBound* getBounds() const
   { return &m_bound; } 
 
+  bool hasParamInPruneKey() const
+  {
+    return m_paramInPruneKey;
+  }
+
 protected:
   // Append pattern for creating complete range bounds to serialized code 
   virtual Uint32 appendBoundPattern(Uint32Buffer& serializedDef) const;
 
-  virtual Uint32 appendPrunePattern(Uint32Buffer& serializedDef) const;
+  virtual Uint32 appendPrunePattern(Uint32Buffer& serializedDef);
 
 private:
 
@@ -583,6 +597,12 @@ private:
 
   /** True if there is a set of bounds.*/
   IndexBound m_bound;
+
+  /** 
+   * True if scan is prunable and there are NdbQueryParamOperands in the 
+   * distribution key.
+   */
+  bool m_paramInPruneKey;
 }; // class NdbQueryIndexScanOperationDefImpl
 
 

=== modified file 'storage/ndb/src/ndbapi/NdbQueryOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-09-28 10:55:58 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-10-23 07:47:05 +0000
@@ -4562,6 +4562,10 @@ NdbQueryOperationImpl::prepareAttrInfo(U
     {
       requestInfo |= QN_ScanIndexParameters::SIP_PARALLEL;
     }
+    if (def.hasParamInPruneKey())
+    {
+      requestInfo |= QN_ScanIndexParameters::SIP_PRUNE_PARAMS;
+    }
     param->requestInfo = requestInfo;
     // Check that both values fit in param->batchSize.
     assert(getMaxBatchRows() < (1<<QN_ScanIndexParameters::BatchRowBits));
@@ -4970,7 +4974,7 @@ NdbQueryOperationImpl::execTCKEYREF(cons
     if (aSignal->getLength() == TcKeyRef::SignalLength)
     {
       // Signal may contain additional error data
-      getQuery().m_error.details = (char *)ref->errorData;
+      getQuery().m_error.details = (char *)UintPtr(ref->errorData);
     }
   }
 

=== modified file 'storage/ndb/src/ndbapi/NdbScanOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp	2011-10-18 10:43:35 +0000
+++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp	2011-10-27 12:19:57 +0000
@@ -57,22 +57,27 @@ NdbScanOperation::~NdbScanOperation()
 }
 
 void
-NdbScanOperation::setErrorCode(int aErrorCode){
+NdbScanOperation::setErrorCode(int aErrorCode) const
+{
+  NdbScanOperation *pnonConstThis=const_cast<NdbScanOperation *>(this);
+
   NdbTransaction* tmp = theNdbCon;
-  theNdbCon = m_transConnection;
+  pnonConstThis->theNdbCon = m_transConnection;
   NdbOperation::setErrorCode(aErrorCode);
-  theNdbCon = tmp;
+  pnonConstThis->theNdbCon = tmp;
 }
 
 void
-NdbScanOperation::setErrorCodeAbort(int aErrorCode){
+NdbScanOperation::setErrorCodeAbort(int aErrorCode) const
+{
+  NdbScanOperation *pnonConstThis=const_cast<NdbScanOperation *>(this);
+
   NdbTransaction* tmp = theNdbCon;
-  theNdbCon = m_transConnection;
+  pnonConstThis->theNdbCon = m_transConnection;
   NdbOperation::setErrorCodeAbort(aErrorCode);
-  theNdbCon = tmp;
+  pnonConstThis->theNdbCon = tmp;
 }
 
-  
 /*****************************************************************************
  * int init();
  *
@@ -2028,7 +2033,9 @@ NdbScanOperation::send_next_scan(Uint32 
 }
 
 int 
-NdbScanOperation::prepareSend(Uint32  TC_ConnectPtr, Uint64  TransactionId)
+NdbScanOperation::prepareSend(Uint32  TC_ConnectPtr,
+                              Uint64  TransactionId,
+                              NdbOperation::AbortOption)
 {
   abort();
   return 0;
@@ -4078,3 +4085,14 @@ NdbScanOperation::getPruned() const
           (m_pruneState == SPS_FIXED));
 }
 
+NdbBlob*
+NdbScanOperation::getBlobHandle(const char* anAttrName) const
+{
+  return NdbOperation::getBlobHandle(anAttrName);
+}
+
+NdbBlob*
+NdbScanOperation::getBlobHandle(Uint32 anAttrId) const
+{
+  return NdbOperation::getBlobHandle(anAttrId);
+}

=== modified file 'storage/ndb/src/ndbapi/NdbTransaction.cpp'
--- a/storage/ndb/src/ndbapi/NdbTransaction.cpp	2011-09-02 09:16:56 +0000
+++ b/storage/ndb/src/ndbapi/NdbTransaction.cpp	2011-10-23 07:47:05 +0000
@@ -64,10 +64,6 @@ NdbTransaction::NdbTransaction( Ndb* aNd
   theTransactionIsStarted(false),
   theDBnode(0),
   theReleaseOnClose(false),
-  // Composite query operations
-  m_firstQuery(NULL),
-  m_firstExecQuery(NULL),
-  m_firstActiveQuery(NULL),
   // Scan operations
   m_waitForReply(true),
   m_theFirstScanOperation(NULL),
@@ -75,7 +71,6 @@ NdbTransaction::NdbTransaction( Ndb* aNd
   m_firstExecutedScanOp(NULL),
   // Scan operations
   theScanningOp(NULL),
-  m_scanningQuery(NULL),
   theBuddyConPtr(0xFFFFFFFF),
   theBlobFlag(false),
   thePendingBlobOps(0),
@@ -83,8 +78,15 @@ NdbTransaction::NdbTransaction( Ndb* aNd
   maxPendingBlobWriteBytes(~Uint32(0)),
   pendingBlobReadBytes(0),
   pendingBlobWriteBytes(0),
+  // Lock handle
   m_theFirstLockHandle(NULL),
   m_theLastLockHandle(NULL),
+  // Composite query operations
+  m_firstQuery(NULL),
+  m_firstExecQuery(NULL),
+  m_firstActiveQuery(NULL),
+  m_scanningQuery(NULL),
+  //
   m_tcRef(numberToRef(DBTC, 0))
 {
   theListState = NotInList;
@@ -2090,7 +2092,7 @@ transactions.
     if (aSignal->getLength() == TcRollbackRep::SignalLength)
     {
       // Signal may contain additional error data
-      theError.details = (char *) aSignal->readData(5);
+      theError.details = (char *)UintPtr(aSignal->readData(5));
     }
 
     /**********************************************************************/

=== modified file 'storage/ndb/src/ndbapi/NdbWaitGroup.cpp'
--- a/storage/ndb/src/ndbapi/NdbWaitGroup.cpp	2011-09-09 10:48:14 +0000
+++ b/storage/ndb/src/ndbapi/NdbWaitGroup.cpp	2011-10-19 11:56:10 +0000
@@ -25,10 +25,10 @@
 
 NdbWaitGroup::NdbWaitGroup(Ndb_cluster_connection *_conn, int _ndbs) :
   m_conn(_conn),
+  m_multiWaitHandler(0),
   m_array_size(_ndbs),
   m_count(0),
-  m_nodeId(0),
-  m_multiWaitHandler(0)
+  m_nodeId(0)
 {
   /* Allocate the array of Ndbs */
   m_array = new Ndb *[m_array_size];

=== modified file 'storage/ndb/src/ndbapi/Ndbinit.cpp'
--- a/storage/ndb/src/ndbapi/Ndbinit.cpp	2011-09-09 13:09:02 +0000
+++ b/storage/ndb/src/ndbapi/Ndbinit.cpp	2011-10-17 18:13:57 +0000
@@ -207,7 +207,7 @@ NdbImpl::NdbImpl(Ndb_cluster_connection 
     wakeHandler(0),
     wakeContext(~Uint32(0)),
     m_ev_op(0),
-    customDataPtr(0)
+    customData(0)
 {
   int i;
   for (i = 0; i < MAX_NDB_NODES; i++) {

=== modified file 'storage/ndb/src/ndbapi/TransporterFacade.cpp'
--- a/storage/ndb/src/ndbapi/TransporterFacade.cpp	2011-10-14 14:47:13 +0000
+++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp	2011-10-20 19:52:11 +0000
@@ -548,11 +548,11 @@ TransporterFacade::TransporterFacade(Glo
   currentSendLimit(1),
   dozer(NULL),
   theStopReceive(0),
+  sendThreadWaitMillisec(10),
   theSendThread(NULL),
   theReceiveThread(NULL),
   m_fragmented_signal_id(0),
-  m_globalDictCache(cache),
-  sendThreadWaitMillisec(10)
+  m_globalDictCache(cache)
 {
   DBUG_ENTER("TransporterFacade::TransporterFacade");
   theMutexPtr = NdbMutex_CreateWithName("TTFM");

=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2011-10-07 18:15:59 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2011-10-20 19:52:11 +0000
@@ -979,17 +979,17 @@ int ndb_error_string(int err_no, char *s
   int len;
 
   assert(size > 1);
-  if(size <= 1) 
+  if(size <= 1)
     return 0;
+
   error.code = err_no;
   ndberror_update(&error);
 
-  len =
-    my_snprintf(str, size-1, "%s: %s: %s", error.message,
+  len = (int)my_snprintf(str, size-1, "%s: %s: %s", error.message,
 		ndberror_status_message(error.status),
 		ndberror_classification_message(error.classification));
   str[size-1]= '\0';
-  
+
   if (error.classification != UE)
     return len;
   return -len;

=== modified file 'storage/ndb/test/include/NDBT_Table.hpp'
--- a/storage/ndb/test/include/NDBT_Table.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/include/NDBT_Table.hpp	2011-10-22 09:47:36 +0000
@@ -93,6 +93,7 @@ public: 
     // validate() might cause initialization order problem with charset
     NdbError error;
     int ret = aggregate(error);
+    (void)ret;
     assert(ret == 0);
   }
   

=== modified file 'storage/ndb/test/include/NdbMgmd.hpp'
--- a/storage/ndb/test/include/NdbMgmd.hpp	2011-09-14 13:49:19 +0000
+++ b/storage/ndb/test/include/NdbMgmd.hpp	2011-10-24 13:14:28 +0000
@@ -227,7 +227,7 @@ public:
 
     SocketOutputStream out(socket());
 
-    if (out.println(cmd)){
+    if (out.println("%s", cmd)){
       error("call: println failed at line %d", __LINE__);
       return false;
     }
@@ -278,9 +278,16 @@ public:
     }
 
     // Send any bulk data
-    if (bulk && out.println(bulk)){
-      error("call: print('<bulk>') failed at line %d", __LINE__);
-      return false;
+    if (bulk)
+    {
+      if (out.write(bulk, strlen(bulk)) >= 0)
+      {
+        if (out.write("\n", 1) < 0)
+        {
+          error("call: print('<bulk>') failed at line %d", __LINE__);
+          return false;
+        }
+      }
     }
 
     BaseString buf;

=== modified file 'storage/ndb/test/ndbapi/ScanFunctions.hpp'
--- a/storage/ndb/test/ndbapi/ScanFunctions.hpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/test/ndbapi/ScanFunctions.hpp	2011-10-21 12:36:44 +0000
@@ -29,7 +29,7 @@ class AttribList {
 public:
   AttribList(){};
   ~AttribList(){
-    for(size_t i = 0; i < attriblist.size(); i++){      
+    for(unsigned i = 0; i < attriblist.size(); i++){      
       delete attriblist[i];
     }
   };
@@ -335,7 +335,7 @@ void AttribList::buildAttribList(const N
   attriblist.push_back(attr);  
 
 #if 1
-  for(size_t j = 0; j < attriblist.size(); j++){
+  for(unsigned j = 0; j < attriblist.size(); j++){
 
     g_info << attriblist[j]->numAttribs << ": " ;
     for(int a = 0; a < attriblist[j]->numAttribs; a++)

=== modified file 'storage/ndb/test/ndbapi/testDict.cpp'
--- a/storage/ndb/test/ndbapi/testDict.cpp	2011-04-23 08:21:36 +0000
+++ b/storage/ndb/test/ndbapi/testDict.cpp	2011-10-20 12:21:10 +0000
@@ -8167,6 +8167,8 @@ runBug58277loadtable(NDBT_Context* ctx, 
     int cnt = 0;
     for (int i = 0; i < rows; i++)
     {
+      int retries = 10;
+  retry:
       NdbTransaction* pTx = 0;
       CHK2((pTx = pNdb->startTransaction()) != 0, pNdb->getNdbError());
 
@@ -8183,7 +8185,19 @@ runBug58277loadtable(NDBT_Context* ctx, 
         int x[] = {
          -630
         };
-        CHK3(pTx->execute(Commit) == 0, pTx->getNdbError(), x);
+        int res = pTx->execute(Commit);
+        if (res != 0 &&
+            pTx->getNdbError().status == NdbError::TemporaryError)
+        {
+          retries--;
+          if (retries >= 0)
+          {
+            pTx->close();
+            NdbSleep_MilliSleep(10);
+            goto retry;
+          }
+        }
+        CHK3(res == 0, pTx->getNdbError(), x);
         cnt++;
       }
       while (0);

=== modified file 'storage/ndb/test/ndbapi/testMgm.cpp'
--- a/storage/ndb/test/ndbapi/testMgm.cpp	2011-09-19 20:03:43 +0000
+++ b/storage/ndb/test/ndbapi/testMgm.cpp	2011-10-24 13:14:28 +0000
@@ -736,8 +736,8 @@ get_nodeid_of_type(NdbMgmd& mgmd, ndb_mg
   int noOfNodes = cs->no_of_nodes;
   int randomnode = myRandom48(noOfNodes);
   ndb_mgm_node_state *ns = cs->node_states + randomnode;
-  assert(ns->node_type == (Uint32)type);
-  assert(ns->node_id);
+  assert((Uint32)ns->node_type == (Uint32)type);
+  assert(ns->node_id != 0);
 
   *nodeId = ns->node_id;
   g_info << "Got node id " << *nodeId << " of type " << type << endl;

=== modified file 'storage/ndb/test/ndbapi/testNodeRestart.cpp'
--- a/storage/ndb/test/ndbapi/testNodeRestart.cpp	2011-09-06 12:43:05 +0000
+++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp	2011-10-27 12:19:57 +0000
@@ -4726,17 +4726,23 @@ int runSplitLatency25PctFail(NDBT_Contex
   /**
    * Now wait for half of cluster to die...
    */
-  ndbout_c("Waiting for half of cluster to die");
-  int not_started = 0;
   const int node_count = restarter.getNumDbNodes();
+  ndbout_c("Waiting for half of cluster (%u/%u) to die", node_count/2, node_count);
+  int not_started = 0;
   do
   {
     not_started = 0;
     for (int i = 0; i < node_count; i++)
     {
-      if (restarter.getNodeStatus(restarter.getDbNodeId(i)) == NDB_MGM_NODE_STATUS_NOT_STARTED)
+      int nodeId = restarter.getDbNodeId(i);
+      int status = restarter.getNodeStatus(nodeId);
+      ndbout_c("Node %u status %u", nodeId, status);
+      if (status == NDB_MGM_NODE_STATUS_NOT_STARTED)
         not_started++;
     }
+    NdbSleep_MilliSleep(2000);
+    ndbout_c("%u / %u in state NDB_MGM_NODE_STATUS_NOT_STARTED(%u)",
+             not_started, node_count, NDB_MGM_NODE_STATUS_NOT_STARTED);
   } while (2 * not_started != node_count);
 
   ndbout_c("Restarting cluster");

=== modified file 'storage/ndb/test/rqg/parseargs.sh'
--- a/storage/ndb/test/rqg/parseargs.sh	2011-10-14 10:52:20 +0000
+++ b/storage/ndb/test/rqg/parseargs.sh	2011-10-20 07:56:31 +0000
@@ -177,18 +177,24 @@ mysql_exe="$EXE_MYSQL --show-warnings --
 mysqltest_exe="$EXE_MYSQLTEST --user=${user} --host=${host} --port=${port}"
 export RQG_HOME
 
-md5sum="md5sum"
 getepochtime="date +%s"
 if [ `uname` = "SunOS" ]
 then
     getepochtime="nawk 'BEGIN{print srand();}'"
 fi
 
+md5sum="md5sum"
 if [ `uname` = "SunOS" ]
 then
     md5sum="digest -a md5"
 fi
 
+awk_exe=awk
+if [ `uname` = "SunOS" ]
+then
+    awk_exe=gawk
+fi
+
 pre="spj"
 opre="$pre.$$"
 myisam_db="${pre}_myisam"

=== modified file 'storage/ndb/test/rqg/run_rqg.sh'
--- a/storage/ndb/test/rqg/run_rqg.sh	2011-10-14 11:36:51 +0000
+++ b/storage/ndb/test/rqg/run_rqg.sh	2011-10-20 07:56:31 +0000
@@ -61,7 +61,7 @@ EOF
 	$mysqltest_exe ${ndb_db} < $tmp >> ${opre}.$no.ndbpush.$i.txt
     done
 
-    cnt=`$md5sum ${opre}.$no.*.txt | awk '{ print $1;}' | sort | uniq | wc -l`
+    cnt=`$md5sum ${opre}.$no.*.txt | $awk_exe '{ print $1;}' | sort | uniq | wc -l`
     if [ $cnt -ne 1 ]
     then
 	echo -n "$no "
@@ -129,7 +129,7 @@ run_all() {
     md5s=""
     for f in $tmpfiles
     do
-	md5s="$md5s `$md5sum $f | awk '{ print $1;}'`"
+	md5s="$md5s `$md5sum $f | $awk_exe '{ print $1;}'`"
     done
 
     ###
@@ -185,7 +185,7 @@ do
 	echo "--eval set ndb_join_pushdown='\$NDB_JOIN_PUSHDOWN';"
 	echo "$ecp"
 	${gensql} --seed=$us --queries=$queries --dsn="$dsn:database=${myisam_db}" --grammar=$grammar | grep -v "#" |
-        awk '{ print "--sorted_result"; print "--error 0,233,1242,4006"; print; }'
+        $awk_exe '{ print "--sorted_result"; print "--error 0,233,1242,4006"; print; }'
 	echo "--exit"
     ) > ${opre}_test.sql
 

=== modified file 'storage/ndb/test/run-test/atrt.hpp'
--- a/storage/ndb/test/run-test/atrt.hpp	2011-10-17 13:30:56 +0000
+++ b/storage/ndb/test/run-test/atrt.hpp	2011-10-27 12:19:57 +0000
@@ -201,6 +201,7 @@ extern const char * g_ndbd_bin_path;
 extern const char * g_ndbmtd_bin_path;
 extern const char * g_mysqld_bin_path;
 extern const char * g_mysql_install_db_bin_path;
+extern const char * g_libmysqlclient_so_path;
 
 extern const char * g_search_path[];
 

=== modified file 'storage/ndb/test/run-test/files.cpp'
--- a/storage/ndb/test/run-test/files.cpp	2011-10-05 13:57:58 +0000
+++ b/storage/ndb/test/run-test/files.cpp	2011-10-20 19:52:11 +0000
@@ -122,6 +122,24 @@ printfile(FILE* out, Properties& props, 
   fflush(out);
 }
 
+static
+char *
+dirname(const char * path)
+{
+  char * s = strdup(path);
+  size_t len = strlen(s);
+  for (size_t i = 1; i<len; i++)
+  {
+    if (s[len - i] == '/')
+    {
+      s[len - i] = 0;
+      return s;
+    }
+  }
+  free(s);
+  return 0;
+}
+
 bool
 setup_files(atrt_config& config, int setup, int sshx)
 {
@@ -313,8 +331,23 @@ setup_files(atrt_config& config, int set
         }
         fprintf(fenv, "$PATH\n");
 	keys.push_back("PATH");
+
+        {
+          /**
+           * In 5.5...binaries aren't compiled with rpath
+           * So we need an explicit LD_LIBRARY_PATH
+           *
+           * Use path from libmysqlclient.so
+           */
+          char * dir = dirname(g_libmysqlclient_so_path);
+          fprintf(fenv, "LD_LIBRARY_PATH=%s:$LD_LIBRARY_PATH\n", dir);
+          free(dir);
+          keys.push_back("LD_LIBRARY_PATH");
+        }
+
 	for (size_t k = 0; k<keys.size(); k++)
 	  fprintf(fenv, "export %s\n", keys[k].c_str());
+
 	fflush(fenv);
 	fclose(fenv);
       }

=== modified file 'storage/ndb/test/run-test/main.cpp'
--- a/storage/ndb/test/run-test/main.cpp	2011-10-05 13:57:58 +0000
+++ b/storage/ndb/test/run-test/main.cpp	2011-10-20 19:41:56 +0000
@@ -86,6 +86,7 @@ const char * g_ndbd_bin_path = 0;
 const char * g_ndbmtd_bin_path = 0;
 const char * g_mysqld_bin_path = 0;
 const char * g_mysql_install_db_bin_path = 0;
+const char * g_libmysqlclient_so_path = 0;
 
 static struct
 {
@@ -93,11 +94,12 @@ static struct
   const char * exe;
   const char ** var;
 } g_binaries[] = {
-  { true,  "ndb_mgmd",         &g_ndb_mgmd_bin_path},
-  { true,  "ndbd",             &g_ndbd_bin_path },
-  { false, "ndbmtd",           &g_ndbmtd_bin_path },
-  { true,  "mysqld",           &g_mysqld_bin_path },
-  { true,  "mysql_install_db", &g_mysql_install_db_bin_path },
+  { true,  "ndb_mgmd",          &g_ndb_mgmd_bin_path},
+  { true,  "ndbd",              &g_ndbd_bin_path },
+  { false, "ndbmtd",            &g_ndbmtd_bin_path },
+  { true,  "mysqld",            &g_mysqld_bin_path },
+  { true,  "mysql_install_db",  &g_mysql_install_db_bin_path },
+  { true,  "libmysqlclient.so", &g_libmysqlclient_so_path },
   { true, 0, 0 }
 };
 
@@ -108,6 +110,8 @@ g_search_path[] =
   "libexec",
   "sbin",
   "scripts",
+  "lib",
+  "lib/mysql",
   0
 };
 static bool find_binaries();

=== modified file 'storage/ndb/test/src/DbUtil.cpp'
--- a/storage/ndb/test/src/DbUtil.cpp	2011-09-06 12:43:05 +0000
+++ b/storage/ndb/test/src/DbUtil.cpp	2011-10-27 12:19:57 +0000
@@ -215,7 +215,7 @@ DbUtil::mysqlSimplePrepare(const char *q
     printf("Inside DbUtil::mysqlSimplePrepare\n");
   #endif
   MYSQL_STMT *my_stmt= mysql_stmt_init(this->getMysql());
-  if (my_stmt && mysql_stmt_prepare(my_stmt, query, strlen(query))){
+  if (my_stmt && mysql_stmt_prepare(my_stmt, query, (unsigned long)strlen(query))){
     this->printStError(my_stmt,"Prepare Statement Failed");
     mysql_stmt_close(my_stmt);
     return NULL;
@@ -353,7 +353,7 @@ DbUtil::runQuery(const char* sql,
 
 
   MYSQL_STMT *stmt= mysql_stmt_init(m_mysql);
-  if (mysql_stmt_prepare(stmt, sql, strlen(sql)))
+  if (mysql_stmt_prepare(stmt, sql, (unsigned long)strlen(sql)))
   {
     report_error("Failed to prepare: ", m_mysql);
     return false;
@@ -390,7 +390,7 @@ DbUtil::runQuery(const char* sql,
       args.get(name.c_str(), &val_s);
       bind_param[i].buffer_type= MYSQL_TYPE_STRING;
       bind_param[i].buffer= (char*)val_s;
-      bind_param[i].buffer_length= strlen(val_s);
+      bind_param[i].buffer_length= (unsigned long)strlen(val_s);
       g_debug << " param" << name.c_str() << ": " << val_s << endl;
       break;
     default:

=== modified file 'storage/ndb/test/src/HugoQueries.cpp'
--- a/storage/ndb/test/src/HugoQueries.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/test/src/HugoQueries.cpp	2011-10-21 12:36:44 +0000
@@ -42,7 +42,7 @@ HugoQueries::HugoQueries(const NdbQueryD
 
 HugoQueries::~HugoQueries()
 {
-  for (size_t o = 0; o<m_ops.size(); o++)
+  for (unsigned o = 0; o<m_ops.size(); o++)
   {
     while (m_ops[o].m_rows.size())
     {
@@ -57,14 +57,14 @@ HugoQueries::~HugoQueries()
 void
 HugoQueries::allocRows(int batch)
 {
-  for (size_t o = 0; o<m_ops.size(); o++)
+  for (unsigned o = 0; o<m_ops.size(); o++)
   {
     const NdbQueryOperationDef * pOp =m_query_def->getQueryOperation((Uint32)o);
     const NdbDictionary::Table* tab = pOp->getTable();
 
     if (tab)
     {
-      while (m_ops[o].m_rows.size() < (size_t)batch)
+      while (m_ops[o].m_rows.size() < (unsigned)batch)
       {
         m_ops[o].m_rows.push_back(new NDBT_ResultRow(* tab));
       }
@@ -198,7 +198,7 @@ HugoQueries::runLookupQuery(Ndb* pNdb,
         return NDBT_FAILED;
       }
 
-      for (size_t o = 0; o<m_ops.size(); o++)
+      for (unsigned o = 0; o<m_ops.size(); o++)
       {
         NdbQueryOperation * pOp = query->getQueryOperation((Uint32)o);
         HugoQueries::getValueForQueryOp(pOp, m_ops[o].m_rows[b]);
@@ -226,7 +226,7 @@ HugoQueries::runLookupQuery(Ndb* pNdb,
       NdbQuery * query = queries[b];
       if (query->nextResult() == NdbQuery::NextResult_gotRow)
       {
-        for (size_t o = 0; o<m_ops.size(); o++)
+        for (unsigned o = 0; o<m_ops.size(); o++)
         {
           NdbQueryOperation * pOp = query->getQueryOperation((Uint32)o);
           if (!pOp->isRowNULL())
@@ -244,7 +244,7 @@ HugoQueries::runLookupQuery(Ndb* pNdb,
     pTrans->close();
     r += batch;
 
-    for (size_t i = 0; i<batch_rows_found.size(); i++)
+    for (unsigned i = 0; i<batch_rows_found.size(); i++)
       m_rows_found[i] += batch_rows_found[i];
   }
 
@@ -293,7 +293,7 @@ HugoQueries::runScanQuery(Ndb * pNdb,
       return NDBT_FAILED;
     }
 
-    for (size_t o = 0; o<m_ops.size(); o++)
+    for (unsigned o = 0; o<m_ops.size(); o++)
     {
       NdbQueryOperation * pOp = query->getQueryOperation((Uint32)o);
       HugoQueries::getValueForQueryOp(pOp, m_ops[o].m_rows[0]);
@@ -336,7 +336,7 @@ HugoQueries::runScanQuery(Ndb * pNdb,
       return NDBT_OK;
       }
 
-      for (size_t o = 0; o<m_ops.size(); o++)
+      for (unsigned o = 0; o<m_ops.size(); o++)
       {
         NdbQueryOperation * pOp = query->getQueryOperation((Uint32)o);
         if (!pOp->isRowNULL())

=== modified file 'storage/ndb/test/src/HugoQueryBuilder.cpp'
--- a/storage/ndb/test/src/HugoQueryBuilder.cpp	2011-09-02 09:16:56 +0000
+++ b/storage/ndb/test/src/HugoQueryBuilder.cpp	2011-10-21 12:36:44 +0000
@@ -55,7 +55,7 @@ HugoQueryBuilder::init()
 
 HugoQueryBuilder::~HugoQueryBuilder()
 {
-  for (size_t i = 0; i<m_queries.size(); i++)
+  for (unsigned i = 0; i<m_queries.size(); i++)
     m_queries[i]->destroy();
 }
 
@@ -76,7 +76,7 @@ HugoQueryBuilder::fixOptions()
 void
 HugoQueryBuilder::addTable(Ndb* ndb, const NdbDictionary::Table* tab)
 {
-  for (size_t i = 0; i<m_tables.size(); i++)
+  for (unsigned i = 0; i<m_tables.size(); i++)
   {
     if (m_tables[i].m_table == tab)
       return;
@@ -133,7 +133,7 @@ HugoQueryBuilder::getJoinLevel() const
 void
 HugoQueryBuilder::removeTable(const NdbDictionary::Table* tab)
 {
-  for (size_t i = 0; i<m_tables.size(); i++)
+  for (unsigned i = 0; i<m_tables.size(); i++)
   {
     if (m_tables[i].m_table == tab)
     {
@@ -215,11 +215,11 @@ HugoQueryBuilder::checkBindable(Vector<c
                                 Vector<Op> ops,
                                 bool allow_bind_nullable)
 {
-  for (size_t c = 0; c < cols.size(); c++)
+  for (unsigned c = 0; c < cols.size(); c++)
   {
     const NdbDictionary::Column * col = cols[c];
     bool found = false;
-    for (size_t t = 0; !found && t<ops.size(); t++)
+    for (unsigned t = 0; !found && t<ops.size(); t++)
     {
       const NdbDictionary::Table * tab = ops[t].m_op->getTable();
       if (tab)
@@ -274,7 +274,7 @@ HugoQueryBuilder::checkBusyScan(Op op) c
     op = m_query[op.m_parent];
   }
 
-  for (size_t i = op.m_idx + 1; i < m_query.size(); i++)
+  for (unsigned i = op.m_idx + 1; i < m_query.size(); i++)
     if (isAncestor(op, m_query[i]) && isScan(m_query[i].m_op))
       return true;
 
@@ -537,11 +537,11 @@ loop:
       if (op.m_op == 0)
       {
         ndbout << "Failed to add to " << endl;
-        for (size_t i = 0; i<m_query.size(); i++)
+        for (unsigned i = 0; i<m_query.size(); i++)
           ndbout << m_query[i] << endl;
 
         ndbout << "Parents: " << endl;
-        for (size_t i = 0; i<parents.size(); i++)
+        for (unsigned i = 0; i<parents.size(); i++)
           ndbout << parents[i].m_idx << " ";
         ndbout << endl;
       }

=== modified file 'storage/ndb/test/src/NDBT_Test.cpp'
--- a/storage/ndb/test/src/NDBT_Test.cpp	2011-09-02 09:16:56 +0000
+++ b/storage/ndb/test/src/NDBT_Test.cpp	2011-10-21 12:36:44 +0000
@@ -414,7 +414,7 @@ NDBT_TestCaseImpl1::NDBT_TestCaseImpl1(N
 NDBT_TestCaseImpl1::~NDBT_TestCaseImpl1(){
   NdbCondition_Destroy(waitThreadsCondPtr);
   NdbMutex_Destroy(waitThreadsMutexPtr);
-  size_t i;
+  unsigned i;
   for(i = 0; i < initializers.size();  i++)
     delete initializers[i];
   initializers.clear();

=== modified file 'storage/ndb/test/src/NdbBackup.cpp'
--- a/storage/ndb/test/src/NdbBackup.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/test/src/NdbBackup.cpp	2011-10-21 12:36:44 +0000
@@ -308,7 +308,7 @@ NdbBackup::restore(unsigned _backup_id){
   res = execRestore(true, true, ndbNodes[0].node_id, _backup_id);
 
   // Restore data once for each node
-  for(size_t i = 1; i < ndbNodes.size(); i++){
+  for(unsigned i = 1; i < ndbNodes.size(); i++){
     res = execRestore(true, false, ndbNodes[i].node_id, _backup_id);
   }
   

=== modified file 'storage/ndb/test/src/NdbRestarter.cpp'
--- a/storage/ndb/test/src/NdbRestarter.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/src/NdbRestarter.cpp	2011-10-21 12:36:44 +0000
@@ -59,7 +59,7 @@ int NdbRestarter::getDbNodeId(int _i){
   if (getStatus() != 0)
     return -1;
 
-  for(size_t i = 0; i < ndbNodes.size(); i++){     
+  for(unsigned i = 0; i < ndbNodes.size(); i++){     
     if (i == (unsigned)_i){
       return ndbNodes[i].node_id;
     }
@@ -114,7 +114,7 @@ NdbRestarter::restartNodes(int * nodes, 
     for (int j = 0; j<cnt; j++)
     {
       int _nodeId = nodes[j];
-      for(size_t i = 0; i < ndbNodes.size(); i++)
+      for(unsigned i = 0; i < ndbNodes.size(); i++)
       {
         if(ndbNodes[i].node_id == _nodeId)
         {
@@ -147,7 +147,7 @@ NdbRestarter::getMasterNodeId(){
   
   int min = 0;
   int node = -1;
-  for(size_t i = 0; i < ndbNodes.size(); i++){
+  for(unsigned i = 0; i < ndbNodes.size(); i++){
     if(min == 0 || ndbNodes[i].dynamic_id < min){
       min = ndbNodes[i].dynamic_id;
       node = ndbNodes[i].node_id;
@@ -165,7 +165,7 @@ NdbRestarter::getNodeGroup(int nodeId){
   if (getStatus() != 0)
     return -1;
   
-  for(size_t i = 0; i < ndbNodes.size(); i++)
+  for(unsigned i = 0; i < ndbNodes.size(); i++)
   {
     if(ndbNodes[i].node_id == nodeId)
     {
@@ -184,7 +184,7 @@ NdbRestarter::getNextMasterNodeId(int no
   if (getStatus() != 0)
     return -1;
   
-  size_t i;
+  unsigned i;
   for(i = 0; i < ndbNodes.size(); i++)
   {
     if(ndbNodes[i].node_id == nodeId)
@@ -244,7 +244,7 @@ NdbRestarter::getRandomNodeOtherNodeGrou
     return -1;
   
   int node_group = -1;
-  for(size_t i = 0; i < ndbNodes.size(); i++){
+  for(unsigned i = 0; i < ndbNodes.size(); i++){
     if(ndbNodes[i].node_id == nodeId){
       node_group = ndbNodes[i].node_group;
       break;
@@ -274,7 +274,7 @@ NdbRestarter::getRandomNodeSameNodeGroup
     return -1;
   
   int node_group = -1;
-  for(size_t i = 0; i < ndbNodes.size(); i++){
+  for(unsigned i = 0; i < ndbNodes.size(); i++){
     if(ndbNodes[i].node_id == nodeId){
       node_group = ndbNodes[i].node_group;
       break;
@@ -347,7 +347,7 @@ NdbRestarter::waitClusterState(ndb_mgm_n
   }
   
   // Collect all nodes into nodes
-  for (size_t i = 0; i < ndbNodes.size(); i++){
+  for (unsigned i = 0; i < ndbNodes.size(); i++){
     nodes[i] = ndbNodes[i].node_id;
     numNodes++;
   }
@@ -388,7 +388,7 @@ NdbRestarter::waitNodesState(const int *
 	 * First check if any node is not starting
 	 * then it's no idea to wait anymore
 	 */
-	for (size_t n = 0; n < ndbNodes.size(); n++){
+	for (unsigned n = 0; n < ndbNodes.size(); n++){
 	  if (ndbNodes[n].node_status != NDB_MGM_NODE_STATUS_STARTED &&
 	      ndbNodes[n].node_status != NDB_MGM_NODE_STATUS_STARTING)
 	    waitMore = false;
@@ -426,7 +426,7 @@ NdbRestarter::waitNodesState(const int *
 
     for (int i = 0; i < _num_nodes; i++){
       ndb_mgm_node_state* ndbNode = NULL;
-      for (size_t n = 0; n < ndbNodes.size(); n++){
+      for (unsigned n = 0; n < ndbNodes.size(); n++){
 	if (ndbNodes[n].node_id == _nodes[i])
 	  ndbNode = &ndbNodes[n];
       }
@@ -713,7 +713,7 @@ int NdbRestarter::insertErrorInAllNodes(
 
   int result = 0;
  
-  for(size_t i = 0; i < ndbNodes.size(); i++){     
+  for(unsigned i = 0; i < ndbNodes.size(); i++){     
     g_debug << "inserting error in node " << ndbNodes[i].node_id << endl;
     if (insertErrorInNode(ndbNodes[i].node_id, _error) == -1)
       result = -1;
@@ -751,7 +751,7 @@ int NdbRestarter::dumpStateAllNodes(cons
 
  int result = 0;
  
- for(size_t i = 0; i < ndbNodes.size(); i++){     
+ for(unsigned i = 0; i < ndbNodes.size(); i++){     
    g_debug << "dumping state in node " << ndbNodes[i].node_id << endl;
    if (dumpStateOneNode(ndbNodes[i].node_id, _args, _num_args) == -1)
      result = -1;
@@ -841,7 +841,7 @@ NdbRestarter::checkClusterAlive(const in
   for (int i = 0; i<num_nodes; i++)
     mask.set(deadnodes[i]);
   
-  for (size_t n = 0; n < ndbNodes.size(); n++)
+  for (unsigned n = 0; n < ndbNodes.size(); n++)
   {
     if (mask.get(ndbNodes[n].node_id))
       continue;
@@ -862,7 +862,7 @@ NdbRestarter::rollingRestart(Uint32 flag
   NdbNodeBitmask ng_mask;
   NdbNodeBitmask restart_nodes;
   Vector<int> nodes;
-  for(size_t i = 0; i < ndbNodes.size(); i++)
+  for(unsigned i = 0; i < ndbNodes.size(); i++)
   { 
     if (ng_mask.get(ndbNodes[i].node_group) == false)
     {
@@ -911,7 +911,7 @@ NdbRestarter::getMasterNodeVersion(int& 
   int masterNodeId = getMasterNodeId();
   if (masterNodeId != -1)
   {
-    for(size_t i = 0; i < ndbNodes.size(); i++)
+    for(unsigned i = 0; i < ndbNodes.size(); i++)
     {
       if (ndbNodes[i].node_id == masterNodeId)
       {
@@ -964,7 +964,7 @@ NdbRestarter::getNodeTypeVersionRange(nd
   minVer = 0;
   maxVer = 0;
   
-  for(size_t i = 0; i < nodeVec->size(); i++)
+  for(unsigned i = 0; i < nodeVec->size(); i++)
   {
     int nodeVer = (*nodeVec)[i].version;
     if ((minVer == 0) ||
@@ -984,7 +984,7 @@ NdbRestarter::getNodeStatus(int nodeid)
   if (getStatus() != 0)
     return -1;
 
-  for (size_t n = 0; n < ndbNodes.size(); n++)
+  for (unsigned n = 0; n < ndbNodes.size(); n++)
   {
     if (ndbNodes[n].node_id == nodeid)
       return ndbNodes[n].node_status;

=== modified file 'storage/ndb/test/src/getarg.c'
--- a/storage/ndb/test/src/getarg.c	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/test/src/getarg.c	2011-10-21 12:36:44 +0000
@@ -290,7 +290,7 @@ arg_printusage (struct getargs *args,
 	    strlcat(buf, "]", sizeof(buf));
 	    if(args[i].type == arg_strings)
 		strlcat(buf, "...", sizeof(buf));
-	    col = check_column(stderr, col, strlen(buf) + 1, columns);
+	    col = check_column(stderr, col, (int)strlen(buf) + 1, columns);
 	    col += fprintf(stderr, " %s", buf);
 	}
 	if (args[i].short_name) {
@@ -301,7 +301,7 @@ arg_printusage (struct getargs *args,
 	    strlcat(buf, "]", sizeof(buf));
 	    if(args[i].type == arg_strings)
 		strlcat(buf, "...", sizeof(buf));
-	    col = check_column(stderr, col, strlen(buf) + 1, columns);
+	    col = check_column(stderr, col, (int)strlen(buf) + 1, columns);
 	    col += fprintf(stderr, " %s", buf);
 	}
 	if (args[i].long_name && args[i].short_name)
@@ -309,7 +309,7 @@ arg_printusage (struct getargs *args,
 	max_len = max(max_len, len);
     }
     if (extra_string) {
-	col = check_column(stderr, col, strlen(extra_string) + 1, columns);
+	col = check_column(stderr, col, (int)strlen(extra_string) + 1, columns);
 	fprintf (stderr, " %s\n", extra_string);
     } else
 	fprintf (stderr, "\n");
@@ -360,14 +360,14 @@ arg_match_long(struct getargs *args, siz
     int argv_len;
     char *p;
 
-    argv_len = strlen(argv);
+    argv_len = (int)strlen(argv);
     p = strchr (argv, '=');
     if (p != NULL)
-	argv_len = p - argv;
+	argv_len = (int)(p - argv);
 
     for (i = 0; i < num_args; ++i) {
 	if(args[i].long_name) {
-	    int len = strlen(args[i].long_name);
+	    int len = (int)strlen(args[i].long_name);
 	    char *p = argv;
 	    int p_len = argv_len;
 	    negate = 0;
@@ -467,7 +467,7 @@ arg_match_long(struct getargs *args, siz
     }
     case arg_collect:{
 	struct getarg_collect_info *c = current->value;
-	int o = argv - rargv[*optind];
+	int o = (int)(argv - rargv[*optind]);
 	return (*c->func)(FALSE, argc, rargv, optind, &o, c->data);
     }
 

=== modified file 'storage/ndb/test/tools/cpcc.cpp'
--- a/storage/ndb/test/tools/cpcc.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/tools/cpcc.cpp	2011-10-21 12:36:44 +0000
@@ -140,7 +140,7 @@ public:
 
   virtual bool evaluate(SimpleCpcClient* c, const SimpleCpcClient::Process & p){
     bool run = on_empty;
-    for(size_t i = 0; i<m_cond.size(); i++){
+    for(unsigned i = 0; i<m_cond.size(); i++){
       if(m_cond[i]->evaluate(c, p)){
 	run = true;
 	break;
@@ -172,7 +172,7 @@ void
 add_hosts(Vector<SimpleCpcClient*> & hosts, BaseString list){
   Vector<BaseString> split;
   list.split(split);
-  for(size_t i = 0; i<split.size(); i++){
+  for(unsigned i = 0; i<split.size(); i++){
     add_host(hosts, split[i]);
   }
 }
@@ -273,7 +273,7 @@ main(int argc, const char** argv){
 
 int
 connect(Vector<SimpleCpcClient*>& list){
-  for(size_t i = 0; i<list.size(); i++){
+  for(unsigned i = 0; i<list.size(); i++){
     if(list[i]->connect() != 0){
       ndbout_c("Failed to connect to %s:%d", 
 	       list[i]->getHost(), list[i]->getPort());
@@ -285,7 +285,7 @@ connect(Vector<SimpleCpcClient*>& list){
 
 int
 for_each(Vector<SimpleCpcClient*>& list, Expression & expr){
-  for(size_t i = 0; i<list.size(); i++){
+  for(unsigned i = 0; i<list.size(); i++){
     if(list[i] == 0)
       continue;
     Properties p;
@@ -294,7 +294,7 @@ for_each(Vector<SimpleCpcClient*>& list,
       ndbout << "Failed to list processes on " 
 	     << list[i]->getHost() << ":" << list[i]->getPort() << endl;
     }
-    for(size_t j = 0; j<procs.size(); j++)
+    for(unsigned j = 0; j<procs.size(); j++)
       expr.evaluate(list[i], procs[j]);
   }
   return 0;

=== modified file 'storage/ndb/tools/CMakeLists.txt'
--- a/storage/ndb/tools/CMakeLists.txt	2011-10-05 13:57:58 +0000
+++ b/storage/ndb/tools/CMakeLists.txt	2011-10-24 13:49:09 +0000
@@ -100,3 +100,11 @@ IF (MYSQL_VERSION_ID LESS "50501")
   # Don't build or install this program anymore in 5.5+
   ADD_EXECUTABLE(ndb_test_platform ndb_test_platform.cpp)
 ENDIF()
+
+# Install the ndb_dist_priv.sql script
+INSTALL(FILES ${CMAKE_CURRENT_SOURCE_DIR}/ndb_dist_priv.sql
+        DESTINATION ${INSTALL_MYSQLSHAREDIR})
+
+INSTALL(FILES ${CMAKE_CURRENT_SOURCE_DIR}/ndb_error_reporter
+              ${CMAKE_CURRENT_SOURCE_DIR}/ndb_size.pl
+        DESTINATION ${INSTALL_BINDIR})

=== modified file 'storage/ndb/tools/ndb_dump_frm_data.cpp'
--- a/storage/ndb/tools/ndb_dump_frm_data.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/tools/ndb_dump_frm_data.cpp	2011-10-20 19:41:56 +0000
@@ -104,7 +104,7 @@ dofile(const char* file)
       break;
     }
     ssize_t size2;
-    if ((size2 = read(fd, data, size)) == -1)
+    if ((size2 = read(fd, data, (unsigned)size)) == -1)
     {
       fprintf(stderr, "%s: read: %s\n", file, strerror(errno));
       break;
@@ -137,7 +137,7 @@ dofile(const char* file)
     printf("  orig: %u\n", (uint)size);
     printf("  pack: %u\n", (uint)pack_len);
     printf("*/\n\n");
-    dodump(name, pack_data, pack_len);
+    dodump(name, pack_data, (uint)pack_len);
     ret = 0;
   }
   while (0);

=== modified file 'storage/ndb/tools/ndb_index_stat.cpp'
--- a/storage/ndb/tools/ndb_index_stat.cpp	2011-08-17 10:36:01 +0000
+++ b/storage/ndb/tools/ndb_index_stat.cpp	2011-10-19 12:20:58 +0000
@@ -211,7 +211,6 @@ doquery()
       {
         NdbIndexStat::Bound& b = (i == 0 ? b_lo : b_hi);
 
-        bool strict = false;
         if (ndb_rand() % 3 != 0)
         {
           if (ndb_rand() % 3 != 0)

=== modified file 'storage/ndb/tools/restore/consumer_restore.cpp'
--- a/storage/ndb/tools/restore/consumer_restore.cpp	2011-09-07 10:08:09 +0000
+++ b/storage/ndb/tools/restore/consumer_restore.cpp	2011-10-27 12:19:57 +0000
@@ -679,7 +679,7 @@ BackupRestore::rebuild_indexes(const Tab
   NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
 
   Vector<NdbDictionary::Index*> & indexes = m_index_per_table[id];
-  for(size_t i = 0; i<indexes.size(); i++)
+  for(unsigned i = 0; i<indexes.size(); i++)
   {
     const NdbDictionary::Index * const idx = indexes[i];
     const char * const idx_name = idx->getName();
@@ -818,7 +818,7 @@ bool BackupRestore::search_replace(char 
                                    const char **data, const char *end_data,
                                    uint *new_data_len)
 {
-  uint search_str_len = strlen(search_str);
+  uint search_str_len = (uint)strlen(search_str);
   uint inx = 0;
   bool in_delimiters = FALSE;
   bool escape_char = FALSE;
@@ -969,7 +969,7 @@ bool BackupRestore::translate_frm(NdbDic
   {
     DBUG_RETURN(TRUE);
   }
-  if (map_in_frm(new_data, (const char*)data, data_len, &new_data_len))
+  if (map_in_frm(new_data, (const char*)data, (uint)data_len, &new_data_len))
   {
     free(new_data);
     DBUG_RETURN(TRUE);
@@ -1997,7 +1997,7 @@ BackupRestore::endOfTables(){
     return true;
 
   NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
-  for(size_t i = 0; i<m_indexes.size(); i++){
+  for(unsigned i = 0; i<m_indexes.size(); i++){
     NdbTableImpl & indtab = NdbTableImpl::getImpl(* m_indexes[i]);
 
     BaseString db_name, schema_name, table_name;

=== modified file 'storage/ndb/tools/waiter.cpp'
--- a/storage/ndb/tools/waiter.cpp	2011-02-04 09:45:35 +0000
+++ b/storage/ndb/tools/waiter.cpp	2011-10-21 12:36:44 +0000
@@ -322,7 +322,7 @@ waitClusterStatus(const char* _addr,
 	 * First check if any node is not starting
 	 * then it's no idea to wait anymore
 	 */
-	for (size_t n = 0; n < ndbNodes.size(); n++){
+	for (unsigned n = 0; n < ndbNodes.size(); n++){
 	  if (ndbNodes[n].node_status != NDB_MGM_NODE_STATUS_STARTED &&
 	      ndbNodes[n].node_status != NDB_MGM_NODE_STATUS_STARTING)
 	    waitMore = false;
@@ -359,7 +359,7 @@ waitClusterStatus(const char* _addr,
     allInState = (ndbNodes.size() > 0);
 
     /* Loop through all nodes and check their state */
-    for (size_t n = 0; n < ndbNodes.size(); n++) {
+    for (unsigned n = 0; n < ndbNodes.size(); n++) {
       ndb_mgm_node_state* ndbNode = &ndbNodes[n];
 
       assert(ndbNode != NULL);

=== modified file 'support-files/compiler_warnings.supp'
--- a/support-files/compiler_warnings.supp	2011-10-14 08:26:28 +0000
+++ b/support-files/compiler_warnings.supp	2011-10-20 19:41:56 +0000
@@ -59,23 +59,23 @@ db_vrfy.c : .*comparison is always false
 # Ignore all conversion warnings on windows 64
 # (Is safe as we are not yet supporting strings >= 2G)
 #
-.* : conversion from '__int64' to .*int'.*
-.* : conversion from '__int64' to 'uint8'.*
-.* : conversion from '__int64' to 'uint32'.*
-.* : conversion from '__int64' to 'u.*long'.*
-.* : conversion from '__int64' to 'long'.*
-.* : conversion from '__int64' to 'off_t'.*
-.* : conversion from '.*size_t' to .*int'.*
-.* : conversion from '.*size_t' to 'TaoCrypt::word32'.*
-.* : conversion from '.*size_t' to 'u.*long'.*
-.* : conversion from '.*size_t' to 'uint32'.*
-.* : conversion from '.*size_t' to 'off_t'.*
-.* : conversion from '.*size_t' to 'size_s'.*
-.* : conversion from '.*size_t' to 'DWORD'.*
-.* : conversion from '.*size_t' to 'uLongf'.*
-.* : conversion from '.*size_t' to 'UINT'.*
-.* : conversion from '.*size_t' to 'uInt'.*
-.* : conversion from '.*size_t' to 'uint16'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to .*int'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'uint8'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'uint32'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'u.*long'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'long'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'off_t'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to .*int'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'TaoCrypt::word32'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'u.*long'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'uint32'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'off_t'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'size_s'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'DWORD'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'uLongf'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'UINT'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'uInt'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'uint16'.*
 
 #
 # The following should be fixed by the ndb team

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-trunk-cluster branch (magnus.blaudd:3401 to 3402) magnus.blaudd28 Oct