List:Commits« Previous MessageNext Message »
From:jonas oreland Date:October 20 2011 7:53pm
Subject:bzr push into mysql-5.5-cluster branch (jonas.oreland:3604 to 3606)
View as plain text  
 3606 jonas oreland	2011-10-20 [merge]
      ndb - merge 70-spj-scan-scan to 72

    modified:
      mysql-test/suite/ndb/r/ndb_join_pushdown.result
      mysql-test/suite/ndb/t/ndb_join_pushdown.test
      sql/abstract_query_plan.cc
      sql/abstract_query_plan.h
 3605 jonas oreland	2011-10-20 [merge]
      ndb - merge 71 to 72

    added:
      mysql-test/suite/rpl_ndb/r/rpl_ndb_not_null.result
      mysql-test/suite/rpl_ndb/t/rpl_ndb_not_null.test
    modified:
      sql/ha_ndb_index_stat.cc
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster_binlog.cc
      sql/ha_ndbcluster_binlog.h
      sql/ha_ndbcluster_connection.cc
      sql/ha_ndbinfo.cc
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/AndPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/BetweenPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/CandidateIndexImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/ComparativePredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/EqualPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/GreaterEqualPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/GreaterThanPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/InPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/LessEqualPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/LessThanPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PropertyImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryExecutionContextImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/spi/QueryExecutionContext.java
      storage/ndb/src/common/portlib/ndb_daemon.cc
      storage/ndb/src/common/util/ndb_init.cpp
      storage/ndb/src/common/util/ndbzio.c
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
      storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp
      storage/ndb/src/kernel/blocks/ndbfs/Win32AsyncFile.cpp
      storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
      storage/ndb/src/kernel/error/ndbd_exit_codes.c
      storage/ndb/src/kernel/vm/SimulatedBlock.cpp
      storage/ndb/src/mgmapi/ndb_logevent.cpp
      storage/ndb/src/mgmsrv/Defragger.hpp
      storage/ndb/src/ndbapi/NdbQueryBuilder.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
      storage/ndb/src/ndbapi/ndberror.c
      storage/ndb/test/ndbapi/testDict.cpp
      storage/ndb/test/run-test/atrt.hpp
      storage/ndb/test/run-test/files.cpp
      storage/ndb/test/run-test/main.cpp
      storage/ndb/tools/ndb_dump_frm_data.cpp
      support-files/compiler_warnings.supp
 3604 Jonas Oreland	2011-10-20
      ndb - use "--system" in rqg_spj instead of "--exec" as the former doesn't buffer output

    modified:
      mysql-test/suite/ndb_big/rqg_spj.test
=== modified file 'mysql-test/suite/ndb/r/ndb_join_pushdown.result'
--- a/mysql-test/suite/ndb/r/ndb_join_pushdown.result	2011-10-03 08:02:28 +0000
+++ b/mysql-test/suite/ndb/r/ndb_join_pushdown.result	2011-10-20 19:52:11 +0000
@@ -2113,6 +2113,9 @@ and t2.a = t1.b;
 a	b	a	b
 1	2	2	3
 3	1	1	2
+@ndb_execute_count:=VARIABLE_VALUE-@ndb_init_execute_count
+3
+This should yield 3 executes (for now...buh)
 drop table t1;
 create table t1 (a int, b int, primary key(a)) engine = ndb;
 insert into t1 values (1, 2);
@@ -2136,6 +2139,9 @@ and t2.a = t1.b;
 a	b	a	b
 1	2	2	3
 3	1	1	2
+@ndb_execute_count:=VARIABLE_VALUE-@ndb_init_execute_count
+1
+This should yield 1 execute (but inefficient since it's based on scan)
 explain extended
 select *
 from t1, t1 as t2
@@ -5468,6 +5474,38 @@ select count(*) from t1 as x1 join t1 as
 count(*)
 3
 drop table t1;
+create table t1 
+(a int not null,
+b int not null, 
+c int not null,
+d int not null,
+primary key(a,b,c,d)) engine=ndb partition by key (b,c);
+insert into t1 values (0x4f, 0x4f, 0x4f, 0x4f);
+explain select * from t1 as x1 
+join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+join t1 as x3 on x3.a=x2.d and x3.b=x1.d and x3.c=x2.c;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	x1	ALL	NULL	NULL	NULL	NULL	2	Using where with pushed condition
+1	SIMPLE	x2	ref	PRIMARY	PRIMARY	8	func,test.x1.b	1	Parent of 2 pushed join@1; Using where
+1	SIMPLE	x3	ref	PRIMARY	PRIMARY	12	test.x2.d,test.x1.d,test.x2.c	1	Child of 'x2' in pushed join@1
+select * from t1 as x1 
+join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+join t1 as x3 on x3.a=x2.c and x3.b=x1.d and x3.c=x2.c;
+a	b	c	d	a	b	c	d	a	b	c	d
+79	79	79	79	79	79	79	79	79	79	79	79
+explain select * from t1 as x1 
+join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+join t1 as x3 on x3.a=x2.d and x3.b=x1.d and x3.c=0x4f;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	x1	ALL	NULL	NULL	NULL	NULL	2	Using where with pushed condition
+1	SIMPLE	x2	ref	PRIMARY	PRIMARY	8	func,test.x1.b	1	Parent of 2 pushed join@1; Using where
+1	SIMPLE	x3	ref	PRIMARY	PRIMARY	12	test.x2.d,test.x1.d,const	1	Child of 'x2' in pushed join@1
+select * from t1 as x1 
+join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+join t1 as x3 on x3.a=x2.c and x3.b=x1.d and x3.c=0x4f;
+a	b	c	d	a	b	c	d	a	b	c	d
+79	79	79	79	79	79	79	79	79	79	79	79
+drop table t1;
 create temporary table spj_counts_at_end
 select counter_name, sum(val) as val 
 from ndbinfo.counters 
@@ -5484,10 +5522,10 @@ and spj_counts_at_end.counter_name <> 'R
        and spj_counts_at_end.counter_name <> 'SCAN_ROWS_RETURNED'
        and spj_counts_at_end.counter_name <> 'SCAN_BATCHES_RETURNED';
 counter_name	spj_counts_at_end.val - spj_counts_at_startup.val
-CONST_PRUNED_RANGE_SCANS_RECEIVED	6
+CONST_PRUNED_RANGE_SCANS_RECEIVED	8
 LOCAL_TABLE_SCANS_SENT	250
-PRUNED_RANGE_SCANS_RECEIVED	25
-RANGE_SCANS_RECEIVED	728
+PRUNED_RANGE_SCANS_RECEIVED	27
+RANGE_SCANS_RECEIVED	732
 READS_RECEIVED	58
 TABLE_SCANS_RECEIVED	250
 drop table spj_counts_at_startup;
@@ -5499,9 +5537,9 @@ pruned_scan_count
 sorted_scan_count
 10
 pushed_queries_defined
-401
+405
 pushed_queries_dropped
 11
 pushed_queries_executed
-548
+550
 set ndb_join_pushdown = @save_ndb_join_pushdown;

=== modified file 'mysql-test/suite/ndb/t/ndb_join_pushdown.test'
--- a/mysql-test/suite/ndb/t/ndb_join_pushdown.test	2011-09-29 13:11:52 +0000
+++ b/mysql-test/suite/ndb/t/ndb_join_pushdown.test	2011-10-20 12:52:58 +0000
@@ -1075,11 +1075,14 @@ select *
 from t1, t1 as t2
 where t1.a in (1,3,5)
   and t2.a = t1.b;
+--source suite/ndb/include/ndb_init_execute_count.inc
 --sorted_result
 select *
 from t1, t1 as t2
 where t1.a in (1,3,5)
   and t2.a = t1.b;
+--source suite/ndb/include/ndb_execute_count.inc
+--echo This should yield 3 executes (for now...buh)
 
 connection ddl;
 drop table t1;
@@ -1100,11 +1103,14 @@ select *
 from t1, t1 as t2
 where t1.a in (1,3,5)
   and t2.a = t1.b;
+--source suite/ndb/include/ndb_init_execute_count.inc
 --sorted_result
 select *
 from t1, t1 as t2
 where t1.a in (1,3,5)
   and t2.a = t1.b;
+--source suite/ndb/include/ndb_execute_count.inc
+--echo This should yield 1 execute (but inefficient since it's based on scan)
 
 
 ## Adding and 'order by ... desc' trigger the usage
@@ -3836,6 +3842,40 @@ select count(*) from t1 as x1 join t1 as
 
 connection ddl;
 drop table t1;
+
+####################
+# Test pruned child scans using parameter values (known regression).
+####################
+create table t1 
+       (a int not null,
+       b int not null, 
+       c int not null,
+       d int not null,
+       primary key(a,b,c,d)) engine=ndb partition by key (b,c);
+
+connection spj;
+insert into t1 values (0x4f, 0x4f, 0x4f, 0x4f);
+
+# Prune key depends on parent row.
+explain select * from t1 as x1 
+	join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+	join t1 as x3 on x3.a=x2.d and x3.b=x1.d and x3.c=x2.c;
+
+select * from t1 as x1 
+	join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+	join t1 as x3 on x3.a=x2.c and x3.b=x1.d and x3.c=x2.c;
+
+# Prune key is fixed.
+explain select * from t1 as x1 
+	join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+	join t1 as x3 on x3.a=x2.d and x3.b=x1.d and x3.c=0x4f;
+
+select * from t1 as x1 
+	join t1 as x2 on x1.c=0x4f and x2.a=0+x1.b and x2.b=x1.b 
+	join t1 as x3 on x3.a=x2.c and x3.b=x1.d and x3.c=0x4f;
+
+connection ddl;
+drop table t1;
 
 ########################################
 # Verify DBSPJ counters for entire test:

=== added file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_not_null.result'
--- a/mysql-test/suite/rpl_ndb/r/rpl_ndb_not_null.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb/r/rpl_ndb_not_null.result	2011-10-20 12:31:31 +0000
@@ -0,0 +1,196 @@
+include/master-slave.inc
+[connection master]
+SET SQL_LOG_BIN= 0;
+CREATE TABLE t1(`a` INT, `b` DATE DEFAULT NULL,
+`c` INT DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t2(`a` INT, `b` DATE DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t3(`a` INT, `b` DATE DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t4(`a` INT, `b` DATE DEFAULT NULL,
+`c` INT DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+SET SQL_LOG_BIN= 1;
+CREATE TABLE t1(`a` INT, `b` DATE DEFAULT NULL,
+`c` INT DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t2(`a` INT, `b` DATE DEFAULT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t3(`a` INT, `b` DATE DEFAULT '0000-00-00',
+`c` INT DEFAULT 500, 
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t4(`a` INT, `b` DATE DEFAULT '0000-00-00',
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+************* EXECUTION WITH INSERTS *************
+INSERT INTO t1(a,b,c) VALUES (1, null, 1);
+INSERT INTO t1(a,b,c) VALUES (2,'1111-11-11', 2);
+INSERT INTO t1(a,b) VALUES (3, null);
+INSERT INTO t1(a,c) VALUES (4, 4);
+INSERT INTO t1(a) VALUES (5);
+INSERT INTO t2(a,b) VALUES (1, null);
+INSERT INTO t2(a,b) VALUES (2,'1111-11-11');
+INSERT INTO t2(a) VALUES (3);
+INSERT INTO t3(a,b) VALUES (1, null);
+INSERT INTO t3(a,b) VALUES (2,'1111-11-11');
+INSERT INTO t3(a) VALUES (3);
+INSERT INTO t4(a,b,c) VALUES (1, null, 1);
+INSERT INTO t4(a,b,c) VALUES (2,'1111-11-11', 2);
+INSERT INTO t4(a,b) VALUES (3, null);
+INSERT INTO t4(a,c) VALUES (4, 4);
+INSERT INTO t4(a) VALUES (5);
+************* SHOWING THE RESULT SETS WITH INSERTS *************
+TABLES t1 and t2 must be equal otherwise an error will be thrown. 
+include/diff_tables.inc [master:t1, slave:t1]
+include/diff_tables.inc [master:t2, slave:t2]
+TABLES t2 and t3 must be different.
+SELECT * FROM t3 ORDER BY a;
+a	b
+1	NULL
+2	1111-11-11
+3	NULL
+SELECT * FROM t3 ORDER BY a;
+a	b	c
+1	NULL	500
+2	1111-11-11	500
+3	NULL	500
+SELECT * FROM t4 ORDER BY a;
+a	b	c
+1	NULL	1
+2	1111-11-11	2
+3	NULL	NULL
+4	NULL	4
+5	NULL	NULL
+SELECT * FROM t4 ORDER BY a;
+a	b
+1	NULL
+2	1111-11-11
+3	NULL
+4	NULL
+5	NULL
+************* EXECUTION WITH UPDATES and REPLACES *************
+DELETE FROM t1;
+INSERT INTO t1(a,b,c) VALUES (1,'1111-11-11', 1);
+REPLACE INTO t1(a,b,c) VALUES (2,'1111-11-11', 2);
+UPDATE t1 set b= NULL, c= 300 where a= 1;
+REPLACE INTO t1(a,b,c) VALUES (2, NULL, 300);
+************* SHOWING THE RESULT SETS WITH UPDATES and REPLACES *************
+TABLES t1 and t2 must be equal otherwise an error will be thrown. 
+include/diff_tables.inc [master:t1, slave:t1]
+************* CLEANING *************
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+DROP TABLE t4;
+SET SQL_LOG_BIN= 0;
+CREATE TABLE t1 (`a` INT, `b` BIT DEFAULT NULL, `c` BIT DEFAULT NULL, 
+PRIMARY KEY (`a`)) ENGINE= 'NDB';
+SET SQL_LOG_BIN= 1;
+CREATE TABLE t1 (`a` INT, `b` BIT DEFAULT b'01', `c` BIT DEFAULT NULL,
+PRIMARY KEY (`a`)) ENGINE= 'NDB';
+************* EXECUTION WITH INSERTS *************
+INSERT INTO t1(a,b,c) VALUES (1, null, b'01');
+INSERT INTO t1(a,b,c) VALUES (2,b'00', b'01');
+INSERT INTO t1(a,b) VALUES (3, null);
+INSERT INTO t1(a,c) VALUES (4, b'01');
+INSERT INTO t1(a) VALUES (5);
+************* SHOWING THE RESULT SETS WITH INSERTS *************
+TABLES t1 and t2 must be different.
+SELECT a,b+0,c+0 FROM t1 ORDER BY a;
+a	b+0	c+0
+1	NULL	1
+2	0	1
+3	NULL	NULL
+4	NULL	1
+5	NULL	NULL
+SELECT a,b+0,c+0 FROM t1 ORDER BY a;
+a	b+0	c+0
+1	NULL	1
+2	0	1
+3	NULL	NULL
+4	NULL	1
+5	NULL	NULL
+************* EXECUTION WITH UPDATES and REPLACES *************
+DELETE FROM t1;
+INSERT INTO t1(a,b,c) VALUES (1,b'00', b'01');
+REPLACE INTO t1(a,b,c) VALUES (2,b'00',b'01');
+UPDATE t1 set b= NULL, c= b'00' where a= 1;
+REPLACE INTO t1(a,b,c) VALUES (2, NULL, b'00');
+************* SHOWING THE RESULT SETS WITH UPDATES and REPLACES *************
+TABLES t1 and t2 must be equal otherwise an error will be thrown. 
+include/diff_tables.inc [master:t1, slave:t1]
+DROP TABLE t1;
+################################################################################
+#                       NULL ---> NOT NULL (STRICT MODE)
+#                    UNCOMMENT THIS AFTER FIXING BUG#43992
+################################################################################
+################################################################################
+#                       NULL ---> NOT NULL (NON-STRICT MODE)
+################################################################################
+SET SQL_LOG_BIN= 0;
+CREATE TABLE t1(`a` INT NOT NULL, `b` INT,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t2(`a` INT NOT NULL, `b` INT,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t3(`a` INT NOT NULL, `b` INT,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+SET SQL_LOG_BIN= 1;
+CREATE TABLE t1(`a` INT NOT NULL, `b` INT NOT NULL, 
+`c` INT NOT NULL,
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t2(`a` INT NOT NULL, `b` INT NOT NULL,
+`c` INT, 
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+CREATE TABLE t3(`a` INT NOT NULL, `b` INT NOT NULL,
+`c` INT DEFAULT 500, 
+PRIMARY KEY(`a`)) ENGINE='NDB' DEFAULT CHARSET=LATIN1;
+************* EXECUTION WITH INSERTS *************
+INSERT INTO t1(a) VALUES (1);
+INSERT INTO t1(a, b) VALUES (2, NULL);
+INSERT INTO t1(a, b) VALUES (3, 1);
+INSERT INTO t2(a) VALUES (1);
+INSERT INTO t2(a, b) VALUES (2, NULL);
+INSERT INTO t2(a, b) VALUES (3, 1);
+INSERT INTO t3(a) VALUES (1);
+INSERT INTO t3(a, b) VALUES (2, NULL);
+INSERT INTO t3(a, b) VALUES (3, 1);
+INSERT INTO t3(a, b) VALUES (4, 1);
+REPLACE INTO t3(a, b) VALUES (5, null);
+REPLACE INTO t3(a, b) VALUES (3, null);
+UPDATE t3 SET b = NULL where a = 4;
+************* SHOWING THE RESULT SETS *************
+SELECT * FROM t1 ORDER BY a;
+a	b
+1	NULL
+2	NULL
+3	1
+SELECT * FROM t1 ORDER BY a;
+a	b	c
+SELECT * FROM t2 ORDER BY a;
+a	b
+1	NULL
+2	NULL
+3	1
+SELECT * FROM t2 ORDER BY a;
+a	b	c
+1	0	NULL
+2	0	NULL
+3	1	NULL
+SELECT * FROM t3 ORDER BY a;
+a	b
+1	NULL
+2	NULL
+3	NULL
+4	NULL
+5	NULL
+SELECT * FROM t3 ORDER BY a;
+a	b	c
+1	0	500
+2	0	500
+3	0	500
+4	0	500
+5	0	500
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+include/rpl_end.inc

=== added file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_not_null.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_not_null.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_not_null.test	2011-10-20 12:31:31 +0000
@@ -0,0 +1,8 @@
+-- source include/have_binlog_format_row.inc
+-- source include/have_ndb.inc
+-- source include/master-slave.inc
+
+let $engine = 'NDB';
+-- source extra/rpl_tests/rpl_not_null.test
+
+--source include/rpl_end.inc

=== modified file 'sql/abstract_query_plan.cc'
--- a/sql/abstract_query_plan.cc	2011-09-30 11:05:03 +0000
+++ b/sql/abstract_query_plan.cc	2011-10-20 19:52:11 +0000
@@ -1,6 +1,5 @@
 /*
-   Copyright 2010 Sun Microsystems, Inc.
-    All rights reserved. Use is subject to license terms.
+   Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by

=== modified file 'sql/abstract_query_plan.h'
--- a/sql/abstract_query_plan.h	2011-09-28 10:55:58 +0000
+++ b/sql/abstract_query_plan.h	2011-10-20 19:52:11 +0000
@@ -1,6 +1,5 @@
 /*
-   Copyright 2010 Sun Microsystems, Inc.
-    All rights reserved. Use is subject to license terms.
+   Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by

=== modified file 'sql/ha_ndb_index_stat.cc'
--- a/sql/ha_ndb_index_stat.cc	2011-10-17 18:13:57 +0000
+++ b/sql/ha_ndb_index_stat.cc	2011-10-20 19:41:56 +0000
@@ -227,7 +227,7 @@ ndb_index_stat_opt2str(const Ndb_index_s
     const Ndb_index_stat_opt::Val& v= opt.val[i];
     ptr+= strlen(ptr);
     const char* sep= (ptr == buf ? "" : ",");
-    const uint sz= ptr < end ? end - ptr : 0;
+    const uint sz= ptr < end ? (uint)(end - ptr) : 0;
 
     switch (v.unit) {
     case Ndb_index_stat_opt::Ubool:

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2011-10-19 11:49:07 +0000
+++ b/sql/ha_ndbcluster.cc	2011-10-20 19:52:11 +0000
@@ -1454,7 +1454,7 @@ bool ha_ndbcluster::get_error_message(in
 
   const NdbError err= ndb->getNdbError(error);
   bool temporary= err.status==NdbError::TemporaryError;
-  buf->set(err.message, strlen(err.message), &my_charset_bin);
+  buf->set(err.message, (uint32)strlen(err.message), &my_charset_bin);
   DBUG_PRINT("exit", ("message: %s, temporary: %d", buf->ptr(), temporary));
   DBUG_RETURN(temporary);
 }
@@ -2006,7 +2006,7 @@ void ha_ndbcluster::release_blobs_buffer
 */
 
 int cmp_frm(const NDBTAB *ndbtab, const void *pack_data,
-            uint pack_length)
+            size_t pack_length)
 {
   DBUG_ENTER("cmp_frm");
   /*
@@ -3957,7 +3957,7 @@ count_key_columns(const KEY *key_info, c
       break;
     length+= key_part->store_length;
   }
-  return key_part - first_key_part;
+  return (uint)(key_part - first_key_part);
 }
 
 /* Helper method to compute NDB index bounds. Note: does not set range_no. */
@@ -6536,7 +6536,7 @@ int ha_ndbcluster::ndb_update_row(const
   uint blob_count= 0;
   if (uses_blob_value(table->write_set))
   {
-    int row_offset= new_data - table->record[0];
+    int row_offset= (int)(new_data - table->record[0]);
     int res= set_blob_values(op, row_offset, table->write_set, &blob_count,
                              (batch_allowed && !need_flush));
     if (res != 0)
@@ -8307,7 +8307,7 @@ static int ndbcluster_update_apply_statu
   // log_name
   char tmp_buf[FN_REFLEN];
   ndb_pack_varchar(ndbtab->getColumn(2u), tmp_buf,
-                   group_master_log_name, strlen(group_master_log_name));
+                   group_master_log_name, (int)strlen(group_master_log_name));
   r|= op->setValue(2u, tmp_buf);
   DBUG_ASSERT(r == 0);
   // start_pos
@@ -10024,7 +10024,7 @@ int ha_ndbcluster::create(const char *na
     if ((my_errno= write_ndb_file(name)))
       DBUG_RETURN(my_errno);
 
-    ndbcluster_create_binlog_setup(thd, ndb, name, strlen(name),
+    ndbcluster_create_binlog_setup(thd, ndb, name, (uint)strlen(name),
                                    m_dbname, m_tabname, form);
     DBUG_RETURN(my_errno);
   }
@@ -10926,7 +10926,7 @@ int ha_ndbcluster::rename_table(const ch
       this is a "real" rename table, i.e. not tied to an offline alter table
       - send new name == "to" in query field
     */
-    ndbcluster_log_schema_op(thd, to, strlen(to),
+    ndbcluster_log_schema_op(thd, to, (int)strlen(to),
                              old_dbname, m_tabname,
                              ndb_table_id, ndb_table_version,
                              SOT_RENAME_TABLE_PREPARE,
@@ -11587,7 +11587,7 @@ int ha_ndbcluster::open(const char *name
                             name);
     }
     Ndb* ndb= check_ndb_in_thd(thd);
-    ndbcluster_create_binlog_setup(thd, ndb, name, strlen(name),
+    ndbcluster_create_binlog_setup(thd, ndb, name, (uint)strlen(name),
                                    m_dbname, m_tabname, table);
     if ((m_share=get_share(name, table, FALSE)) == 0)
     {
@@ -12116,7 +12116,7 @@ int ndbcluster_drop_database_impl(THD *t
   List_iterator_fast<char> it(drop_list);
   while ((tabname=it++))
   {
-    tablename_to_filename(tabname, tmp, FN_REFLEN - (tmp - full_path)-1);
+    tablename_to_filename(tabname, tmp, (uint)(FN_REFLEN - (tmp - full_path)-1));
     if (ha_ndbcluster::drop_table_impl(thd, 0, ndb, full_path, dbname, tabname))
     {
       const NdbError err= dict->getNdbError();
@@ -12255,7 +12255,7 @@ int ndbcluster_find_all_files(THD *thd)
       }
       /* finalize construction of path */
       end+= tablename_to_filename(elmt.name, end,
-                                  sizeof(key)-(end-key));
+                                  (uint)(sizeof(key)-(end-key)));
       uchar *data= 0, *pack_data= 0;
       size_t length, pack_length;
       int discover= 0;
@@ -12303,7 +12303,7 @@ int ndbcluster_find_all_files(THD *thd)
       else
       {
         /* set up replication for this table */
-        ndbcluster_create_binlog_setup(thd, ndb, key, end-key,
+        ndbcluster_create_binlog_setup(thd, ndb, key, (uint)(end-key),
                                        elmt.database, elmt.name,
                                        0);
       }
@@ -12471,8 +12471,8 @@ ndbcluster_find_files(handlerton *hton,
     {
       file_name_str= (char*)my_hash_element(&ok_tables, i);
       end= end1 +
-        tablename_to_filename(file_name_str, end1, sizeof(name) - (end1 - name));
-      ndbcluster_create_binlog_setup(thd, ndb, name, end-name,
+        tablename_to_filename(file_name_str, end1, (uint)(sizeof(name) - (end1 - name)));
+      ndbcluster_create_binlog_setup(thd, ndb, name, (uint)(end-name),
                                      db, file_name_str, 0);
     }
   }
@@ -12546,7 +12546,7 @@ ndbcluster_find_files(handlerton *hton,
     {
       LEX_STRING *tmp_file_name= 0;
       tmp_file_name= thd->make_lex_string(tmp_file_name, file_name_str,
-                                          strlen(file_name_str), TRUE);
+                                          (uint)strlen(file_name_str), TRUE);
       files->push_back(tmp_file_name); 
     }
   }
@@ -12987,7 +12987,7 @@ void ha_ndbcluster::set_dbname(const cha
   while (ptr >= path_name && *ptr != '\\' && *ptr != '/') {
     ptr--;
   }
-  uint name_len= end - ptr;
+  uint name_len= (uint)(end - ptr);
   memcpy(tmp_name, ptr + 1, name_len);
   tmp_name[name_len]= '\0';
   filename_to_tablename(tmp_name, dbname, sizeof(tmp_buff) - 1);
@@ -13019,7 +13019,7 @@ ha_ndbcluster::set_tabname(const char *p
   while (ptr >= path_name && *ptr != '\\' && *ptr != '/') {
     ptr--;
   }
-  uint name_len= end - ptr;
+  uint name_len= (uint)(end - ptr);
   memcpy(tmp_name, ptr + 1, end - ptr);
   tmp_name[name_len]= '\0';
   filename_to_tablename(tmp_name, tabname, sizeof(tmp_buff) - 1);
@@ -13790,7 +13790,7 @@ int handle_trailing_share(THD *thd, NDB_
       share->key_length= min_key_length;
     }
     share->key_length=
-      my_snprintf(share->key, min_key_length + 1, "#leak%lu",
+      (uint)my_snprintf(share->key, min_key_length + 1, "#leak%lu",
                   trailing_share_id++);
   }
   /* Keep it for possible the future trailing free */
@@ -15091,7 +15091,7 @@ ha_ndbcluster::read_multi_range_next(KEY
           need to process all index scan ranges together.
         */
         if (!multi_range_sorted ||
-            (expected_range_no= multi_range_curr - m_multi_ranges)
+            (expected_range_no= (int)(multi_range_curr - m_multi_ranges))
                 == current_range_no)
         {
           *multi_range_found_p= m_multi_ranges + current_range_no;
@@ -15140,7 +15140,7 @@ ha_ndbcluster::read_multi_range_next(KEY
   */
   DBUG_RETURN(read_multi_range_first(multi_range_found_p, 
                                      multi_range_curr,
-                                     multi_range_end - multi_range_curr, 
+                                     (uint)(multi_range_end - multi_range_curr), 
                                      multi_range_sorted,
                                      multi_range_buffer));
 }
@@ -15574,7 +15574,7 @@ ha_ndbcluster::update_table_comment(
         const char*     comment)/* in:  table comment defined by user */
 {
   THD *thd= current_thd;
-  uint length= strlen(comment);
+  uint length= (uint)strlen(comment);
   if (length > 64000 - 3)
   {
     return((char*)comment); /* string too long */
@@ -15595,7 +15595,7 @@ ha_ndbcluster::update_table_comment(
 
   char *str;
   const char *fmt="%s%snumber_of_replicas: %d";
-  const unsigned fmt_len_plus_extra= length + strlen(fmt);
+  const unsigned fmt_len_plus_extra= length + (uint)strlen(fmt);
   if ((str= (char*) my_malloc(fmt_len_plus_extra, MYF(0))) == NULL)
   {
     sql_print_error("ha_ndbcluster::update_table_comment: "
@@ -15991,7 +15991,7 @@ ndbcluster_show_status(handlerton *hton,
   else
     update_status_variables(NULL, &ns, g_ndb_cluster_connection);
 
-  buflen=
+  buflen= (uint)
     my_snprintf(buf, sizeof(buf),
                 "cluster_node_id=%ld, "
                 "connected_host=%s, "
@@ -16014,8 +16014,8 @@ ndbcluster_show_status(handlerton *hton,
     if (ns.transaction_hint_count[i] > 0 ||
         ns.transaction_no_hint_count[i] > 0)
     {
-      uint namelen= my_snprintf(name, sizeof(name), "node[%d]", i);
-      buflen= my_snprintf(buf, sizeof(buf),
+      uint namelen= (uint)my_snprintf(name, sizeof(name), "node[%d]", i);
+      buflen= (uint)my_snprintf(buf, sizeof(buf),
                           "transaction_hint=%ld, transaction_no_hint=%ld",
                           ns.transaction_hint_count[i],
                           ns.transaction_no_hint_count[i]);
@@ -16031,12 +16031,12 @@ ndbcluster_show_status(handlerton *hton,
     tmp.m_name= 0;
     while (ndb->get_free_list_usage(&tmp))
     {
-      buflen=
+      buflen= (uint)
         my_snprintf(buf, sizeof(buf),
                   "created=%u, free=%u, sizeof=%u",
                   tmp.m_created, tmp.m_free, tmp.m_sizeof);
       if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length,
-                     tmp.m_name, strlen(tmp.m_name), buf, buflen))
+                     tmp.m_name, (uint)strlen(tmp.m_name), buf, buflen))
         DBUG_RETURN(TRUE);
     }
   }
@@ -17557,19 +17557,19 @@ static int ndbcluster_fill_files_table(h
       }
 
       table->field[IS_FILES_FILE_NAME]->set_notnull();
-      table->field[IS_FILES_FILE_NAME]->store(elt.name, strlen(elt.name),
+      table->field[IS_FILES_FILE_NAME]->store(elt.name, (uint)strlen(elt.name),
                                               system_charset_info);
       table->field[IS_FILES_FILE_TYPE]->set_notnull();
       table->field[IS_FILES_FILE_TYPE]->store("DATAFILE",8,
                                               system_charset_info);
       table->field[IS_FILES_TABLESPACE_NAME]->set_notnull();
       table->field[IS_FILES_TABLESPACE_NAME]->store(df.getTablespace(),
-                                                    strlen(df.getTablespace()),
+                                                    (uint)strlen(df.getTablespace()),
                                                     system_charset_info);
       table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
       table->field[IS_FILES_LOGFILE_GROUP_NAME]->
         store(ts.getDefaultLogfileGroup(),
-              strlen(ts.getDefaultLogfileGroup()),
+              (uint)strlen(ts.getDefaultLogfileGroup()),
               system_charset_info);
       table->field[IS_FILES_ENGINE]->set_notnull();
       table->field[IS_FILES_ENGINE]->store(ndbcluster_hton_name,
@@ -17595,7 +17595,7 @@ static int ndbcluster_fill_files_table(h
       table->field[IS_FILES_ROW_FORMAT]->store("FIXED", 5, system_charset_info);
 
       char extra[30];
-      int len= my_snprintf(extra, sizeof(extra), "CLUSTER_NODE=%u", id);
+      int len= (int)my_snprintf(extra, sizeof(extra), "CLUSTER_NODE=%u", id);
       table->field[IS_FILES_EXTRA]->set_notnull();
       table->field[IS_FILES_EXTRA]->store(extra, len, system_charset_info);
       schema_table_store_record(thd, table);
@@ -17628,12 +17628,12 @@ static int ndbcluster_fill_files_table(h
 
     table->field[IS_FILES_TABLESPACE_NAME]->set_notnull();
     table->field[IS_FILES_TABLESPACE_NAME]->store(elt.name,
-                                                     strlen(elt.name),
+                                                     (uint)strlen(elt.name),
                                                      system_charset_info);
     table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
     table->field[IS_FILES_LOGFILE_GROUP_NAME]->
       store(ts.getDefaultLogfileGroup(),
-           strlen(ts.getDefaultLogfileGroup()),
+           (uint)strlen(ts.getDefaultLogfileGroup()),
            system_charset_info);
 
     table->field[IS_FILES_ENGINE]->set_notnull();
@@ -17688,7 +17688,7 @@ static int ndbcluster_fill_files_table(h
 
       init_fill_schema_files_row(table);
       table->field[IS_FILES_FILE_NAME]->set_notnull();
-      table->field[IS_FILES_FILE_NAME]->store(elt.name, strlen(elt.name),
+      table->field[IS_FILES_FILE_NAME]->store(elt.name, (uint)strlen(elt.name),
                                               system_charset_info);
       table->field[IS_FILES_FILE_TYPE]->set_notnull();
       table->field[IS_FILES_FILE_TYPE]->store("UNDO LOG", 8,
@@ -17697,7 +17697,7 @@ static int ndbcluster_fill_files_table(h
       uf.getLogfileGroupId(&objid);
       table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
       table->field[IS_FILES_LOGFILE_GROUP_NAME]->store(uf.getLogfileGroup(),
-                                                  strlen(uf.getLogfileGroup()),
+                                                  (uint)strlen(uf.getLogfileGroup()),
                                                        system_charset_info);
       table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->set_notnull();
       table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->store(objid.getObjectId(), true);
@@ -17720,7 +17720,7 @@ static int ndbcluster_fill_files_table(h
       table->field[IS_FILES_VERSION]->store(uf.getObjectVersion(), true);
 
       char extra[100];
-      int len= my_snprintf(extra,sizeof(extra),"CLUSTER_NODE=%u;UNDO_BUFFER_SIZE=%lu",
+      int len= (int)my_snprintf(extra,sizeof(extra),"CLUSTER_NODE=%u;UNDO_BUFFER_SIZE=%lu",
                            id, (ulong) lfg.getUndoBufferSize());
       table->field[IS_FILES_EXTRA]->set_notnull();
       table->field[IS_FILES_EXTRA]->store(extra, len, system_charset_info);
@@ -17755,7 +17755,7 @@ static int ndbcluster_fill_files_table(h
 
     table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
     table->field[IS_FILES_LOGFILE_GROUP_NAME]->store(elt.name,
-                                                     strlen(elt.name),
+                                                     (uint)strlen(elt.name),
                                                      system_charset_info);
     table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->set_notnull();
     table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->store(lfg.getObjectId(), true);
@@ -17773,7 +17773,7 @@ static int ndbcluster_fill_files_table(h
     table->field[IS_FILES_VERSION]->store(lfg.getObjectVersion(), true);
 
     char extra[100];
-    int len= my_snprintf(extra,sizeof(extra),
+    int len= (int)my_snprintf(extra,sizeof(extra),
                          "UNDO_BUFFER_SIZE=%lu",
                          (ulong) lfg.getUndoBufferSize());
     table->field[IS_FILES_EXTRA]->set_notnull();

=== modified file 'sql/ha_ndbcluster_binlog.cc'
--- a/sql/ha_ndbcluster_binlog.cc	2011-10-17 18:13:57 +0000
+++ b/sql/ha_ndbcluster_binlog.cc	2011-10-20 19:41:56 +0000
@@ -1256,7 +1256,7 @@ static int ndbcluster_find_all_databases
             /* create missing database */
             sql_print_information("NDB: Discovered missing database '%s'", db);
             const int no_print_error[1]= {0};
-            name_len= my_snprintf(name, sizeof(name), "CREATE DATABASE %s", db);
+            name_len= (unsigned)my_snprintf(name, sizeof(name), "CREATE DATABASE %s", db);
             run_query(thd, name, name + name_len,
                       no_print_error);
             run_query(thd, query, query + query_length,
@@ -1588,12 +1588,12 @@ ndbcluster_update_slock(THD *thd,
       DBUG_ASSERT(r == 0);
     
       /* db */
-      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, strlen(db));
+      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, (int)strlen(db));
       r|= op->equal(SCHEMA_DB_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* name */
       ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, table_name,
-                       strlen(table_name));
+                       (int)strlen(table_name));
       r|= op->equal(SCHEMA_NAME_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* slock */
@@ -1631,12 +1631,12 @@ ndbcluster_update_slock(THD *thd,
       DBUG_ASSERT(r == 0);
 
       /* db */
-      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, strlen(db));
+      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, (int)strlen(db));
       r|= op->equal(SCHEMA_DB_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* name */
       ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, table_name,
-                       strlen(table_name));
+                       (int)strlen(table_name));
       r|= op->equal(SCHEMA_NAME_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* slock */
@@ -2007,12 +2007,12 @@ int ndbcluster_log_schema_op(THD *thd,
       DBUG_ASSERT(r == 0);
       
       /* db */
-      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, log_db, strlen(log_db));
+      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, log_db, (int)strlen(log_db));
       r|= op->equal(SCHEMA_DB_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* name */
       ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, log_tab,
-                       strlen(log_tab));
+                       (int)strlen(log_tab));
       r|= op->equal(SCHEMA_NAME_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* slock */
@@ -2503,7 +2503,7 @@ ndb_binlog_thread_handle_schema_event(TH
           // fall through
         case SOT_RENAME_TABLE_NEW:
         {
-          uint end= my_snprintf(&errmsg[0], MYSQL_ERRMSG_SIZE,
+          uint end= (uint)my_snprintf(&errmsg[0], MYSQL_ERRMSG_SIZE,
                                 "NDB Binlog: Skipping renaming locally "
                                 "defined table '%s.%s' from binlog schema "
                                 "event '%s' from node %d. ",
@@ -2515,7 +2515,7 @@ ndb_binlog_thread_handle_schema_event(TH
         case SOT_DROP_TABLE:
           if (schema_type == SOT_DROP_TABLE)
           {
-            uint end= my_snprintf(&errmsg[0], MYSQL_ERRMSG_SIZE,
+            uint end= (uint)my_snprintf(&errmsg[0], MYSQL_ERRMSG_SIZE,
                                   "NDB Binlog: Skipping dropping locally "
                                   "defined table '%s.%s' from binlog schema "
                                   "event '%s' from node %d. ",
@@ -3292,7 +3292,7 @@ ndb_binlog_index_table__write_rows(THD *
 
     ndb_binlog_index->field[0]->store(first->master_log_pos, true);
     ndb_binlog_index->field[1]->store(first->master_log_file,
-                                      strlen(first->master_log_file),
+                                      (uint)strlen(first->master_log_file),
                                       &my_charset_bin);
     ndb_binlog_index->field[2]->store(epoch= first->epoch, true);
     if (ndb_binlog_index->s->fields > 7)
@@ -4043,7 +4043,7 @@ parse_conflict_fn_spec(const char* confl
   {
     const st_conflict_fn_def &fn= conflict_fns[i];
 
-    uint len= strlen(fn.name);
+    uint len= (uint)strlen(fn.name);
     if (strncmp(ptr, fn.name, len))
       continue;
 
@@ -4115,7 +4115,7 @@ parse_conflict_fn_spec(const char* confl
         }
       }
 
-      uint len= end_arg - start_arg;
+      uint len= (uint)(end_arg - start_arg);
       args[no_args].type=    type;
       args[no_args].ptr=     start_arg;
       args[no_args].len=     len;
@@ -4444,9 +4444,9 @@ ndbcluster_read_replication_table(THD *t
       DBUG_PRINT("info", ("reading[%u]: %s,%s,%u", i, db, table_name, id));
       if ((_op= trans->getNdbOperation(reptab)) == NULL) abort();
       if (_op->readTuple(NdbOperation::LM_CommittedRead)) abort();
-      ndb_pack_varchar(col_db, tmp_buf, db, strlen(db));
+      ndb_pack_varchar(col_db, tmp_buf, db, (int)strlen(db));
       if (_op->equal(col_db->getColumnNo(), tmp_buf)) abort();
-      ndb_pack_varchar(col_table_name, tmp_buf, table_name, strlen(table_name));
+      ndb_pack_varchar(col_table_name, tmp_buf, table_name, (int)strlen(table_name));
       if (_op->equal(col_table_name->getColumnNo(), tmp_buf)) abort();
       if (_op->equal(col_server_id->getColumnNo(), id)) abort();
       if ((col_binlog_type_rec_attr[i]=
@@ -5202,7 +5202,7 @@ ndbcluster_create_event_ops(THD *thd, ND
   Ndb_event_data *event_data= share->event_data;
   int do_ndb_schema_share= 0, do_ndb_apply_status_share= 0;
 #ifdef HAVE_NDB_BINLOG
-  uint len= strlen(share->table_name);
+  uint len= (uint)strlen(share->table_name);
 #endif
   if (!ndb_schema_share && strcmp(share->db, NDB_REP_DB) == 0 &&
       strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0)
@@ -6576,7 +6576,7 @@ restart_cluster_failure:
     {
       LOG_INFO log_info;
       mysql_bin_log.get_current_log(&log_info);
-      int len=  strlen(log_info.log_file_name);
+      int len=  (int)strlen(log_info.log_file_name);
       uint no= 0;
       if ((sscanf(log_info.log_file_name + len - 6, "%u", &no) == 1) &&
           no == 1)
@@ -7435,7 +7435,7 @@ ndbcluster_show_status_binlog(THD* thd,
     ndb_latest_epoch= injector_ndb->getLatestGCI();
     pthread_mutex_unlock(&injector_mutex);
 
-    buflen=
+    buflen= (uint)
       my_snprintf(buf, sizeof(buf),
                   "latest_epoch=%s, "
                   "latest_trans_epoch=%s, "
@@ -7448,7 +7448,7 @@ ndbcluster_show_status_binlog(THD* thd,
                   llstr(ndb_latest_handled_binlog_epoch, buff4),
                   llstr(ndb_latest_applied_binlog_epoch, buff5));
     if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length,
-                   "binlog", strlen("binlog"),
+                   "binlog", (uint)strlen("binlog"),
                    buf, buflen))
       DBUG_RETURN(TRUE);
   }

=== modified file 'sql/ha_ndbcluster_binlog.h'
--- a/sql/ha_ndbcluster_binlog.h	2011-09-22 13:41:13 +0000
+++ b/sql/ha_ndbcluster_binlog.h	2011-10-20 19:41:56 +0000
@@ -228,7 +228,7 @@ ndbcluster_show_status_binlog(THD* thd,
   the ndb binlog code
 */
 int cmp_frm(const NDBTAB *ndbtab, const void *pack_data,
-            uint pack_length);
+            size_t pack_length);
 int ndbcluster_find_all_files(THD *thd);
 
 char *ndb_pack_varchar(const NDBCOL *col, char *buf,

=== modified file 'sql/ha_ndbcluster_connection.cc'
--- a/sql/ha_ndbcluster_connection.cc	2011-10-17 18:13:57 +0000
+++ b/sql/ha_ndbcluster_connection.cc	2011-10-20 19:41:56 +0000
@@ -345,10 +345,8 @@ ndb_transid_mysql_connection_map_fill_ta
 {
   DBUG_ENTER("ndb_transid_mysql_connection_map_fill_table");
 
-  if (check_global_access(thd, PROCESS_ACL))
-  {
-    DBUG_RETURN(0);
-  }
+  const bool all = (check_global_access(thd, PROCESS_ACL) == 0);
+  const ulonglong self = thd_get_thread_id(thd);
 
   TABLE* table= tables->table;
   for (uint i = 0; i<g_pool_alloc; i++)
@@ -359,16 +357,20 @@ ndb_transid_mysql_connection_map_fill_ta
       const Ndb * p = g_pool[i]->get_next_ndb_object(0);
       while (p)
       {
-        table->field[0]->set_notnull();
-        table->field[0]->store(p->getCustomData64(), true);
-        table->field[1]->set_notnull();
-        table->field[1]->store(g_pool[i]->node_id());
-        table->field[2]->set_notnull();
-        table->field[2]->store(p->getNextTransactionId(), true);
-        schema_table_store_record(thd, table);
-        p = g_pool[i]->get_next_ndb_object(p);
+        Uint64 connection_id = p->getCustomData64();
+        if ((connection_id == self) || all)
+        {
+          table->field[0]->set_notnull();
+          table->field[0]->store(p->getCustomData64(), true);
+          table->field[1]->set_notnull();
+          table->field[1]->store(g_pool[i]->node_id());
+          table->field[2]->set_notnull();
+          table->field[2]->store(p->getNextTransactionId(), true);
+          schema_table_store_record(thd, table);
+        }
+        p = g_pool[i]->get_next_ndb_object(p);
       }
       g_pool[i]->unlock_ndb_objects();
     }
   }
 

=== modified file 'sql/ha_ndbinfo.cc'
--- a/sql/ha_ndbinfo.cc	2011-10-17 18:13:57 +0000
+++ b/sql/ha_ndbinfo.cc	2011-10-20 19:41:56 +0000
@@ -246,7 +246,7 @@ bool ha_ndbinfo::get_error_message(int e
   if (!message)
     DBUG_RETURN(false);
 
-  buf->set(message, strlen(message), &my_charset_bin);
+  buf->set(message, (uint32)strlen(message), &my_charset_bin);
   DBUG_PRINT("exit", ("message: %s", buf->ptr()));
   DBUG_RETURN(false);
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/AndPredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/AndPredicateImpl.java	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/AndPredicateImpl.java	2011-10-20 19:41:56 +0000
@@ -61,11 +61,13 @@ public class AndPredicateImpl extends Pr
         }
     }
 
+    @Override
     public Predicate or(Predicate predicate) {
         throw new UnsupportedOperationException(
                 local.message("ERR_NotImplemented"));
     }
 
+    @Override
     public Predicate not() {
         throw new UnsupportedOperationException(
                 local.message("ERR_NotImplemented"));
@@ -109,6 +111,7 @@ public class AndPredicateImpl extends Pr
     /** Set the keys into the operation for each predicate.
      * Each predicate must be an equal predicate for a primary or unique key.
      */
+    @Override
     public void operationEqual(QueryExecutionContext context,
             Operation op) {
         for (PredicateImpl predicate: predicates) {
@@ -120,17 +123,6 @@ public class AndPredicateImpl extends Pr
         }
     }
 
-    /** Get the best index for the operation. Delegate to the method
-     * in the superclass, passing the array of predicates.
-     *
-     * @return the best index
-     */
-    @Override
-    public CandidateIndexImpl getBestCandidateIndex(QueryExecutionContext context) {
-        return getBestCandidateIndexFor(context, predicates.toArray(
-                new PredicateImpl[predicates.size()]));
-    }
-
     /** Get the number of conditions in the top level predicate.
      * This is used to determine whether a hash index can be used. If there
      * are exactly the number of conditions as index columns, then the
@@ -144,4 +136,14 @@ public class AndPredicateImpl extends Pr
     protected int getNumberOfConditionsInPredicate() {
         return predicates.size();
     }
+
+    /** Return an array of top level predicates that might be used with indices.
+     * 
+     * @return an array of top level predicates (defaults to {this}).
+     */
+    @Override
+    protected PredicateImpl[] getTopLevelPredicates() {
+        return predicates.toArray(new PredicateImpl[predicates.size()]);
+    }
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/BetweenPredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/BetweenPredicateImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/BetweenPredicateImpl.java	2011-10-18 22:54:36 +0000
@@ -46,11 +46,13 @@ public class BetweenPredicateImpl extend
         upper.setProperty(property);
     }
 
+    @Override
     public void markParameters() {
         lower.mark();
         upper.mark();
     }
 
+    @Override
     public void unmarkParameters() {
         lower.unmark();
         upper.unmark();
@@ -66,19 +68,36 @@ public class BetweenPredicateImpl extend
         property.markUpperBound(candidateIndices, this, false);
     }
 
+    @Override
+    public void markBoundsForCandidateIndices(CandidateIndexImpl[] candidateIndices) {
+        property.markLowerBound(candidateIndices, this, false);
+        property.markUpperBound(candidateIndices, this, false);
+    }
+
     /** Set the upper and lower bounds for the operation.
      * Delegate to the property to actually call the setBounds for each
      * of upper and lower bound.
      * @param context the query context that contains the parameter values
      * @param op the index scan operation on which to set bounds
+     * @return an indicator of which bound(s) were actually set
      */
     @Override
-    public void operationSetBounds(QueryExecutionContext context,
+    public int operationSetBounds(QueryExecutionContext context,
             IndexScanOperation op, boolean lastColumn) {
-        property.operationSetBounds(lower.getParameterValue(context),
-                IndexScanOperation.BoundType.BoundLE, op);
-        property.operationSetBounds(upper.getParameterValue(context),
-                IndexScanOperation.BoundType.BoundGE, op);
+        int result = NO_BOUND_SET;
+        Object lowerValue = lower.getParameterValue(context);
+        Object upperValue = upper.getParameterValue(context);
+        if (lowerValue != null) {
+            property.operationSetBounds(lowerValue,
+                    IndexScanOperation.BoundType.BoundLE, op);
+            result |= LOWER_BOUND_SET;
+        }
+        if (upperValue != null) {
+            property.operationSetBounds(upperValue,
+                    IndexScanOperation.BoundType.BoundGE, op);
+            result |= UPPER_BOUND_SET;
+        }
+        return result;
     }
 
     /** Set the upper bound for the operation.
@@ -88,10 +107,15 @@ public class BetweenPredicateImpl extend
      * @param op the index scan operation on which to set bounds
      */
     @Override
-    public void operationSetUpperBound(QueryExecutionContext context,
+    public int operationSetUpperBound(QueryExecutionContext context,
             IndexScanOperation op, boolean lastColumn) {
-        property.operationSetBounds(upper.getParameterValue(context),
-                IndexScanOperation.BoundType.BoundGE, op);
+        Object upperValue = upper.getParameterValue(context);
+        if (upperValue != null) {
+            property.operationSetBounds(upperValue,
+                    IndexScanOperation.BoundType.BoundGE, op);
+            return UPPER_BOUND_SET;
+        }
+        return NO_BOUND_SET;
     }
 
     /** Set the lower bound for the operation.
@@ -101,10 +125,15 @@ public class BetweenPredicateImpl extend
      * @param op the index scan operation on which to set bounds
      */
     @Override
-    public void operationSetLowerBound(QueryExecutionContext context,
+    public int operationSetLowerBound(QueryExecutionContext context,
             IndexScanOperation op, boolean lastColumn) {
-        property.operationSetBounds(lower.getParameterValue(context),
-                IndexScanOperation.BoundType.BoundLE, op);
+        Object lowerValue = lower.getParameterValue(context);
+        if (lowerValue != null) {
+            property.operationSetBounds(lowerValue,
+                    IndexScanOperation.BoundType.BoundLE, op);
+            return LOWER_BOUND_SET;
+        }
+        return NO_BOUND_SET;
     }
 
     /** Create a filter for the operation. Set the condition into the
@@ -140,4 +169,9 @@ public class BetweenPredicateImpl extend
                 ScanFilter.BinaryCondition.COND_LE, filter);
     }
 
+    @Override 
+    public boolean isUsable(QueryExecutionContext context) {
+        return !(lower.getParameterValue(context) == null || upper.getParameterValue(context) == null);
+    }
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/CandidateIndexImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/CandidateIndexImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/CandidateIndexImpl.java	2011-10-18 22:54:36 +0000
@@ -46,7 +46,7 @@ import java.util.List;
  * one for each index containing the column referenced by the query term.
  * 
  */
-public class CandidateIndexImpl {
+public final class CandidateIndexImpl {
 
     /** My message translator */
     static final I18NHelper local = I18NHelper.getInstance(CandidateIndexImpl.class);
@@ -63,6 +63,7 @@ public class CandidateIndexImpl {
     private CandidateColumnImpl[] candidateColumns = null;
     private ScanType scanType = PredicateImpl.ScanType.TABLE_SCAN;
     private int fieldScore = 1;
+    protected int score = 0;
 
     public CandidateIndexImpl(
             String className, Index storeIndex, boolean unique, AbstractDomainFieldHandlerImpl[] fields) {
@@ -114,7 +115,7 @@ public class CandidateIndexImpl {
 
     @Override
     public String toString() {
-        StringBuffer buffer = new StringBuffer();
+        StringBuilder buffer = new StringBuilder();
         buffer.append("CandidateIndexImpl for class: ");
         buffer.append(className);
         buffer.append(" index: ");
@@ -174,13 +175,12 @@ public class CandidateIndexImpl {
      * The last query term (candidate column) for each of the lower and upper bound is noted.
      * The method is synchronized because the method modifies the state of the instance,
      * which might be shared by multiple threads.
-     * @return the score of this index.
      */
-    synchronized int getScore() {
+    synchronized void score() {
+        score = 0;
         if (candidateColumns == null) {
-            return 0;
+            return;
         }
-        int result = 0;
         boolean lowerBoundDone = false;
         boolean upperBoundDone = false;
         if (unique) {
@@ -188,7 +188,7 @@ public class CandidateIndexImpl {
             for (CandidateColumnImpl column: candidateColumns) {
                 if (!(column.equalBound)) {
                     // not equal bound; can't use unique index
-                    return result;
+                    return;
                 }
             }
             if ("PRIMARY".equals(indexName)) {
@@ -196,7 +196,8 @@ public class CandidateIndexImpl {
             } else {
                 scanType = PredicateImpl.ScanType.UNIQUE_KEY;
             }
-            return 100;
+            score = 100;
+            return;
         } else {
             // range index
             // leading columns need any kind of bound
@@ -205,22 +206,22 @@ public class CandidateIndexImpl {
                 if ((candidateColumn.equalBound)) {
                     scanType = PredicateImpl.ScanType.INDEX_SCAN;
                     if (!lowerBoundDone) {
-                        result += fieldScore;
+                        score += fieldScore;
                         lastLowerBoundColumn = candidateColumn;
                     }
                     if (!upperBoundDone) {
-                        result += fieldScore;
+                        score += fieldScore;
                         lastUpperBoundColumn = candidateColumn;
                     }
                 } else if ((candidateColumn.inBound)) {
                     scanType = PredicateImpl.ScanType.INDEX_SCAN;
                     multiRange = true;
                     if (!lowerBoundDone) {
-                        result += fieldScore;
+                        score += fieldScore;
                         lastLowerBoundColumn = candidateColumn;
                     }
                     if (!upperBoundDone) {
-                        result += fieldScore;
+                        score += fieldScore;
                         lastUpperBoundColumn = candidateColumn;
                     }
                 } else if (!(lowerBoundDone && upperBoundDone)) {
@@ -233,7 +234,7 @@ public class CandidateIndexImpl {
                     }
                     if (!lowerBoundDone) {
                         if (hasLowerBound) {
-                            result += fieldScore;
+                            score += fieldScore;
                             lastLowerBoundColumn = candidateColumn;
                         } else {
                             lowerBoundDone = true;
@@ -241,7 +242,7 @@ public class CandidateIndexImpl {
                     }
                     if (!upperBoundDone) {
                         if (hasUpperBound) {
-                            result += fieldScore;
+                            score += fieldScore;
                             lastUpperBoundColumn = candidateColumn;
                         } else {
                             upperBoundDone = true;
@@ -259,7 +260,7 @@ public class CandidateIndexImpl {
                 lastUpperBoundColumn.markLastUpperBoundColumn();
             }
         }
-        return result;
+        return;
     }
 
     public ScanType getScanType() {
@@ -350,6 +351,7 @@ public class CandidateIndexImpl {
     class CandidateColumnImpl {
 
         protected AbstractDomainFieldHandlerImpl domainFieldHandler;
+        protected PredicateImpl predicate;
         protected PredicateImpl lowerBoundPredicate;
         protected PredicateImpl upperBoundPredicate;
         protected PredicateImpl equalPredicate;
@@ -375,7 +377,6 @@ public class CandidateIndexImpl {
         }
 
         public int getParameterSize(QueryExecutionContext context) {
-            // TODO Auto-generated method stub
             return inPredicate.getParameterSize(context);
         }
 
@@ -402,21 +403,25 @@ public class CandidateIndexImpl {
         private void markLowerBound(PredicateImpl predicate, boolean strict) {
             lowerBoundStrict = strict;
             this.lowerBoundPredicate = predicate;
+            this.predicate = predicate;
         }
 
         private void markUpperBound(PredicateImpl predicate, boolean strict) {
             upperBoundStrict = strict;
             this.upperBoundPredicate = predicate;
+            this.predicate = predicate;
         }
 
         private void markEqualBound(PredicateImpl predicate) {
             equalBound = true;
             this.equalPredicate = predicate;
+            this.predicate = predicate;
         }
 
         public void markInBound(InPredicateImpl predicate) {
             inBound = true;
             this.inPredicate = predicate;
+            this.predicate = predicate;
         }
 
         /** Set bounds into each predicate that has been defined.
@@ -428,6 +433,8 @@ public class CandidateIndexImpl {
         private int operationSetBounds(
                 QueryExecutionContext context, IndexScanOperation op, int index, int boundStatus) {
 
+            int boundSet = PredicateImpl.NO_BOUND_SET;
+
             if (logger.isDetailEnabled()) logger.detail("column: " + domainFieldHandler.getName() 
                     + " boundStatus: " + boundStatus
                     + " lastLowerBoundColumn: " + lastLowerBoundColumn
@@ -439,51 +446,53 @@ public class CandidateIndexImpl {
                 case BOUND_STATUS_NO_BOUND_DONE:
                     // can set either/both lower or upper bound
                     if (equalPredicate != null) {
-                        equalPredicate.operationSetBounds(context, op, true);
+                        boundSet |= equalPredicate.operationSetBounds(context, op, true);
                     }
                     if (inPredicate != null) {
-                        inPredicate.operationSetBound(context, op, index, true);
+                        boundSet |= inPredicate.operationSetBound(context, op, index, true);
                     }
                     if (lowerBoundPredicate != null) {
-                        lowerBoundPredicate.operationSetLowerBound(context, op, lastLowerBoundColumn);
+                        boundSet |= lowerBoundPredicate.operationSetLowerBound(context, op, lastLowerBoundColumn);
                     }
                     if (upperBoundPredicate != null) {
-                        upperBoundPredicate.operationSetUpperBound(context, op, lastUpperBoundColumn);
+                        boundSet |= upperBoundPredicate.operationSetUpperBound(context, op, lastUpperBoundColumn);
                     }
                     break;
                 case BOUND_STATUS_LOWER_BOUND_DONE:
                     // cannot set lower, only upper bound
                     if (equalPredicate != null) {
-                        equalPredicate.operationSetUpperBound(context, op, lastUpperBoundColumn);
+                        boundSet |= equalPredicate.operationSetUpperBound(context, op, lastUpperBoundColumn);
                     }
                     if (inPredicate != null) {
-                        inPredicate.operationSetUpperBound(context, op, index);
+                        boundSet |= inPredicate.operationSetUpperBound(context, op, index);
                     }
                     if (upperBoundPredicate != null) {
-                        upperBoundPredicate.operationSetUpperBound(context, op, lastUpperBoundColumn);
+                        boundSet |= upperBoundPredicate.operationSetUpperBound(context, op, lastUpperBoundColumn);
                     }
                     break;
                 case BOUND_STATUS_UPPER_BOUND_DONE:
                     // cannot set upper, only lower bound
                     if (equalPredicate != null) {
-                        equalPredicate.operationSetLowerBound(context, op, lastLowerBoundColumn);
+                        boundSet |= equalPredicate.operationSetLowerBound(context, op, lastLowerBoundColumn);
                     }
                     if (inPredicate != null) {
-                        inPredicate.operationSetLowerBound(context, op, index);
+                        boundSet |= inPredicate.operationSetLowerBound(context, op, index);
                     }
                     if (lowerBoundPredicate != null) {
-                        lowerBoundPredicate.operationSetLowerBound(context, op, lastLowerBoundColumn);
+                        boundSet |= lowerBoundPredicate.operationSetLowerBound(context, op, lastLowerBoundColumn);
                     }
                     break;
             }
-            if (!hasLowerBound()) {
-                // if this has no lower bound, set lower bound done
+            if (0 == (boundSet & PredicateImpl.LOWER_BOUND_SET)) {
+                // didn't set lower bound
                 boundStatus |= BOUND_STATUS_LOWER_BOUND_DONE;
             }
-            if (!hasUpperBound()) {
-                // if this has no upper bound, set upper bound done
+                
+            if (0 == (boundSet & PredicateImpl.UPPER_BOUND_SET)) {
+                // didn't set upper bound
                 boundStatus |= BOUND_STATUS_UPPER_BOUND_DONE;
             }
+                
             return boundStatus;
         }
 
@@ -511,8 +520,33 @@ public class CandidateIndexImpl {
         return storeIndex;
     }
 
+    public int getScore() {
+        return score;
+    }
+
     public boolean isMultiRange() {
         return multiRange;
     }
 
+    public boolean isUnique() {
+        return unique;
+    }
+
+    /** Is this index usable in the current context?
+     * If a primary or unique index, all parameters must be non-null.
+     * If a btree index, the parameter for the first comparison must be non-null
+     * @param context the query execution context
+     * @return true if all relevant parameters in the context are non-null
+     */
+    public boolean isUsable(QueryExecutionContext context) {
+        if (unique) {
+            return context.hasNoNullParameters();
+        } else {
+            // the first parameter must not be null
+            CandidateColumnImpl candidateColumn = candidateColumns[0];
+            PredicateImpl predicate = candidateColumn.predicate;
+            return predicate.isUsable(context);
+        }
+    }
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/ComparativePredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/ComparativePredicateImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/ComparativePredicateImpl.java	2011-10-18 22:54:36 +0000
@@ -46,10 +46,12 @@ public abstract class ComparativePredica
         param.setProperty(property);
     }
 
+    @Override
     public void markParameters() {
         param.mark();
     }
 
+    @Override
     public void unmarkParameters() {
         param.unmark();
     }
@@ -61,17 +63,27 @@ public abstract class ComparativePredica
     }
 
     @Override
-    public void operationSetLowerBound(QueryExecutionContext context,
+    public int operationSetLowerBound(QueryExecutionContext context,
             IndexScanOperation op, boolean lastColumn) {
         // delegate to setBounds for most operations
-        operationSetBounds(context, op, lastColumn);
+        return operationSetBounds(context, op, lastColumn);
     }
 
     @Override
-    public void operationSetUpperBound(QueryExecutionContext context,
+    public int operationSetUpperBound(QueryExecutionContext context,
             IndexScanOperation op, boolean lastColumn) {
         // delegate to setBounds for most operations
-        operationSetBounds(context, op, lastColumn);
+        return operationSetBounds(context, op, lastColumn);
+    }
+
+    @Override
+    public ParameterImpl getParameter() {
+        return param;
+    }
+
+    @Override 
+    public boolean isUsable(QueryExecutionContext context) {
+        return param.getParameterValue(context) != null;
     }
 
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/EqualPredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/EqualPredicateImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/EqualPredicateImpl.java	2011-10-18 22:54:36 +0000
@@ -40,21 +40,44 @@ public class EqualPredicateImpl extends
     }
 
     @Override
-    public void operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
+    public void markBoundsForCandidateIndices(CandidateIndexImpl[] candidateIndices) {
+        property.markEqualBound(candidateIndices, this);
+    }
+
+    @Override
+    public int operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
         // can always set boundEQ
-        property.operationSetBounds(param.getParameterValue(context), IndexScanOperation.BoundType.BoundEQ, op);
+        Object value = param.getParameterValue(context);
+        if (value != null) {
+            property.operationSetBounds(value, IndexScanOperation.BoundType.BoundEQ, op);
+            return BOTH_BOUNDS_SET;
+        } else {
+            return NO_BOUND_SET;
+        }
     }
 
     @Override
-    public void operationSetLowerBound(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
+    public int operationSetLowerBound(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
         // only set lower bound
-        property.operationSetBounds(param.getParameterValue(context), IndexScanOperation.BoundType.BoundLE, op);
+        Object value = param.getParameterValue(context);
+        if (value != null) {
+            property.operationSetBounds(value, IndexScanOperation.BoundType.BoundLE, op);
+            return LOWER_BOUND_SET;
+        } else {
+            return NO_BOUND_SET;
+        }
     }
 
     @Override
-    public void operationSetUpperBound(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
+    public int operationSetUpperBound(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
         // only set upper bound
-        property.operationSetBounds(param.getParameterValue(context), IndexScanOperation.BoundType.BoundGE, op);
+        Object value = param.getParameterValue(context);
+        if (value != null) {
+            property.operationSetBounds(value, IndexScanOperation.BoundType.BoundGE, op);
+            return UPPER_BOUND_SET;
+        } else {
+            return NO_BOUND_SET;
+        }
     }
 
     @Override

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/GreaterEqualPredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/GreaterEqualPredicateImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/GreaterEqualPredicateImpl.java	2011-10-18 22:54:36 +0000
@@ -39,8 +39,19 @@ public class GreaterEqualPredicateImpl e
     }
 
     @Override
-    public void operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
-        property.operationSetBounds(param.getParameterValue(context), IndexScanOperation.BoundType.BoundLE, op);
+    public void markBoundsForCandidateIndices(CandidateIndexImpl[] candidateIndices) {
+        property.markLowerBound(candidateIndices, this, false);
+    }
+
+    @Override
+    public int operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
+        Object lowerBound = param.getParameterValue(context);
+        if (lowerBound != null) {
+            property.operationSetBounds(lowerBound, IndexScanOperation.BoundType.BoundLE, op);
+            return LOWER_BOUND_SET;
+        } else {
+            return NO_BOUND_SET;
+        }
     }
 
     /** Set the condition into the filter.

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/GreaterThanPredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/GreaterThanPredicateImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/GreaterThanPredicateImpl.java	2011-10-18 22:54:36 +0000
@@ -39,13 +39,24 @@ public class GreaterThanPredicateImpl ex
     }
 
     @Override
-    public void operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
-        if (lastColumn) {
-            // last column may be strict
-            property.operationSetBounds(param.getParameterValue(context), IndexScanOperation.BoundType.BoundLT, op);
+    public void markBoundsForCandidateIndices(CandidateIndexImpl[] candidateIndices) {
+        property.markLowerBound(candidateIndices, this, true);
+    }
+
+    @Override
+    public int operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
+        Object lowerValue = param.getParameterValue(context);
+        if (lowerValue != null) {
+            if (lastColumn) {
+                // last column may be strict
+                property.operationSetBounds(lowerValue, IndexScanOperation.BoundType.BoundLT, op);
+            } else {
+                // not-last column must not be strict
+                property.operationSetBounds(lowerValue, IndexScanOperation.BoundType.BoundLE, op);
+            }
+            return LOWER_BOUND_SET;
         } else {
-            // not-last column must not be strict
-            property.operationSetBounds(param.getParameterValue(context), IndexScanOperation.BoundType.BoundLE, op);
+            return NO_BOUND_SET;
         }
     }
 

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/InPredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/InPredicateImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/InPredicateImpl.java	2011-10-18 22:54:36 +0000
@@ -58,7 +58,7 @@ public class InPredicateImpl extends Pre
     }
 
     @Override
-    void markBoundsForCandidateIndices(QueryExecutionContext context,
+    public void markBoundsForCandidateIndices(QueryExecutionContext context,
             CandidateIndexImpl[] candidateIndices) {
         if (parameter.getParameterValue(context) == null) {
             // null parameters cannot be used with index scans
@@ -67,6 +67,11 @@ public class InPredicateImpl extends Pre
         property.markInBound(candidateIndices, this);
     }
 
+    @Override
+    public void markBoundsForCandidateIndices(CandidateIndexImpl[] candidateIndices) {
+        property.markInBound(candidateIndices, this);
+    }
+
     /** Set bound for the multi-valued parameter identified by the index.
      * 
      * @param context the query execution context
@@ -74,27 +79,27 @@ public class InPredicateImpl extends Pre
      * @param index the index into the parameter list
      * @param lastColumn if true, can set strict bound
      */
-    public void operationSetBound(
+    public int operationSetBound(
             QueryExecutionContext context, IndexScanOperation op, int index, boolean lastColumn) {
         if (lastColumn) {
             // last column can be strict
-            operationSetBound(context, op, index, BoundType.BoundEQ);
+            return operationSetBound(context, op, index, BoundType.BoundEQ);
         } else {
             // not last column cannot be strict
-            operationSetBound(context, op, index, BoundType.BoundLE);
-            operationSetBound(context, op, index, BoundType.BoundGE);
+            return operationSetBound(context, op, index, BoundType.BoundLE) +
+                    operationSetBound(context, op, index, BoundType.BoundGE);
         }
     }
 
-    public void operationSetUpperBound(QueryExecutionContext context, IndexScanOperation op, int index) {
-        operationSetBound(context, op, index, BoundType.BoundGE);
+    public int operationSetUpperBound(QueryExecutionContext context, IndexScanOperation op, int index) {
+        return operationSetBound(context, op, index, BoundType.BoundGE);
     }
 
-    public void operationSetLowerBound(QueryExecutionContext context, IndexScanOperation op, int index) {
-        operationSetBound(context, op, index, BoundType.BoundLE);
+    public int operationSetLowerBound(QueryExecutionContext context, IndexScanOperation op, int index) {
+        return operationSetBound(context, op, index, BoundType.BoundLE);
     }
 
-    private void operationSetBound(
+    private int operationSetBound(
             QueryExecutionContext context, IndexScanOperation op, int index, BoundType boundType) {
     Object parameterValue = parameter.getParameterValue(context);
         if (parameterValue == null) {
@@ -103,8 +108,8 @@ public class InPredicateImpl extends Pre
         } else if (parameterValue instanceof List<?>) {
             List<?> parameterList = (List<?>)parameterValue;
             Object value = parameterList.get(index);
-            property.operationSetBounds(value, boundType, op);
             if (logger.isDetailEnabled()) logger.detail("InPredicateImpl.operationSetBound for " + property.fmd.getName() + " List index: " + index + " value: " + value + " boundType: " + boundType);
+            property.operationSetBounds(value, boundType, op);
         } else if (parameterValue.getClass().isArray()) {
             Object[] parameterArray = (Object[])parameterValue;
             Object value = parameterArray[index];
@@ -115,6 +120,7 @@ public class InPredicateImpl extends Pre
                     local.message("ERR_Parameter_Wrong_Type", parameter.parameterName,
                             parameterValue.getClass().getName(), "List<?> or Object[]"));
         }
+        return BOTH_BOUNDS_SET;
     }
 
     /** Set bounds for the multi-valued parameter identified by the index.
@@ -157,6 +163,7 @@ public class InPredicateImpl extends Pre
      * @param context the query execution context with the parameter values
      * @param op the operation
      */
+    @Override
     public void filterCmpValue(QueryExecutionContext context,
             ScanOperation op) {
         try {
@@ -176,6 +183,7 @@ public class InPredicateImpl extends Pre
      * @param op the operation
      * @param filter the existing filter
      */
+    @Override
     public void filterCmpValue(QueryExecutionContext context, ScanOperation op, ScanFilter filter) {
         try {
             filter.begin(Group.GROUP_OR);
@@ -190,8 +198,8 @@ public class InPredicateImpl extends Pre
                 }
             } else if (parameterValue.getClass().isArray()) {
                 Object[] parameterArray = (Object[])parameterValue;
-                for (Object parameter: parameterArray) {
-                    property.filterCmpValue(parameter, BinaryCondition.COND_EQ, filter);
+                for (Object value: parameterArray) {
+                    property.filterCmpValue(value, BinaryCondition.COND_EQ, filter);
                 }
             } else {
                 throw new ClusterJUserException(
@@ -230,4 +238,9 @@ public class InPredicateImpl extends Pre
         return result;
     }
 
+    @Override 
+    public boolean isUsable(QueryExecutionContext context) {
+        return parameter.getParameterValue(context) != null;
+    }
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/LessEqualPredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/LessEqualPredicateImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/LessEqualPredicateImpl.java	2011-10-18 22:54:36 +0000
@@ -39,8 +39,19 @@ public class LessEqualPredicateImpl exte
     }
 
     @Override
-    public void operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
-        property.operationSetBounds(param.getParameterValue(context), IndexScanOperation.BoundType.BoundGE, op);
+    public void markBoundsForCandidateIndices(CandidateIndexImpl[] candidateIndices) {
+        property.markUpperBound(candidateIndices, this, false);
+    }
+
+    @Override
+    public int operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
+        Object upperValue = param.getParameterValue(context);
+        if (upperValue != null) {
+            property.operationSetBounds(upperValue, IndexScanOperation.BoundType.BoundGE, op);
+            return UPPER_BOUND_SET;
+        } else {
+            return NO_BOUND_SET;
+        }
     }
 
     /** Set the condition into the filter.

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/LessThanPredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/LessThanPredicateImpl.java	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/LessThanPredicateImpl.java	2011-10-18 22:54:36 +0000
@@ -39,13 +39,24 @@ public class LessThanPredicateImpl exten
     }
 
     @Override
-    public void operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
-        if (lastColumn) {
-            // last column may be strict
-            property.operationSetBounds(param.getParameterValue(context), IndexScanOperation.BoundType.BoundGT, op);
+    public void markBoundsForCandidateIndices(CandidateIndexImpl[] candidateIndices) {
+        property.markUpperBound(candidateIndices, this, true);
+    }
+
+    @Override
+    public int operationSetBounds(QueryExecutionContext context, IndexScanOperation op, boolean lastColumn) {
+        Object upperValue = param.getParameterValue(context);
+        if (upperValue != null) {
+            if (lastColumn) {
+                // last column may be strict
+                property.operationSetBounds(upperValue, IndexScanOperation.BoundType.BoundGT, op);
+            } else {
+                // not-last column must not be strict
+                property.operationSetBounds(upperValue, IndexScanOperation.BoundType.BoundGE, op);
+            }
+            return UPPER_BOUND_SET;
         } else {
-            // not-last column must not be strict
-            property.operationSetBounds(param.getParameterValue(context), IndexScanOperation.BoundType.BoundGE, op);
+            return NO_BOUND_SET;
         }
     }
 

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PredicateImpl.java	2011-10-02 21:20:50 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PredicateImpl.java	2011-10-18 22:54:36 +0000
@@ -34,6 +34,9 @@ import com.mysql.clusterj.core.util.Logg
 
 import com.mysql.clusterj.query.Predicate;
 
+import java.util.Comparator;
+import java.util.TreeSet;
+
 public abstract class PredicateImpl implements Predicate {
 
     /** My message translator */
@@ -45,6 +48,20 @@ public abstract class PredicateImpl impl
     /** My domain object. */
     protected QueryDomainTypeImpl<?> dobj;
 
+    /** The primary/unique index for this query if it exists */
+    CandidateIndexImpl uniqueIndex;
+
+    /** The comparator for candidate indices, ordered descending by score */
+    Comparator<CandidateIndexImpl> candidateIndexComparator = new Comparator<CandidateIndexImpl>() {
+        public int compare(CandidateIndexImpl o1, CandidateIndexImpl o2) {
+            return o2.score - o1.score;
+        }
+    };
+
+    /** The candidate indices ordered by score */
+    private TreeSet<CandidateIndexImpl> scoredCandidateIndices =
+        new TreeSet<CandidateIndexImpl>(candidateIndexComparator);
+
     /** Scan types. */
     protected enum ScanType {
         INDEX_SCAN,
@@ -53,6 +70,15 @@ public abstract class PredicateImpl impl
         PRIMARY_KEY
     }
 
+    /** Indicates no bound set while setting bounds on index operations */
+    public static int NO_BOUND_SET = 0;
+    /** Indicates lower bound set while setting bounds on index operations */
+    public static int LOWER_BOUND_SET = 1;
+    /** Indicates upper bound set while setting bounds on index operations */
+    public static int UPPER_BOUND_SET = 2;
+    /** Indicates both bounds set while setting bounds on index operations */
+    public static int BOTH_BOUNDS_SET = 3;
+
     public PredicateImpl(QueryDomainTypeImpl<?> dobj) {
         this.dobj = dobj;
     }
@@ -84,19 +110,19 @@ public abstract class PredicateImpl impl
         // default is nothing to do
     }
 
-    public void operationSetBounds(QueryExecutionContext context,
+    public int operationSetBounds(QueryExecutionContext context,
             IndexScanOperation op, boolean lastColumn) {
         throw new ClusterJFatalInternalException(
                 local.message("ERR_Implementation_Should_Not_Occur"));
     }
 
-    public void operationSetLowerBound(QueryExecutionContext context,
+    public int operationSetLowerBound(QueryExecutionContext context,
             IndexScanOperation op, boolean lastColumn) {
         throw new ClusterJFatalInternalException(
                 local.message("ERR_Implementation_Should_Not_Occur"));
     }
 
-    public void operationSetUpperBound(QueryExecutionContext context,
+    public int operationSetUpperBound(QueryExecutionContext context,
             IndexScanOperation op, boolean lastColumn){
         throw new ClusterJFatalInternalException(
                 local.message("ERR_Implementation_Should_Not_Occur"));
@@ -181,60 +207,106 @@ public abstract class PredicateImpl impl
     }
 
     public CandidateIndexImpl getBestCandidateIndex(QueryExecutionContext context) {
-        return getBestCandidateIndexFor(context, this);
+        return getBestCandidateIndexFor(context, getTopLevelPredicates());
     }
 
     /** Get the best candidate index for the query, considering all indices
-     * defined and all predicates in the query.
+     * defined and all predicates in the query. If a unique index is usable
+     * (no null parameters) then return it. Otherwise, simply choose the
+     * first index for which there is at least one leading non-null parameter.
      * @param predicates the predicates
      * @return the best index for the query
      */
     protected CandidateIndexImpl getBestCandidateIndexFor(QueryExecutionContext context,
             PredicateImpl... predicates) {
-        // Create CandidateIndexImpl to decide how to scan.
+        // if there is a primary/unique index, see if it can be used in the current context
+        if (uniqueIndex != null && uniqueIndex.isUsable(context)) {
+            if (logger.isDebugEnabled()) logger.debug("usable unique index: " + uniqueIndex.getIndexName());
+            return uniqueIndex;
+        }
+        // find the best candidate index by returning the highest scoring index that is usable
+        // in the current context; i.e. has non-null parameters
+        // TODO: it might be better to score indexes again considering the current context
+        for (CandidateIndexImpl index: scoredCandidateIndices) {
+            if (index.isUsable(context)) {
+                if (logger.isDebugEnabled()) logger.debug("usable ordered index: " + index.getIndexName());
+                return index;
+            }
+        }
+        // there is no index that is usable in the current context
+        return CandidateIndexImpl.getIndexForNullWhereClause();
+
+    }
+
+    /** Get the number of conditions in the top level predicate.
+     * This is used to determine whether a hash index can be used. If there
+     * are exactly the number of conditions as index columns, then the
+     * hash index might be used.
+     * By default (for equal, greaterThan, lessThan, greaterEqual, lessEqual)
+     * there is one condition.
+     * AndPredicateImpl overrides this method.
+     * @return the number of conditions
+     */
+    protected int getNumberOfConditionsInPredicate() {
+        return 1;
+    }
+
+    /** Analyze this predicate to determine whether a primary key, unique key, or ordered index
+     * might be used. The result will be used during query execution once the actual parameters
+     * are known.
+     */
+    public void prepare() {
+        // Create CandidateIndexImpls
         CandidateIndexImpl[] candidateIndices = dobj.createCandidateIndexes();
         // Iterate over predicates and have each one register with
         // candidate indexes.
-        for (PredicateImpl predicateImpl : predicates) {
-            predicateImpl.markBoundsForCandidateIndices(context, candidateIndices);
+        for (PredicateImpl predicateImpl : getTopLevelPredicates()) {
+            predicateImpl.markBoundsForCandidateIndices(candidateIndices);
         }
-        // Iterate over candidate indices to find one that is usable.
-        int highScore = 0;
-        // Holder for the best index; default to the index for null where clause
-        CandidateIndexImpl bestCandidateIndexImpl = 
-                CandidateIndexImpl.getIndexForNullWhereClause();
+        // Iterate over candidate indices to find those that are usable.
         // Hash index operations require the predicates to have no extra conditions
         // beyond the index columns.
+        // Btree index operations are ranked by the number of usable conditions
         int numberOfConditions = getNumberOfConditionsInPredicate();
         for (CandidateIndexImpl candidateIndex : candidateIndices) {
             if (candidateIndex.supportsConditionsOfLength(numberOfConditions)) {
-                // opportunity for a user-defined plugin to evaluate indices
+                candidateIndex.score();
                 int score = candidateIndex.getScore();
-                if (logger.isDetailEnabled()) {
-                    logger.detail("Score: " + score + " from " + candidateIndex);
+                if (score != 0) {
+                    if (candidateIndex.isUnique()) {
+                        // there can be only one unique index for a given predicate
+                        uniqueIndex = candidateIndex;
+                    } else {
+                        // add possible indices to ordered map
+                        scoredCandidateIndices.add(candidateIndex);
+                    }
                 }
-                if (score > highScore) {
-                    bestCandidateIndexImpl = candidateIndex;
-                    highScore = score;
+                if (logger.isDetailEnabled()) {
+                    logger.detail("Score: " + score + " from " + candidateIndex.getIndexName());
                 }
             }
         }
-        if (logger.isDetailEnabled()) logger.detail("High score: " + highScore
-                + " from " + bestCandidateIndexImpl.getIndexName());
-        return bestCandidateIndexImpl;
     }
 
-    /** Get the number of conditions in the top level predicate.
-     * This is used to determine whether a hash index can be used. If there
-     * are exactly the number of conditions as index columns, then the
-     * hash index might be used.
-     * By default (for equal, greaterThan, lessThan, greaterEqual, lessEqual)
-     * there is one condition.
-     * AndPredicateImpl overrides this method.
-     * @return the number of conditions
+    protected void markBoundsForCandidateIndices(CandidateIndexImpl[] candidateIndices) {
+        // default is nothing to do
+    }
+
+    /** Return an array of top level predicates that might be used with indices.
+     * 
+     * @return an array of top level predicates (defaults to {this}).
      */
-    protected int getNumberOfConditionsInPredicate() {
-        return 1;
+    protected PredicateImpl[] getTopLevelPredicates() {
+        return new PredicateImpl[] {this};
+    }
+
+    public ParameterImpl getParameter() {
+        // default is there is no parameter for this predicate
+        return null;
+    }
+
+    public boolean isUsable(QueryExecutionContext context) {
+        return false;
     }
 
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PropertyImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PropertyImpl.java	2011-10-02 21:20:50 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PropertyImpl.java	2011-10-18 22:54:36 +0000
@@ -54,6 +54,11 @@ public class PropertyImpl implements Pre
         this.fmd = fmd;
     }
 
+    @Override
+    public String toString() {
+        return fmd.getName();
+    }
+
     public void setComplexParameter() {
         complexParameter = true;
     }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java	2011-10-20 19:41:56 +0000
@@ -118,6 +118,12 @@ public class QueryDomainTypeImpl<T> impl
         }
         this.where = (PredicateImpl)predicate;
         where.markParameters();
+        // statically analyze the where clause, looking for:
+        // primary keys all specified with equal
+        // unique keys all specified with equal
+        // btree index keys partly specified with ranges
+        // none of the above
+        where.prepare();
         return this;
     }
 
@@ -183,12 +189,13 @@ public class QueryDomainTypeImpl<T> impl
      * @throws ClusterJUserException if not all parameters are bound
      */
     public ResultData getResultData(QueryExecutionContext context) {
-	SessionSPI session = context.getSession();
+        SessionSPI session = context.getSession();
         // execute query based on what kind of scan is needed
         // if no where clause, scan the entire table
         CandidateIndexImpl index = where==null?
-            CandidateIndexImpl.getIndexForNullWhereClause():
-            where.getBestCandidateIndex(context);
+                CandidateIndexImpl.getIndexForNullWhereClause():
+                where.getBestCandidateIndex(context);
+
         ScanType scanType = index.getScanType();
         Map<String, Object> explain = newExplain(index, scanType);
         context.setExplain(explain);

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryExecutionContextImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryExecutionContextImpl.java	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryExecutionContextImpl.java	2011-10-20 19:41:56 +0000
@@ -338,4 +338,13 @@ public class QueryExecutionContextImpl i
         return boundParameters.get(index);
     }
 
+    public boolean hasNoNullParameters() {
+        for (Object value: boundParameters.values()) {
+            if (value == null) {
+                return false;
+            }
+        }
+        return true;
+    }
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/spi/QueryExecutionContext.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/spi/QueryExecutionContext.java	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/spi/QueryExecutionContext.java	2011-10-20 19:41:56 +0000
@@ -52,4 +52,6 @@ public interface QueryExecutionContext {
 
     void deleteFilters();
 
+    boolean hasNoNullParameters();
+
 }

=== modified file 'storage/ndb/src/common/portlib/ndb_daemon.cc'
--- a/storage/ndb/src/common/portlib/ndb_daemon.cc	2011-01-30 23:13:49 +0000
+++ b/storage/ndb/src/common/portlib/ndb_daemon.cc	2011-10-20 16:18:28 +0000
@@ -315,7 +315,7 @@ do_files(const char *pidfile_name, const
                 pidfile_name, errno);
 
   char buf[32];
-  int length = my_snprintf(buf, sizeof(buf), "%ld",
+  int length = (int)my_snprintf(buf, sizeof(buf), "%ld",
                            (long)NdbHost_GetProcessId());
   if (write(pidfd, buf, length) != length)
     return ERR1("Failed to write pid to pidfile '%s', errno: %d",

=== modified file 'storage/ndb/src/common/util/ndb_init.cpp'
--- a/storage/ndb/src/common/util/ndb_init.cpp	2011-09-27 17:28:13 +0000
+++ b/storage/ndb/src/common/util/ndb_init.cpp	2011-10-20 16:18:28 +0000
@@ -56,7 +56,7 @@ ndb_init_internal()
   {
     {
       const char* err = "ndb_init() failed - exit\n";
-      int res = write(2, err, strlen(err));
+      int res = (int)write(2, err, (unsigned)strlen(err));
       (void)res;
       exit(1);
     }
@@ -79,7 +79,7 @@ ndb_init()
     if (my_init())
     {
       const char* err = "my_init() failed - exit\n";
-      int res = write(2, err, strlen(err));
+      int res = (int)write(2, err, (unsigned)strlen(err));
       (void)res;
       exit(1);
     }

=== modified file 'storage/ndb/src/common/util/ndbzio.c'
--- a/storage/ndb/src/common/util/ndbzio.c	2011-09-29 05:44:30 +0000
+++ b/storage/ndb/src/common/util/ndbzio.c	2011-10-20 19:41:56 +0000
@@ -428,7 +428,7 @@ int read_buffer(ndbzio_stream *s)
   my_errno= 0;
   if (s->stream.avail_in == 0)
   {
-    s->stream.avail_in = my_read(s->file, (uchar *)s->inbuf, AZ_BUFSIZE_READ, MYF(0));
+    s->stream.avail_in = (uInt)my_read(s->file, (uchar *)s->inbuf, AZ_BUFSIZE_READ, MYF(0));
     if(s->stream.avail_in > 0)
       my_errno= 0;
     if (s->stream.avail_in == 0)
@@ -681,7 +681,7 @@ unsigned int ZEXPORT ndbzread ( ndbzio_s
         bytes_read= my_read(s->file, (uchar *)next_out, s->stream.avail_out,
                             MYF(0));
         if(bytes_read>0)
-          s->stream.avail_out -= bytes_read;
+          s->stream.avail_out -= (uInt)bytes_read;
         if (bytes_read == 0)
         {
           s->z_eof = 1;

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-10-03 08:02:28 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-10-20 19:52:11 +0000
@@ -3601,6 +3601,7 @@ Dbspj::computeHash(Signal* signal,
     (MAX_KEY_SIZE_IN_WORDS + 1) / 2;
   Uint64 tmp64[MAX_KEY_SIZE_IN_LONG_WORDS];
   Uint32 *tmp32 = (Uint32*)tmp64;
+  ndbassert(ptr.sz <= MAX_KEY_SIZE_IN_WORDS);
   copy(tmp32, ptr);
 
   const KeyDescriptor* desc = g_key_descriptor_pool.getPtr(tableId);
@@ -3639,6 +3640,7 @@ Dbspj::computePartitionHash(Signal* sign
   Uint64 *tmp64 = _space;
   Uint32 *tmp32 = (Uint32*)tmp64;
   Uint32 sz = ptr.sz;
+  ndbassert(ptr.sz <= MAX_KEY_SIZE_IN_WORDS);
   copy(tmp32, ptr);
 
   const KeyDescriptor* desc = g_key_descriptor_pool.getPtr(tableId);
@@ -4456,6 +4458,12 @@ Dbspj::parseScanIndex(Build_context& ctx
     data.m_firstExecution = true;
     data.m_batch_chunks = 0;
 
+    /**
+     * We will need to look at the parameters again if the scan is pruned and the prune
+     * key uses parameter values. Therefore, we keep a reference to the start of the
+     * parameter buffer.
+     */
+    DABuffer origParam = param;
     err = parseDA(ctx, requestPtr, treeNodePtr,
                   tree, treeBits, param, paramBits);
     if (unlikely(err != 0))
@@ -4482,7 +4490,7 @@ Dbspj::parseScanIndex(Build_context& ctx
         /**
          * Expand pattern into a new pattern (with linked values)
          */
-        err = expand(pattern, treeNodePtr, tree, len, param, cnt);
+        err = expand(pattern, treeNodePtr, tree, len, origParam, cnt);
         if (unlikely(err != 0))
           break;
 
@@ -4501,7 +4509,7 @@ Dbspj::parseScanIndex(Build_context& ctx
          */
         Uint32 prunePtrI = RNIL;
         bool hasNull;
-        err = expand(prunePtrI, tree, len, param, cnt, hasNull);
+        err = expand(prunePtrI, tree, len, origParam, cnt, hasNull);
         if (unlikely(err != 0))
           break;
 
@@ -5078,7 +5086,8 @@ Dbspj::scanIndex_parent_batch_complete(S
       parallelism = (data.m_fragCount - data.m_frags_complete) / roundTrips;
     }
 
-    ndbassert(parallelism <= data.m_fragCount - data.m_frags_complete);
+    ndbassert(parallelism >= 1);
+    ndbassert((Uint32)parallelism + data.m_frags_complete <= data.m_fragCount);
     data.m_parallelism = static_cast<Uint32>(parallelism);
 
 #ifdef DEBUG_SCAN_FRAGREQ
@@ -6189,6 +6198,7 @@ Uint32
 Dbspj::appendToPattern(Local_pattern_store & pattern,
                        DABuffer & tree, Uint32 len)
 {
+  jam();
   if (unlikely(tree.ptr + len > tree.end))
     return DbspjErr::InvalidTreeNodeSpecification;
 
@@ -6203,6 +6213,7 @@ Uint32
 Dbspj::appendParamToPattern(Local_pattern_store& dst,
                             const RowPtr::Linear & row, Uint32 col)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6218,6 +6229,7 @@ Uint32
 Dbspj::appendParamHeadToPattern(Local_pattern_store& dst,
                                 const RowPtr::Linear & row, Uint32 col)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6235,6 +6247,7 @@ Dbspj::appendTreeToSection(Uint32 & ptrI
   /**
    * TODO handle errors
    */
+  jam();
   Uint32 SZ = 16;
   Uint32 tmp[16];
   while (len > SZ)
@@ -6293,6 +6306,7 @@ Uint32
 Dbspj::appendColToSection(Uint32 & dst, const RowPtr::Section & row,
                           Uint32 col, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6316,6 +6330,7 @@ Uint32
 Dbspj::appendColToSection(Uint32 & dst, const RowPtr::Linear & row,
                           Uint32 col, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6335,6 +6350,7 @@ Uint32
 Dbspj::appendAttrinfoToSection(Uint32 & dst, const RowPtr::Linear & row,
                                Uint32 col, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6353,6 +6369,7 @@ Uint32
 Dbspj::appendAttrinfoToSection(Uint32 & dst, const RowPtr::Section & row,
                                Uint32 col, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6378,6 +6395,7 @@ Dbspj::appendAttrinfoToSection(Uint32 &
 Uint32
 Dbspj::appendPkColToSection(Uint32 & dst, const RowPtr::Section & row, Uint32 col)
 {
+  jam();
   /**
    * TODO handle errors
    */
@@ -6400,6 +6418,7 @@ Dbspj::appendPkColToSection(Uint32 & dst
 Uint32
 Dbspj::appendPkColToSection(Uint32 & dst, const RowPtr::Linear & row, Uint32 col)
 {
+  jam();
   Uint32 offset = row.m_header->m_offset[col];
   Uint32 tmp = row.m_data[offset];
   Uint32 len = AttributeHeader::getDataSize(tmp);
@@ -6413,6 +6432,7 @@ Dbspj::appendFromParent(Uint32 & dst, Lo
                         Uint32 levels, const RowPtr & rowptr,
                         bool& hasNull)
 {
+  jam();
   Ptr<TreeNode> treeNodePtr;
   m_treenode_pool.getPtr(treeNodePtr, rowptr.m_src_node_ptrI);
   Uint32 corrVal = rowptr.m_src_correlation;
@@ -6527,6 +6547,7 @@ Dbspj::appendDataToSection(Uint32 & ptrI
                            Local_pattern_store::ConstDataBufferIterator& it,
                            Uint32 len, bool& hasNull)
 {
+  jam();
   if (unlikely(len==0))
   {
     jam();
@@ -6732,6 +6753,7 @@ Uint32
 Dbspj::expand(Uint32 & ptrI, DABuffer& pattern, Uint32 len,
               DABuffer& param, Uint32 paramCnt, bool& hasNull)
 {
+  jam();
   /**
    * TODO handle error
    */
@@ -6816,6 +6838,7 @@ Dbspj::expand(Local_pattern_store& dst,
               DABuffer& pattern, Uint32 len,
               DABuffer& param, Uint32 paramCnt)
 {
+  jam();
   /**
    * TODO handle error
    */

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp	2011-08-31 10:39:08 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp	2011-10-20 19:41:56 +0000
@@ -110,7 +110,7 @@ AsyncFile::writeReq(Request * request)
     bool write_not_complete = true;
 
     while(write_not_complete) {
-      int totsize = 0;
+      size_t totsize = 0;
       off_t offset = request->par.readWrite.pages[page_num].offset;
       char* bufptr = theWriteBuffer;
 
@@ -128,7 +128,7 @@ AsyncFile::writeReq(Request * request)
           if (((i + 1) < request->par.readWrite.numberOfPages)) {
             // There are more pages to write
             // Check that offsets are consequtive
-            off_t tmp = page_offset + request->par.readWrite.pages[i].size;
+            off_t tmp=(off_t)(page_offset+request->par.readWrite.pages[i].size);
             if (tmp != request->par.readWrite.pages[i+1].offset) {
               // Next page is not aligned with previous, not allowed
               DEBUG(ndbout_c("Page offsets are not aligned"));
@@ -143,7 +143,7 @@ AsyncFile::writeReq(Request * request)
               break;
             }
           }
-          page_offset += request->par.readWrite.pages[i].size;
+          page_offset += (off_t)request->par.readWrite.pages[i].size;
         }
         bufptr = theWriteBuffer;
       } else {

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp	2011-10-20 13:01:37 +0000
@@ -197,7 +197,7 @@ Filename::set(Ndbfs* fs,
   strcat(theName, fileExtension[type]);
   
   if(dir == true){
-    for(int l = strlen(theName) - 1; l >= 0; l--){
+    for(int l = (int)strlen(theName) - 1; l >= 0; l--){
       if(theName[l] == DIR_SEPARATOR[0]){
 	theName[l] = 0;
 	break;

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/Win32AsyncFile.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/Win32AsyncFile.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Win32AsyncFile.cpp	2011-10-20 18:36:21 +0000
@@ -217,7 +217,7 @@ Win32AsyncFile::readBuffer(Request* req,
     DWORD dwBytesRead;
     BOOL bRead = ReadFile(hFile,
                           buf,
-                          size,
+                          (DWORD)size,
                           &dwBytesRead,
                           &ov);
     if(!bRead){
@@ -248,7 +248,7 @@ Win32AsyncFile::readBuffer(Request* req,
 
     buf += bytes_read;
     size -= bytes_read;
-    offset += bytes_read;
+    offset += (off_t)bytes_read;
   }
   return 0;
 }
@@ -277,7 +277,7 @@ Win32AsyncFile::writeBuffer(const char *
     size_t bytes_written = 0;
 
     DWORD dwWritten;
-    BOOL bWrite = WriteFile(hFile, buf, bytes_to_write, &dwWritten, &ov);
+    BOOL bWrite = WriteFile(hFile, buf, (DWORD)bytes_to_write, &dwWritten, &ov);
     if(!bWrite) {
       return GetLastError();
     }
@@ -288,7 +288,7 @@ Win32AsyncFile::writeBuffer(const char *
 
     buf += bytes_written;
     size -= bytes_written;
-    offset += bytes_written;
+    offset += (off_t)bytes_written;
   }
   return 0;
 }
@@ -393,7 +393,7 @@ loop:
   do {
     if (0 != strcmp(".", ffd.cFileName) && 0 != strcmp("..", ffd.cFileName))
     {
-      int len = strlen(path);
+      int len = (int)strlen(path);
       strcat(path, ffd.cFileName);
       if(DeleteFile(path) || RemoveDirectory(path)) 
       {

=== modified file 'storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-10-17 18:13:57 +0000
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-10-20 19:52:11 +0000
@@ -470,31 +470,34 @@ void Qmgr::setCCDelay(UintR aCCDelay)
 void Qmgr::execCONNECT_REP(Signal* signal)
 {
   jamEntry();
-  const Uint32 nodeId = signal->theData[0];
+  const Uint32 connectedNodeId = signal->theData[0];
 
   if (ERROR_INSERTED(931))
   {
     jam();
-    ndbout_c("Discarding CONNECT_REP(%d)", nodeId);
-    infoEvent("Discarding CONNECT_REP(%d)", nodeId);
+    ndbout_c("Discarding CONNECT_REP(%d)", connectedNodeId);
+    infoEvent("Discarding CONNECT_REP(%d)", connectedNodeId);
     return;
   }
 
-  c_connectedNodes.set(nodeId);
+  c_connectedNodes.set(connectedNodeId);
 
-  NodeRecPtr nodePtr;
-  nodePtr.i = nodeId;
-  ptrCheckGuard(nodePtr, MAX_NODES, nodeRec);
-  nodePtr.p->m_secret = 0;
-
-  nodePtr.i = getOwnNodeId();
-  ptrCheckGuard(nodePtr, MAX_NODES, nodeRec);
-  NodeInfo nodeInfo = getNodeInfo(nodeId);
-  switch(nodePtr.p->phase){
+  {
+    NodeRecPtr connectedNodePtr;
+    connectedNodePtr.i = connectedNodeId;
+    ptrCheckGuard(connectedNodePtr, MAX_NODES, nodeRec);
+    connectedNodePtr.p->m_secret = 0;
+  }
+
+  NodeRecPtr myNodePtr;
+  myNodePtr.i = getOwnNodeId();
+  ptrCheckGuard(myNodePtr, MAX_NODES, nodeRec);
+  NodeInfo connectedNodeInfo = getNodeInfo(connectedNodeId);
+  switch(myNodePtr.p->phase){
   case ZRUNNING:
-    if (nodeInfo.getType() == NodeInfo::DB)
+    if (connectedNodeInfo.getType() == NodeInfo::DB)
     {
-      ndbrequire(!c_clusterNodes.get(nodeId));
+      ndbrequire(!c_clusterNodes.get(connectedNodeId));
     }
   case ZSTARTING:
     jam();
@@ -504,16 +507,17 @@ void Qmgr::execCONNECT_REP(Signal* signa
     jam();
     return;
   case ZAPI_ACTIVE:
+    ndbrequire(false);
   case ZAPI_INACTIVE:
-    return;
+    ndbrequire(false);
   case ZINIT:
-    ndbrequire(getNodeInfo(nodeId).m_type == NodeInfo::MGM);
+    ndbrequire(getNodeInfo(connectedNodeId).m_type == NodeInfo::MGM);
     break;
   default:
     ndbrequire(false);
   }
 
-  if (nodeInfo.getType() != NodeInfo::DB)
+  if (connectedNodeInfo.getType() != NodeInfo::DB)
   {
     jam();
     return;
@@ -522,24 +526,24 @@ void Qmgr::execCONNECT_REP(Signal* signa
   switch(c_start.m_gsn){
   case GSN_CM_REGREQ:
     jam();
-    sendCmRegReq(signal, nodeId);
+    sendCmRegReq(signal, connectedNodeId);
 
     /**
      * We're waiting for CM_REGCONF c_start.m_nodes contains all configured
      *   nodes
      */
-    ndbrequire(nodePtr.p->phase == ZSTARTING);
-    ndbrequire(c_start.m_nodes.isWaitingFor(nodeId));
+    ndbrequire(myNodePtr.p->phase == ZSTARTING);
+    ndbrequire(c_start.m_nodes.isWaitingFor(connectedNodeId));
     return;
   case GSN_CM_NODEINFOREQ:
     jam();
     
-    if (c_start.m_nodes.isWaitingFor(nodeId))
+    if (c_start.m_nodes.isWaitingFor(connectedNodeId))
     {
       jam();
       ndbrequire(getOwnNodeId() != cpresident);
-      ndbrequire(nodePtr.p->phase == ZSTARTING);
-      sendCmNodeInfoReq(signal, nodeId, nodePtr.p);
+      ndbrequire(myNodePtr.p->phase == ZSTARTING);
+      sendCmNodeInfoReq(signal, connectedNodeId, myNodePtr.p);
       return;
     }
     return;
@@ -547,17 +551,17 @@ void Qmgr::execCONNECT_REP(Signal* signa
     jam();
     
     ndbrequire(getOwnNodeId() != cpresident);
-    ndbrequire(nodePtr.p->phase == ZRUNNING);
-    if (c_start.m_nodes.isWaitingFor(nodeId))
+    ndbrequire(myNodePtr.p->phase == ZRUNNING);
+    if (c_start.m_nodes.isWaitingFor(connectedNodeId))
     {
       jam();
-      c_start.m_nodes.clearWaitingFor(nodeId);
+      c_start.m_nodes.clearWaitingFor(connectedNodeId);
       c_start.m_gsn = RNIL;
       
       NodeRecPtr addNodePtr;
-      addNodePtr.i = nodeId;
+      addNodePtr.i = connectedNodeId;
       ptrCheckGuard(addNodePtr, MAX_NDB_NODES, nodeRec);
-      cmAddPrepare(signal, addNodePtr, nodePtr.p);
+      cmAddPrepare(signal, addNodePtr, myNodePtr.p);
       return;
     }
   }
@@ -565,11 +569,11 @@ void Qmgr::execCONNECT_REP(Signal* signa
     (void)1;
   }
   
-  ndbrequire(!c_start.m_nodes.isWaitingFor(nodeId));
-  ndbrequire(!c_readnodes_nodes.get(nodeId));
-  c_readnodes_nodes.set(nodeId);
+  ndbrequire(!c_start.m_nodes.isWaitingFor(connectedNodeId));
+  ndbrequire(!c_readnodes_nodes.get(connectedNodeId));
+  c_readnodes_nodes.set(connectedNodeId);
   signal->theData[0] = reference();
-  sendSignal(calcQmgrBlockRef(nodeId), GSN_READ_NODESREQ, signal, 1, JBA);
+  sendSignal(calcQmgrBlockRef(connectedNodeId), GSN_READ_NODESREQ, signal, 1, JBA);
   return;
 }//Qmgr::execCONNECT_REP()
 

=== modified file 'storage/ndb/src/kernel/error/ndbd_exit_codes.c'
--- a/storage/ndb/src/kernel/error/ndbd_exit_codes.c	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/kernel/error/ndbd_exit_codes.c	2011-10-20 13:01:37 +0000
@@ -267,7 +267,7 @@ const char *ndbd_exit_status_message(ndb
 
 int ndbd_exit_string(int err_no, char *str, unsigned int size)
 {
-  unsigned int len;
+  size_t len;
 
   ndbd_exit_classification cl;
   ndbd_exit_status st;
@@ -279,8 +279,8 @@ int ndbd_exit_string(int err_no, char *s
 
     len = my_snprintf(str, size-1, "%s: %s: %s", msg, st_msg, cl_msg);
     str[size-1]= '\0';
-  
-    return len;
+
+    return (int)len;
   }
   return -1;
 }

=== modified file 'storage/ndb/src/kernel/vm/SimulatedBlock.cpp'
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp	2011-09-02 09:16:56 +0000
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp	2011-10-20 19:41:56 +0000
@@ -1827,7 +1827,7 @@ SimulatedBlock::infoEvent(const char * m
   BaseString::vsnprintf(buf, 96, msg, ap); // 96 = 100 - 4
   va_end(ap);
   
-  int len = strlen(buf) + 1;
+  size_t len = strlen(buf) + 1;
   if(len > 96){
     len = 96;
     buf[95] = 0;
@@ -1847,7 +1847,7 @@ SimulatedBlock::infoEvent(const char * m
   signalT.header.theSendersBlockRef      = reference();
   signalT.header.theTrace                = tTrace;
   signalT.header.theSignalId             = tSignalId;
-  signalT.header.theLength               = ((len+3)/4)+1;
+  signalT.header.theLength               = (Uint32)((len+3)/4)+1;
   
 #ifdef NDBD_MULTITHREADED
   sendlocal(m_threadId,
@@ -1872,7 +1872,7 @@ SimulatedBlock::warningEvent(const char
   BaseString::vsnprintf(buf, 96, msg, ap); // 96 = 100 - 4
   va_end(ap);
   
-  int len = strlen(buf) + 1;
+  size_t len = strlen(buf) + 1;
   if(len > 96){
     len = 96;
     buf[95] = 0;
@@ -1892,7 +1892,7 @@ SimulatedBlock::warningEvent(const char
   signalT.header.theSendersBlockRef      = reference();
   signalT.header.theTrace                = tTrace;
   signalT.header.theSignalId             = tSignalId;
-  signalT.header.theLength               = ((len+3)/4)+1;
+  signalT.header.theLength               = (Uint32)((len+3)/4)+1;
 
 #ifdef NDBD_MULTITHREADED
   sendlocal(m_threadId,

=== modified file 'storage/ndb/src/mgmapi/ndb_logevent.cpp'
--- a/storage/ndb/src/mgmapi/ndb_logevent.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/mgmapi/ndb_logevent.cpp	2011-10-20 19:41:56 +0000
@@ -616,7 +616,7 @@ int ndb_logevent_get_next(const NdbLogEv
     BaseString tmp(val);
     Vector<BaseString> list;
     tmp.split(list);
-    for (size_t j = 0; j<list.size(); j++)
+    for (unsigned j = 0; j<list.size(); j++)
     {
       dst->Data[j] = atoi(list[j].c_str());
     }

=== modified file 'storage/ndb/src/mgmsrv/Defragger.hpp'
--- a/storage/ndb/src/mgmsrv/Defragger.hpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/mgmsrv/Defragger.hpp	2011-10-20 19:41:56 +0000
@@ -36,7 +36,7 @@ class Defragger {
   Vector<DefragBuffer*> m_buffers;
 
   DefragBuffer* find_buffer(NodeId nodeId, Uint32 fragId){
-    for (size_t i = 0; i < m_buffers.size(); i++)
+    for (unsigned i = 0; i < m_buffers.size(); i++)
     {
       DefragBuffer* dbuf = m_buffers[i];
       if (dbuf->m_node_id == nodeId &&
@@ -47,7 +47,7 @@ class Defragger {
   }
 
   void erase_buffer(const DefragBuffer* dbuf){
-    for (size_t i = 0; i < m_buffers.size(); i++)
+    for (unsigned i = 0; i < m_buffers.size(); i++)
     {
       if (m_buffers[i] == dbuf)
       {
@@ -63,7 +63,7 @@ public:
   Defragger() {};
   ~Defragger()
   {
-    for (size_t i = m_buffers.size(); i > 0; --i)
+    for (unsigned i = m_buffers.size(); i > 0; --i)
     {
       delete m_buffers[i-1]; // free the memory of the fragment
     }
@@ -120,7 +120,7 @@ public:
     clear any unassembled signal buffers from node
   */
   void node_failed(NodeId nodeId) {
-    for (size_t i = m_buffers.size(); i > 0; --i)
+    for (unsigned i = m_buffers.size(); i > 0; --i)
     {
       if (m_buffers[i-1]->m_node_id == nodeId)
       {

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilder.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-10-03 08:02:28 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-10-20 19:52:11 +0000
@@ -1661,7 +1661,8 @@ NdbQueryIndexScanOperationDefImpl::NdbQu
                            int& error)
   : NdbQueryScanOperationDefImpl(table,options,ident,ix,id,error),
   m_interface(*this), 
-  m_index(index)
+  m_index(index),
+  m_paramInPruneKey(false)
 {
   memset(&m_bound, 0, sizeof m_bound);
   if (bound!=NULL) {
@@ -2316,7 +2317,7 @@ NdbQueryLookupOperationDefImpl::appendKe
 
 
 Uint32
-NdbQueryIndexScanOperationDefImpl::appendPrunePattern(Uint32Buffer& serializedDef) const
+NdbQueryIndexScanOperationDefImpl::appendPrunePattern(Uint32Buffer& serializedDef)
 {
   Uint32 appendedPattern = 0;
 
@@ -2408,6 +2409,7 @@ NdbQueryIndexScanOperationDefImpl::appen
           }
           case NdbQueryOperandImpl::Param:
             appendedPattern |= QN_ScanIndexNode::SI_PRUNE_PARAMS;
+            m_paramInPruneKey = true;
             serializedDef.append(QueryPattern::param(paramCnt++));
             break;
           default:

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-10-03 08:02:28 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-10-20 19:52:11 +0000
@@ -382,6 +382,15 @@ public:
   virtual const IndexBound* getBounds() const
   { return NULL; } 
 
+  /** 
+   * True if this is a prunable scan and there are NdbQueryParamOperands in the
+   * distribution key.
+   */
+  virtual bool hasParamInPruneKey() const
+  {
+    return false;
+  }
+
   // Return 'true' is query type is a multi-row scan
   virtual bool isScanOperation() const = 0;
 
@@ -523,7 +532,7 @@ protected:
   virtual Uint32 appendBoundPattern(Uint32Buffer& serializedDef) const
   { return 0; }
 
-  virtual Uint32 appendPrunePattern(Uint32Buffer& serializedDef) const
+  virtual Uint32 appendPrunePattern(Uint32Buffer& serializedDef)
   { return 0; }
 
 }; // class NdbQueryScanOperationDefImpl
@@ -553,11 +562,16 @@ public:
   virtual const IndexBound* getBounds() const
   { return &m_bound; } 
 
+  bool hasParamInPruneKey() const
+  {
+    return m_paramInPruneKey;
+  }
+
 protected:
   // Append pattern for creating complete range bounds to serialized code 
   virtual Uint32 appendBoundPattern(Uint32Buffer& serializedDef) const;
 
-  virtual Uint32 appendPrunePattern(Uint32Buffer& serializedDef) const;
+  virtual Uint32 appendPrunePattern(Uint32Buffer& serializedDef);
 
 private:
 
@@ -583,6 +597,12 @@ private:
 
   /** True if there is a set of bounds.*/
   IndexBound m_bound;
+
+  /** 
+   * True if scan is prunable and there are NdbQueryParamOperands in the 
+   * distribution key.
+   */
+  bool m_paramInPruneKey;
 }; // class NdbQueryIndexScanOperationDefImpl
 
 

=== modified file 'storage/ndb/src/ndbapi/NdbQueryOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-09-28 10:55:58 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-10-20 19:52:11 +0000
@@ -4562,6 +4562,10 @@ NdbQueryOperationImpl::prepareAttrInfo(U
     {
       requestInfo |= QN_ScanIndexParameters::SIP_PARALLEL;
     }
+    if (def.hasParamInPruneKey())
+    {
+      requestInfo |= QN_ScanIndexParameters::SIP_PRUNE_PARAMS;
+    }
     param->requestInfo = requestInfo;
     // Check that both values fit in param->batchSize.
     assert(getMaxBatchRows() < (1<<QN_ScanIndexParameters::BatchRowBits));

=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2011-10-07 18:15:59 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2011-10-20 19:52:11 +0000
@@ -979,17 +979,17 @@ int ndb_error_string(int err_no, char *s
   int len;
 
   assert(size > 1);
-  if(size <= 1) 
+  if(size <= 1)
     return 0;
+
   error.code = err_no;
   ndberror_update(&error);
 
-  len =
-    my_snprintf(str, size-1, "%s: %s: %s", error.message,
+  len = (int)my_snprintf(str, size-1, "%s: %s: %s", error.message,
 		ndberror_status_message(error.status),
 		ndberror_classification_message(error.classification));
   str[size-1]= '\0';
-  
+
   if (error.classification != UE)
     return len;
   return -len;

=== modified file 'storage/ndb/test/ndbapi/testDict.cpp'
--- a/storage/ndb/test/ndbapi/testDict.cpp	2011-04-23 08:21:36 +0000
+++ b/storage/ndb/test/ndbapi/testDict.cpp	2011-10-20 12:21:10 +0000
@@ -8167,6 +8167,8 @@ runBug58277loadtable(NDBT_Context* ctx,
     int cnt = 0;
     for (int i = 0; i < rows; i++)
     {
+      int retries = 10;
+  retry:
       NdbTransaction* pTx = 0;
       CHK2((pTx = pNdb->startTransaction()) != 0, pNdb->getNdbError());
 
@@ -8183,7 +8185,19 @@ runBug58277loadtable(NDBT_Context* ctx,
         int x[] = {
          -630
         };
-        CHK3(pTx->execute(Commit) == 0, pTx->getNdbError(), x);
+        int res = pTx->execute(Commit);
+        if (res != 0 &&
+            pTx->getNdbError().status == NdbError::TemporaryError)
+        {
+          retries--;
+          if (retries >= 0)
+          {
+            pTx->close();
+            NdbSleep_MilliSleep(10);
+            goto retry;
+          }
+        }
+        CHK3(res == 0, pTx->getNdbError(), x);
         cnt++;
       }
       while (0);

=== modified file 'storage/ndb/test/run-test/atrt.hpp'
--- a/storage/ndb/test/run-test/atrt.hpp	2011-10-05 13:57:58 +0000
+++ b/storage/ndb/test/run-test/atrt.hpp	2011-10-20 19:41:56 +0000
@@ -201,6 +201,7 @@ extern const char * g_ndbd_bin_path;
 extern const char * g_ndbmtd_bin_path;
 extern const char * g_mysqld_bin_path;
 extern const char * g_mysql_install_db_bin_path;
+extern const char * g_libmysqlclient_so_path;
 
 extern const char * g_search_path[];
 

=== modified file 'storage/ndb/test/run-test/files.cpp'
--- a/storage/ndb/test/run-test/files.cpp	2011-10-05 13:57:58 +0000
+++ b/storage/ndb/test/run-test/files.cpp	2011-10-20 19:52:11 +0000
@@ -122,6 +122,24 @@ printfile(FILE* out, Properties& props,
   fflush(out);
 }
 
+static
+char *
+dirname(const char * path)
+{
+  char * s = strdup(path);
+  size_t len = strlen(s);
+  for (size_t i = 1; i<len; i++)
+  {
+    if (s[len - i] == '/')
+    {
+      s[len - i] = 0;
+      return s;
+    }
+  }
+  free(s);
+  return 0;
+}
+
 bool
 setup_files(atrt_config& config, int setup, int sshx)
 {
@@ -313,8 +331,23 @@ setup_files(atrt_config& config, int set
         }
         fprintf(fenv, "$PATH\n");
 	keys.push_back("PATH");
+
+        {
+          /**
+           * In 5.5...binaries aren't compiled with rpath
+           * So we need an explicit LD_LIBRARY_PATH
+           *
+           * Use path from libmysqlclient.so
+           */
+          char * dir = dirname(g_libmysqlclient_so_path);
+          fprintf(fenv, "LD_LIBRARY_PATH=%s:$LD_LIBRARY_PATH\n", dir);
+          free(dir);
+          keys.push_back("LD_LIBRARY_PATH");
+        }
+
 	for (size_t k = 0; k<keys.size(); k++)
 	  fprintf(fenv, "export %s\n", keys[k].c_str());
+
 	fflush(fenv);
 	fclose(fenv);
       }

=== modified file 'storage/ndb/test/run-test/main.cpp'
--- a/storage/ndb/test/run-test/main.cpp	2011-10-05 13:57:58 +0000
+++ b/storage/ndb/test/run-test/main.cpp	2011-10-20 19:41:56 +0000
@@ -86,6 +86,7 @@ const char * g_ndbd_bin_path = 0;
 const char * g_ndbmtd_bin_path = 0;
 const char * g_mysqld_bin_path = 0;
 const char * g_mysql_install_db_bin_path = 0;
+const char * g_libmysqlclient_so_path = 0;
 
 static struct
 {
@@ -93,11 +94,12 @@ static struct
   const char * exe;
   const char ** var;
 } g_binaries[] = {
-  { true,  "ndb_mgmd",         &g_ndb_mgmd_bin_path},
-  { true,  "ndbd",             &g_ndbd_bin_path },
-  { false, "ndbmtd",           &g_ndbmtd_bin_path },
-  { true,  "mysqld",           &g_mysqld_bin_path },
-  { true,  "mysql_install_db", &g_mysql_install_db_bin_path },
+  { true,  "ndb_mgmd",          &g_ndb_mgmd_bin_path},
+  { true,  "ndbd",              &g_ndbd_bin_path },
+  { false, "ndbmtd",            &g_ndbmtd_bin_path },
+  { true,  "mysqld",            &g_mysqld_bin_path },
+  { true,  "mysql_install_db",  &g_mysql_install_db_bin_path },
+  { true,  "libmysqlclient.so", &g_libmysqlclient_so_path },
   { true, 0, 0 }
 };
 
@@ -108,6 +110,8 @@ g_search_path[] =
   "libexec",
   "sbin",
   "scripts",
+  "lib",
+  "lib/mysql",
   0
 };
 static bool find_binaries();

=== modified file 'storage/ndb/tools/ndb_dump_frm_data.cpp'
--- a/storage/ndb/tools/ndb_dump_frm_data.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/tools/ndb_dump_frm_data.cpp	2011-10-20 19:41:56 +0000
@@ -104,7 +104,7 @@ dofile(const char* file)
       break;
     }
     ssize_t size2;
-    if ((size2 = read(fd, data, size)) == -1)
+    if ((size2 = read(fd, data, (unsigned)size)) == -1)
     {
       fprintf(stderr, "%s: read: %s\n", file, strerror(errno));
       break;
@@ -137,7 +137,7 @@ dofile(const char* file)
     printf("  orig: %u\n", (uint)size);
     printf("  pack: %u\n", (uint)pack_len);
     printf("*/\n\n");
-    dodump(name, pack_data, pack_len);
+    dodump(name, pack_data, (uint)pack_len);
     ret = 0;
   }
   while (0);

=== modified file 'support-files/compiler_warnings.supp'
--- a/support-files/compiler_warnings.supp	2011-10-14 08:26:28 +0000
+++ b/support-files/compiler_warnings.supp	2011-10-20 19:41:56 +0000
@@ -59,23 +59,23 @@ db_vrfy.c : .*comparison is always false
 # Ignore all conversion warnings on windows 64
 # (Is safe as we are not yet supporting strings >= 2G)
 #
-.* : conversion from '__int64' to .*int'.*
-.* : conversion from '__int64' to 'uint8'.*
-.* : conversion from '__int64' to 'uint32'.*
-.* : conversion from '__int64' to 'u.*long'.*
-.* : conversion from '__int64' to 'long'.*
-.* : conversion from '__int64' to 'off_t'.*
-.* : conversion from '.*size_t' to .*int'.*
-.* : conversion from '.*size_t' to 'TaoCrypt::word32'.*
-.* : conversion from '.*size_t' to 'u.*long'.*
-.* : conversion from '.*size_t' to 'uint32'.*
-.* : conversion from '.*size_t' to 'off_t'.*
-.* : conversion from '.*size_t' to 'size_s'.*
-.* : conversion from '.*size_t' to 'DWORD'.*
-.* : conversion from '.*size_t' to 'uLongf'.*
-.* : conversion from '.*size_t' to 'UINT'.*
-.* : conversion from '.*size_t' to 'uInt'.*
-.* : conversion from '.*size_t' to 'uint16'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to .*int'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'uint8'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'uint32'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'u.*long'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'long'.*
+^(?:(?!ndb).)*$ : conversion from '__int64' to 'off_t'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to .*int'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'TaoCrypt::word32'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'u.*long'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'uint32'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'off_t'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'size_s'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'DWORD'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'uLongf'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'UINT'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'uInt'.*
+^(?:(?!ndb).)*$ : conversion from '.*size_t' to 'uint16'.*
 
 #
 # The following should be fixed by the ndb team

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-5.5-cluster branch (jonas.oreland:3604 to 3606) jonas oreland 21 Oct