List: Commits
From: Ole John Aske  Date: November 15 2010 10:26am
Subject: bzr commit into mysql-5.1-telco-7.0-spj-scan-vs-scan branch
(ole.john.aske:3364)
#At file:///net/fimafeng09/export/home/tmp/oleja/mysql/mysql-5.1-telco-7.0-spj-scan-scan/ based on revid:ole.john.aske@stripped

 3364 Ole John Aske	2010-11-15 [merge]
      Merge from telco-70

    added:
      storage/ndb/cmake/ndb_require_variable.cmake
    renamed:
      mysql-test/std_data/ndb_apply_status.frm => mysql-test/suite/rpl_ndb/t/ndb_apply_status.frm
    modified:
      mysql-test/suite/ndb/r/ndb_condition_pushdown.result
      mysql-test/suite/ndb/r/ndb_index_unique.result
      mysql-test/suite/ndb/r/ndb_native_default_support.result
      mysql-test/suite/ndb/t/ndb_condition_pushdown.test
      mysql-test/suite/ndb/t/ndb_index_unique.test
      mysql-test/suite/ndb/t/ndb_native_default_support.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_apply_status.test
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster.h
      sql/ha_ndbcluster_binlog.cc
      sql/ha_ndbcluster_binlog.h
      sql/ha_ndbcluster_cond.cc
      sql/ha_ndbcluster_cond.h
      sql/ha_ndbcluster_glue.h
      sql/ha_ndbinfo.cc
      sql/mysqld.cc
      storage/ndb/CMakeLists.txt
      storage/ndb/include/kernel/signaldata/TcKeyReq.hpp
      storage/ndb/include/ndb_global.h
      storage/ndb/include/portlib/ndb_prefetch.h
      storage/ndb/ndb_configure.cmake
      storage/ndb/src/common/debugger/CMakeLists.txt
      storage/ndb/src/common/logger/CMakeLists.txt
      storage/ndb/src/common/util/CMakeLists.txt
      storage/ndb/src/common/util/SparseBitmask.cpp
      storage/ndb/src/kernel/CMakeLists.txt
      storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
      storage/ndb/src/kernel/vm/SimulatedBlock.cpp
      storage/ndb/src/kernel/vm/SimulatedBlock.hpp
      storage/ndb/src/mgmapi/CMakeLists.txt
      storage/ndb/src/mgmapi/Makefile.am
      storage/ndb/src/mgmclient/CMakeLists.txt
      storage/ndb/src/mgmclient/CommandInterpreter.cpp
      storage/ndb/src/mgmsrv/CMakeLists.txt
      storage/ndb/src/mgmsrv/Makefile.am
      storage/ndb/src/mgmsrv/testConfig.cpp
      storage/ndb/src/ndbapi/NdbTransaction.cpp
      storage/ndb/test/ndbapi/testDict.cpp
      storage/ndb/test/ndbapi/testNdbApi.cpp
      storage/ndb/tools/CMakeLists.txt
      storage/ndb/tools/Makefile.am
=== modified file 'mysql-test/suite/ndb/r/ndb_condition_pushdown.result'
--- a/mysql-test/suite/ndb/r/ndb_condition_pushdown.result	2010-11-10 10:05:24 +0000
+++ b/mysql-test/suite/ndb/r/ndb_condition_pushdown.result	2010-11-15 10:26:48 +0000
@@ -2169,5 +2169,28 @@ pk	x
 4	4
 5	5
 drop table t;
+set engine_condition_pushdown = on;
+create table t (x enum ('yes','yep','no')) engine = ndb;
+insert into t values ('yes'),('yep'),('no');
+explain select * from t where x like 'yes' order by x;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t	ALL	NULL	NULL	NULL	NULL	#	Using where; Using filesort
+select * from t where x like 'yes' order by x;
+x
+yes
+explain select * from t where x like 'ye%' order by x;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t	ALL	NULL	NULL	NULL	NULL	#	Using where; Using filesort
+select * from t where x like 'ye%' order by x;
+x
+yes
+yep
+explain select * from t where x not like 'ye%' order by x;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t	ALL	NULL	NULL	NULL	NULL	#	Using where; Using filesort
+select * from t where x not like 'ye%' order by x;
+x
+no
+drop table t;
 set engine_condition_pushdown = @old_ecpd;
 DROP TABLE t1,t2,t3,t4,t5;

=== modified file 'mysql-test/suite/ndb/r/ndb_index_unique.result'
--- a/mysql-test/suite/ndb/r/ndb_index_unique.result	2010-11-01 13:15:22 +0000
+++ b/mysql-test/suite/ndb/r/ndb_index_unique.result	2010-11-14 14:16:10 +0000
@@ -737,3 +737,155 @@ drop table t1;
 alter tablespace ts1 drop datafile 'datafile.dat' engine=ndb;
 drop tablespace ts1 engine=ndb;
 drop logfile group lg1 engine=ndb;
+# bug#57032
+create table t1 (
+a int not null,
+b int,
+primary key using hash (a),
+unique key using hash (b)
+)
+engine ndb;
+Warnings:
+Warning	1121	Ndb does not support unique index on NULL valued attributes, index access with NULL value will become full table scan
+insert into t1 values
+(0,0),(1,1),(2,2),(3,3),(4,4),
+(5,null),(6,null),(7,null),(8,null),(9,null);
+set @old_ecpd = @@session.engine_condition_pushdown;
+set engine_condition_pushdown = 0;
+select a from t1 where b is not null order by a;
+a
+0
+1
+2
+3
+4
+select a from t1 where b is null order by a;
+a
+5
+6
+7
+8
+9
+set engine_condition_pushdown = 1;
+select a from t1 where b is not null order by a;
+a
+0
+1
+2
+3
+4
+select a from t1 where b is null order by a;
+a
+5
+6
+7
+8
+9
+set engine_condition_pushdown = @old_ecpd;
+drop table t1;
+create table t1 (
+a int not null,
+b int,
+c int,
+primary key using hash (a),
+unique key using hash (b,c)
+)
+engine ndb;
+Warnings:
+Warning	1121	Ndb does not support unique index on NULL valued attributes, index access with NULL value will become full table scan
+insert into t1 values
+(0,0,0),(1,1,1),(2,2,1),(3,3,1),(4,4,2),
+(5,null,0),(6,null,1),(7,null,1),(8,null,1),(9,null,2),
+(10,0,null),(11,1,null),(12,1,null),(13,1,null),(14,2,null),
+(15,null,null),(16,null,null),(17,null,null),(18,null,null),(19,null,null);
+set @old_ecpd = @@session.engine_condition_pushdown;
+set engine_condition_pushdown = 0;
+select a from t1 where b is not null and c = 1 order by a;
+a
+1
+2
+3
+select a from t1 where b is null and c = 1 order by a;
+a
+6
+7
+8
+select a from t1 where b = 1 and c is null order by a;
+a
+11
+12
+13
+select a from t1 where b is null and c is null order by a;
+a
+15
+16
+17
+18
+19
+select a from t1 where b is not null and c is null order by a;
+a
+10
+11
+12
+13
+14
+select a from t1 where b is null and c is not null order by a;
+a
+5
+6
+7
+8
+9
+select a from t1 where b is not null and c is not null order by a;
+a
+0
+1
+2
+3
+4
+set engine_condition_pushdown = 1;
+select a from t1 where b is not null and c = 1 order by a;
+a
+1
+2
+3
+select a from t1 where b is null and c = 1 order by a;
+a
+6
+7
+8
+select a from t1 where b = 1 and c is null order by a;
+a
+11
+12
+13
+select a from t1 where b is null and c is null order by a;
+a
+15
+16
+17
+18
+19
+select a from t1 where b is not null and c is null order by a;
+a
+10
+11
+12
+13
+14
+select a from t1 where b is null and c is not null order by a;
+a
+5
+6
+7
+8
+9
+select a from t1 where b is not null and c is not null order by a;
+a
+0
+1
+2
+3
+4
+set engine_condition_pushdown = @old_ecpd;
+drop table t1;

=== modified file 'mysql-test/suite/ndb/r/ndb_native_default_support.result'
--- a/mysql-test/suite/ndb/r/ndb_native_default_support.result	2010-08-19 13:35:45 +0000
+++ b/mysql-test/suite/ndb/r/ndb_native_default_support.result	2010-11-10 13:39:11 +0000
@@ -1412,6 +1412,17 @@ i	j	f	d	d2	ch	HEX(b)	HEX(vb)	HEX(blob1)	
 drop table t1;
 Bug#55121 error 839 'Illegal null attribute' from NDB for fields with default value
 Ensure that Ndb handler doesn't expect native defaults for Blobs.
+set @save_sql_mode = @@session.sql_mode;
+set sql_mode=STRICT_TRANS_TABLES;
+CREATE TABLE t1 (
+fid smallint(6) unsigned NOT NULL DEFAULT '0',
+f01 text NOT NULL,
+f02 varchar(255) NOT NULL DEFAULT '',
+f03 text NOT NULL DEFAULT '',
+PRIMARY KEY (fid)
+) engine=ndb;
+ERROR 42000: BLOB/TEXT column 'f03' can't have a default value
+set sql_mode=@save_sql_mode;
 CREATE TABLE t1 (
 fid smallint(6) unsigned NOT NULL DEFAULT '0',
 f01 text NOT NULL,
@@ -1419,11 +1430,6 @@ f02 varchar(255) NOT NULL DEFAULT '',
 f03 text NOT NULL DEFAULT '',
 PRIMARY KEY (fid)
 ) engine=ndb;
-Warnings:
-Warning	1101	BLOB/TEXT column 'f03' can't have a default value
-show warnings;
-Level	Code	Message
-Warning	1101	BLOB/TEXT column 'f03' can't have a default value
 show create table t1;
 Table	Create Table
 t1	CREATE TABLE `t1` (

=== modified file 'mysql-test/suite/ndb/t/ndb_condition_pushdown.test'
--- a/mysql-test/suite/ndb/t/ndb_condition_pushdown.test	2010-11-09 13:54:36 +0000
+++ b/mysql-test/suite/ndb/t/ndb_condition_pushdown.test	2010-11-11 08:21:34 +0000
@@ -2269,6 +2269,21 @@ explain select * from t where 3 between 
 select * from t where 3 between -1 and x order by pk; 
 drop table t;
 
+# Bug#53360 No result for requests using LIKE condition on ENUM fields
+set engine_condition_pushdown = on;
+create table t (x enum ('yes','yep','no')) engine = ndb;
+insert into t values ('yes'),('yep'),('no');
+--replace_column 9 #
+explain select * from t where x like 'yes' order by x;
+select * from t where x like 'yes' order by x;
+--replace_column 9 #
+explain select * from t where x like 'ye%' order by x;
+select * from t where x like 'ye%' order by x;
+--replace_column 9 #
+explain select * from t where x not like 'ye%' order by x;
+select * from t where x not like 'ye%' order by x;
+drop table t;
+
 set engine_condition_pushdown = @old_ecpd;
 DROP TABLE t1,t2,t3,t4,t5;
 

=== modified file 'mysql-test/suite/ndb/t/ndb_index_unique.test'
--- a/mysql-test/suite/ndb/t/ndb_index_unique.test	2010-11-01 13:15:22 +0000
+++ b/mysql-test/suite/ndb/t/ndb_index_unique.test	2010-11-14 14:16:10 +0000
@@ -433,4 +433,85 @@ alter tablespace ts1 drop datafile 'data
 drop tablespace ts1 engine=ndb;
 drop logfile group lg1 engine=ndb;
 
-# end of tests
\ No newline at end of file
+# bug#57032 'NOT NULL' evaluation is incorrect when using an 'unique index
+
+--echo # bug#57032
+
+create table t1 (
+  a int not null,
+  b int,
+  primary key using hash (a),
+  unique key using hash (b)
+)
+engine ndb;
+
+insert into t1 values
+  (0,0),(1,1),(2,2),(3,3),(4,4),
+  (5,null),(6,null),(7,null),(8,null),(9,null);
+
+set @old_ecpd = @@session.engine_condition_pushdown;
+
+set engine_condition_pushdown = 0;
+# failed, empty result
+select a from t1 where b is not null order by a;
+# worked
+select a from t1 where b is null order by a;
+
+set engine_condition_pushdown = 1;
+# failed, empty result
+select a from t1 where b is not null order by a;
+# worked
+select a from t1 where b is null order by a;
+
+set engine_condition_pushdown = @old_ecpd;
+
+drop table t1;
+
+create table t1 (
+  a int not null,
+  b int,
+  c int,
+  primary key using hash (a),
+  unique key using hash (b,c)
+)
+engine ndb;
+
+insert into t1 values
+  (0,0,0),(1,1,1),(2,2,1),(3,3,1),(4,4,2),
+  (5,null,0),(6,null,1),(7,null,1),(8,null,1),(9,null,2),
+  (10,0,null),(11,1,null),(12,1,null),(13,1,null),(14,2,null),
+  (15,null,null),(16,null,null),(17,null,null),(18,null,null),(19,null,null);
+
+set @old_ecpd = @@session.engine_condition_pushdown;
+
+set engine_condition_pushdown = 0;
+# worked
+select a from t1 where b is not null and c = 1 order by a;
+# failed, empty result
+select a from t1 where b is null and c = 1 order by a;
+# failed, empty result
+select a from t1 where b = 1 and c is null order by a;
+# worked
+select a from t1 where b is null and c is null order by a;
+select a from t1 where b is not null and c is null order by a;
+select a from t1 where b is null and c is not null order by a;
+select a from t1 where b is not null and c is not null order by a;
+
+set engine_condition_pushdown = 1;
+# worked
+select a from t1 where b is not null and c = 1 order by a;
+# failed, empty result
+select a from t1 where b is null and c = 1 order by a;
+# failed, empty result
+select a from t1 where b = 1 and c is null order by a;
+# worked
+select a from t1 where b is null and c is null order by a;
+select a from t1 where b is not null and c is null order by a;
+select a from t1 where b is null and c is not null order by a;
+select a from t1 where b is not null and c is not null order by a;
+
+set engine_condition_pushdown = @old_ecpd;
+
+drop table t1;
+
+# end of tests

=== modified file 'mysql-test/suite/ndb/t/ndb_native_default_support.test'
--- a/mysql-test/suite/ndb/t/ndb_native_default_support.test	2010-10-25 09:15:03 +0000
+++ b/mysql-test/suite/ndb/t/ndb_native_default_support.test	2010-11-10 13:39:11 +0000
@@ -636,6 +636,9 @@ drop table t1;
 --echo Bug#55121 error 839 'Illegal null attribute' from NDB for fields with default value
 --echo Ensure that Ndb handler doesn't expect native defaults for Blobs.
 
+set @save_sql_mode = @@session.sql_mode;
+set sql_mode=STRICT_TRANS_TABLES;
+--error ER_BLOB_CANT_HAVE_DEFAULT
 CREATE TABLE t1 (
   fid smallint(6) unsigned NOT NULL DEFAULT '0',
   f01 text NOT NULL,
@@ -643,8 +646,17 @@ CREATE TABLE t1 (
   f03 text NOT NULL DEFAULT '',
   PRIMARY KEY (fid)
 ) engine=ndb;
+set sql_mode=@save_sql_mode;
 
-show warnings;
+--disable_warnings
+CREATE TABLE t1 (
+  fid smallint(6) unsigned NOT NULL DEFAULT '0',
+  f01 text NOT NULL,
+  f02 varchar(255) NOT NULL DEFAULT '',
+  f03 text NOT NULL DEFAULT '',
+  PRIMARY KEY (fid)
+) engine=ndb;
+--enable_warnings
 
 show create table t1;
 

=== renamed file 'mysql-test/std_data/ndb_apply_status.frm' => 'mysql-test/suite/rpl_ndb/t/ndb_apply_status.frm'
=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_apply_status.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_apply_status.test	2010-10-22 15:16:26 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_apply_status.test	2010-10-28 06:47:47 +0000
@@ -7,7 +7,7 @@
 
 # The table might already have been created by mysqld -> ignore error
 --error 0,1
-copy_file $MYSQL_TEST_DIR/std_data/ndb_apply_status.frm $MYSQLTEST_VARDIR/master-data/mysql/ndb_apply_status.frm;
+copy_file $MYSQL_TEST_DIR/suite/rpl_ndb/t/ndb_apply_status.frm $MYSQLTEST_VARDIR/master-data/mysql/ndb_apply_status.frm;
 --disable_result_log
 --disable_abort_on_error
 select * from mysql.ndb_apply_status;

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2010-11-15 09:21:34 +0000
+++ b/sql/ha_ndbcluster.cc	2010-11-15 10:26:48 +0000
@@ -65,7 +65,7 @@ static const ulong ONE_YEAR_IN_SECONDS= 
 
 ulong opt_ndb_extra_logging;
 static ulong opt_ndb_wait_connected;
-extern ulong opt_ndb_wait_setup;
+ulong opt_ndb_wait_setup;
 static ulong opt_ndb_cache_check_time;
 static uint opt_ndb_cluster_connection_pool;
 static char* opt_ndb_connectstring;
@@ -5578,8 +5578,8 @@ guess_scan_flags(NdbOperation::LockMode 
  */
 
 int ha_ndbcluster::full_table_scan(const KEY* key_info, 
-                                   const uchar *key, 
-                                   uint key_len,
+                                   const key_range *start_key,
+                                   const key_range *end_key,
                                    uchar *buf)
 {
   int error;
@@ -5685,7 +5685,7 @@ int ha_ndbcluster::full_table_scan(const
         my_errno= HA_ERR_OUT_OF_MEM;
         DBUG_RETURN(my_errno);
       }       
-      if (m_cond->generate_scan_filter_from_key(&code, &options, key_info, key, key_len, buf))
+      if (m_cond->generate_scan_filter_from_key(&code, &options, key_info, start_key, end_key, buf))
         ERR_RETURN(code.getNdbError());
     }
 
@@ -7656,8 +7656,8 @@ int ha_ndbcluster::read_range_first_to_b
     }
     else if (type == UNIQUE_INDEX)
       DBUG_RETURN(full_table_scan(key_info, 
-                                  start_key->key, 
-                                  start_key->length, 
+                                  start_key,
+                                  end_key,
                                   buf));
     break;
   default:
@@ -7778,7 +7778,7 @@ int ha_ndbcluster::rnd_next(uchar *buf)
   else if (m_active_query)
     error= next_result(buf);
   else
-    error= full_table_scan(NULL, NULL, 0, buf);
+    error= full_table_scan(NULL, NULL, NULL, buf);
 
   table->status= error ? STATUS_NOT_FOUND: 0;
   DBUG_RETURN(error);
@@ -12358,6 +12358,7 @@ static int connect_callback()
   return 0;
 }
 
+#ifndef NDB_NO_WAIT_SETUP
 static int ndb_wait_setup_func_impl(ulong max_wait)
 {
   DBUG_ENTER("ndb_wait_setup_func_impl");
@@ -12395,8 +12396,8 @@ static int ndb_wait_setup_func_impl(ulon
   DBUG_RETURN((ndb_setup_complete == 1)? 0 : 1);
 }
 
-extern int(*ndb_wait_setup_func)(ulong);
-
+int(*ndb_wait_setup_func)(ulong) = 0;
+#endif
 extern int ndb_dictionary_is_mysqld;
 
 static int ndbcluster_init(void *p)
@@ -12508,7 +12509,9 @@ static int ndbcluster_init(void *p)
     goto ndbcluster_init_error;
   }
 
+#ifndef NDB_NO_WAIT_SETUP
   ndb_wait_setup_func= ndb_wait_setup_func_impl;
+#endif
 
   ndbcluster_inited= 1;
   DBUG_RETURN(FALSE);
@@ -16985,18 +16988,31 @@ static MYSQL_SYSVAR_BOOL(
   1                                  /* default */
 );
 
+#ifndef NDB_NO_LOG_EMPTY_EPOCHS
+#define LOG_EMPTY_EPOCHS_OPTS PLUGIN_VAR_OPCMDARG
+#define LOG_EMPTY_EPOCHS_DEFAULT 0
+#else
+#define LOG_EMPTY_EPOCHS_OPTS PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY
+#define LOG_EMPTY_EPOCHS_DEFAULT 1
+#endif
 
-my_bool opt_ndb_log_empty_epochs;
+static my_bool opt_ndb_log_empty_epochs;
 static MYSQL_SYSVAR_BOOL(
   log_empty_epochs,                  /* name */
   opt_ndb_log_empty_epochs,          /* var */
-  PLUGIN_VAR_OPCMDARG,
+  LOG_EMPTY_EPOCHS_OPTS,
   "",
   NULL,                              /* check func. */
   NULL,                              /* update func. */
-  0                                  /* default */
+  LOG_EMPTY_EPOCHS_DEFAULT           /* default */
 );
 
+bool ndb_log_empty_epochs(void)
+{
+  return opt_ndb_log_empty_epochs;
+}
+
+
 my_bool opt_ndb_log_apply_status;
 static MYSQL_SYSVAR_BOOL(
   log_apply_status,                 /* name */

=== modified file 'sql/ha_ndbcluster.h'
--- a/sql/ha_ndbcluster.h	2010-11-10 08:01:00 +0000
+++ b/sql/ha_ndbcluster.h	2010-11-15 10:26:48 +0000
@@ -727,8 +727,8 @@ private:
   int unique_index_read(const uchar *key, uint key_len, 
                         uchar *buf);
   int full_table_scan(const KEY* key_info, 
-                      const uchar *key, 
-                      uint key_len,
+                      const key_range *start_key,
+                      const key_range *end_key,
                       uchar *buf);
   int flush_bulk_insert(bool allow_batch= FALSE);
   int ndb_write_row(uchar *record, bool primary_key_update,

=== modified file 'sql/ha_ndbcluster_binlog.cc'
--- a/sql/ha_ndbcluster_binlog.cc	2010-11-09 12:12:59 +0000
+++ b/sql/ha_ndbcluster_binlog.cc	2010-11-10 10:31:20 +0000
@@ -38,7 +38,6 @@
 
 extern my_bool opt_ndb_log_orig;
 extern my_bool opt_ndb_log_bin;
-extern my_bool opt_ndb_log_empty_epochs;
 extern my_bool opt_ndb_log_update_as_write;
 extern my_bool opt_ndb_log_updated_only;
 extern my_bool opt_ndb_log_binlog_index;
@@ -46,6 +45,8 @@ extern my_bool opt_ndb_log_apply_status;
 extern ulong opt_ndb_extra_logging;
 extern ulong opt_server_id_mask;
 
+bool ndb_log_empty_epochs(void);
+
 /*
   defines for cluster replication table names
 */
@@ -867,14 +868,18 @@ static bool ndbcluster_flush_logs(handle
 /*
   Global schema lock across mysql servers
 */
-int ndbcluster_has_global_schema_lock(Thd_ndb *thd_ndb)
+bool ndbcluster_has_global_schema_lock(Thd_ndb *thd_ndb)
 {
+#ifndef NDB_NO_GLOBAL_SCHEMA_LOCK
   if (thd_ndb->global_schema_lock_trans)
   {
     thd_ndb->global_schema_lock_trans->refresh();
-    return 1;
+    return true;
   }
-  return 0;
+  return false;
+#else
+  return true; // OK
+#endif
 }
 
 int ndbcluster_no_global_schema_lock_abort(THD *thd, const char *msg)
@@ -888,6 +893,7 @@ int ndbcluster_no_global_schema_lock_abo
   return -1;
 }
 
+#ifndef NDB_NO_GLOBAL_SCHEMA_LOCK
 #include "ha_ndbcluster_lock_ext.h"
 
 /*
@@ -897,18 +903,26 @@ int ndbcluster_no_global_schema_lock_abo
 static int ndbcluster_global_schema_lock_is_locked_or_queued= 0;
 static int ndbcluster_global_schema_lock_no_locking_allowed= 0;
 static pthread_mutex_t ndbcluster_global_schema_lock_mutex;
+#endif
 void ndbcluster_global_schema_lock_init()
 {
+#ifndef NDB_NO_GLOBAL_SCHEMA_LOCK
   pthread_mutex_init(&ndbcluster_global_schema_lock_mutex, MY_MUTEX_INIT_FAST);
+#endif
 }
 void ndbcluster_global_schema_lock_deinit()
 {
+#ifndef NDB_NO_GLOBAL_SCHEMA_LOCK
   pthread_mutex_destroy(&ndbcluster_global_schema_lock_mutex);
+#endif
 }
 
 static int ndbcluster_global_schema_lock(THD *thd, int no_lock_queue,
                                          int report_cluster_disconnected)
 {
+#ifdef NDB_NO_GLOBAL_SCHEMA_LOCK
+  return 0;
+#else
   Ndb *ndb= check_ndb_in_thd(thd);
   Thd_ndb *thd_ndb= get_thd_ndb(thd);
   NdbError ndb_error;
@@ -1012,9 +1026,13 @@ static int ndbcluster_global_schema_lock
   }
   thd_ndb->global_schema_lock_error= ndb_error.code ? ndb_error.code : -1;
   DBUG_RETURN(-1);
+#endif
 }
 static int ndbcluster_global_schema_unlock(THD *thd)
 {
+#ifdef NDB_NO_GLOBAL_SCHEMA_LOCK
+  return 0;
+#else
   Thd_ndb *thd_ndb= get_thd_ndb(thd);
   DBUG_ASSERT(thd_ndb != 0);
   if (thd_ndb == 0 || (thd_ndb->options & TNO_NO_LOCK_SCHEMA_OP))
@@ -1063,6 +1081,7 @@ static int ndbcluster_global_schema_unlo
     }
   }
   DBUG_RETURN(0);
+#endif
 }
 
 static int ndbcluster_binlog_func(handlerton *hton, THD *thd, 
@@ -1088,12 +1107,14 @@ static int ndbcluster_binlog_func(handle
   case BFN_BINLOG_PURGE_FILE:
     res= ndbcluster_binlog_index_purge_file(thd, (const char *)arg);
     break;
+#ifndef NDB_NO_GLOBAL_SCHEMA_LOCK
   case BFN_GLOBAL_SCHEMA_LOCK:
     res= ndbcluster_global_schema_lock(thd, *(int*)arg, 1);
     break;
   case BFN_GLOBAL_SCHEMA_UNLOCK:
     res= ndbcluster_global_schema_unlock(thd);
     break;
+#endif
   }
   DBUG_RETURN(res);
 }
@@ -6366,7 +6387,7 @@ restart_cluster_failure:
       }
     }
     else if (res > 0 ||
-             (opt_ndb_log_empty_epochs &&
+             (ndb_log_empty_epochs() &&
               gci > ndb_latest_handled_binlog_epoch))
     {
       DBUG_PRINT("info", ("pollEvents res: %d", res));
@@ -6419,7 +6440,7 @@ restart_cluster_failure:
       {
         /*
           Must be an empty epoch since the condition
-          (opt_ndb_log_empty_epochs &&
+          (ndb_log_empty_epochs() &&
            gci > ndb_latest_handled_binlog_epoch)
           must be true we write empty epoch into
           ndb_binlog_index
@@ -6653,7 +6674,7 @@ restart_cluster_failure:
 
         while (trans.good())
         {
-          if (!opt_ndb_log_empty_epochs)
+          if (!ndb_log_empty_epochs())
           {
             /*
               If 
@@ -6671,6 +6692,7 @@ restart_cluster_failure:
                 (! (opt_ndb_log_apply_status &&
                     trans_slave_row_count) ))
             {
+#ifndef NDB_NO_LOG_EMPTY_EPOCHS
               /* nothing to commit, rollback instead */
               if (int r= trans.rollback())
               {
@@ -6680,6 +6702,9 @@ restart_cluster_failure:
                 /* TODO: Further handling? */
               }
               break;
+#else
+              abort(); // Should not come here, log-empty-epochs is always on
+#endif
             }
           }
       commit_to_binlog:

=== modified file 'sql/ha_ndbcluster_binlog.h'
--- a/sql/ha_ndbcluster_binlog.h	2010-07-02 11:01:48 +0000
+++ b/sql/ha_ndbcluster_binlog.h	2010-11-10 10:09:55 +0000
@@ -329,7 +329,7 @@ set_thd_ndb(THD *thd, Thd_ndb *thd_ndb)
 
 Ndb* check_ndb_in_thd(THD* thd, bool validate_ndb= false);
 
-int ndbcluster_has_global_schema_lock(Thd_ndb *thd_ndb);
+bool ndbcluster_has_global_schema_lock(Thd_ndb *thd_ndb);
 int ndbcluster_no_global_schema_lock_abort(THD *thd, const char *msg);
 
 class Ndbcluster_global_schema_lock_guard

=== modified file 'sql/ha_ndbcluster_cond.cc'
--- a/sql/ha_ndbcluster_cond.cc	2010-11-10 08:01:00 +0000
+++ b/sql/ha_ndbcluster_cond.cc	2010-11-15 10:26:48 +0000
@@ -258,7 +258,7 @@ void ndb_serialize_cond(const Item *item
         {
           Item_field *field_item= (Item_field *) item;
           Field *field= field_item->field;
-          enum_field_types type= field->type();
+          enum_field_types type= field->real_type();
           /*
             Check that the field is part of the table of the handler
             instance and that we expect a field with of this result type.
@@ -270,18 +270,19 @@ void ndb_serialize_cond(const Item *item
             DBUG_PRINT("info", ("table %s", tab->getName()));
             DBUG_PRINT("info", ("column %s", field->field_name));
             DBUG_PRINT("info", ("column length %u", field->field_length));
-            DBUG_PRINT("info", ("type %d", field->type()));
+            DBUG_PRINT("info", ("type %d", field->real_type()));
             DBUG_PRINT("info", ("result type %d", field->result_type()));
 
             // Check that we are expecting a field and with the correct
             // result type and of length that can store the item value
             if (context->expecting(Item::FIELD_ITEM) &&
-                context->expecting_field_type(field->type()) &&
+                context->expecting_field_type(field->real_type()) &&
                 context->expecting_max_length(field->field_length) &&
                 (context->expecting_field_result(field->result_type()) ||
                  // Date and year can be written as string or int
                  ((type == MYSQL_TYPE_TIME ||
                    type == MYSQL_TYPE_DATE || 
+                   type == MYSQL_TYPE_NEWDATE || 
                    type == MYSQL_TYPE_YEAR ||
                    type == MYSQL_TYPE_DATETIME)
                   ? (context->expecting_field_result(STRING_RESULT) ||
@@ -305,6 +306,7 @@ void ndb_serialize_cond(const Item *item
                 // We have not seen second argument yet
                 if (type == MYSQL_TYPE_TIME ||
                     type == MYSQL_TYPE_DATE || 
+                    type == MYSQL_TYPE_NEWDATE || 
                     type == MYSQL_TYPE_YEAR ||
                     type == MYSQL_TYPE_DATETIME)
                 {
@@ -348,6 +350,7 @@ void ndb_serialize_cond(const Item *item
                     !context->expecting_collation(item->collation.collation)
                     && type != MYSQL_TYPE_TIME
                     && type != MYSQL_TYPE_DATE
+                    && type != MYSQL_TYPE_NEWDATE
                     && type != MYSQL_TYPE_YEAR
                     && type != MYSQL_TYPE_DATETIME)
                 {
@@ -1449,44 +1452,163 @@ ha_ndbcluster_cond::generate_scan_filter
 }
 
 
+/*
+  Optimizer sometimes does hash index lookup of a key where some
+  key parts are null.  The set of cases where this happens makes
+  no sense but cannot be ignored since optimizer may expect the result
+  to be filtered accordingly.  The scan is actually on the table and
+  the index bounds are pushed down.
+*/
 int ha_ndbcluster_cond::generate_scan_filter_from_key(NdbInterpretedCode* code,
                                                       NdbScanOperation::ScanOptions* options,
                                                       const KEY* key_info, 
-                                                      const uchar *key, 
-                                                      uint key_len,
+                                                      const key_range *start_key,
+                                                      const key_range *end_key,
                                                       uchar *buf)
 {
-  KEY_PART_INFO* key_part= key_info->key_part;
-  KEY_PART_INFO* end= key_part+key_info->key_parts;
-  NdbScanFilter filter(code);
-  int res;
   DBUG_ENTER("generate_scan_filter_from_key");
 
+#ifndef DBUG_OFF
+  {
+    DBUG_PRINT("info", ("key parts:%u length:%u",
+                        key_info->key_parts, key_info->key_length));
+    const key_range* keylist[2]={ start_key, end_key };
+    for (uint j=0; j <= 1; j++)
+    {
+      char buf[8192];
+      const key_range* key=keylist[j];
+      if (key == 0)
+      {
+        sprintf(buf, "key range %u: none", j);
+      }
+      else
+      {
+        sprintf(buf, "key range %u: flag:%u part", j, key->flag);
+        const KEY_PART_INFO* key_part=key_info->key_part;
+        const uchar* ptr=key->key;
+        for (uint i=0; i < key_info->key_parts; i++)
+        {
+          sprintf(buf+strlen(buf), " %u:", i);
+          for (uint k=0; k < key_part->store_length; k++)
+          {
+            sprintf(buf+strlen(buf), " %02x", ptr[k]);
+          }
+          ptr+=key_part->store_length;
+          if (ptr - key->key >= key->length)
+          {
+            /*
+              key_range has no count of parts so must test byte length.
+              But this is not the place for following assert.
+            */
+            // DBUG_ASSERT(ptr - key->key == key->length);
+            break;
+          }
+          key_part++;
+        }
+      }
+      DBUG_PRINT("info", ("%s", buf));
+    }
+  }
+#endif
+
+  NdbScanFilter filter(code);
+  int res;
   filter.begin(NdbScanFilter::AND);
-  for (; key_part != end; key_part++) 
+  do
   {
-    Field* field= key_part->field;
-    uint32 pack_len= field->pack_length();
-    const uchar* ptr= key;
-    DBUG_PRINT("info", ("Filtering value for %s", field->field_name));
-    DBUG_DUMP("key", ptr, pack_len);
-    if (key_part->null_bit)
+    /*
+      Case "x is not null".
+      Seen with index(x) where it becomes range "null < x".
+      Not seen with index(x,y) for any combination of bounds
+      which include "is not null".
+    */
+    if (start_key != 0 &&
+        start_key->flag == HA_READ_AFTER_KEY &&
+        end_key == 0 &&
+        key_info->key_parts == 1)
     {
-      DBUG_PRINT("info", ("Generating ISNULL filter"));
-      if (filter.isnull(key_part->fieldnr-1) == -1)
-	DBUG_RETURN(1);
+      const KEY_PART_INFO* key_part=key_info->key_part;
+      if (key_part->null_bit != 0) // nullable (must be)
+      {
+        const Field* field=key_part->field;
+        const uchar* ptr= start_key->key;
+        if (ptr[0] != 0) // null (in "null < x")
+        {
+          DBUG_PRINT("info", ("Generating ISNOTNULL filter for nullable %s",
+                              field->field_name));
+          if (filter.isnotnull(key_part->fieldnr-1) == -1)
+            DBUG_RETURN(1);
+          break;
+        }
+      }
     }
-    else
+
+    /*
+      Case "x is null" in an EQ range.
+      Seen with index(x) for "x is null".
+      Seen with index(x,y) for "x is null and y = 1".
+      Not seen with index(x,y) for "x is null and y is null".
+      Seen only when all key parts are present (but there is
+      no reason to limit the code to this case).
+    */
+    if (start_key != 0 &&
+        start_key->flag == HA_READ_KEY_EXACT &&
+        end_key != 0 &&
+        end_key->flag == HA_READ_AFTER_KEY &&
+        start_key->length == end_key->length &&
+        memcmp(start_key->key, end_key->key, start_key->length) == 0)
     {
-      DBUG_PRINT("info", ("Generating EQ filter"));
-      if (filter.cmp(NdbScanFilter::COND_EQ, 
-		     key_part->fieldnr-1,
-		     ptr,
-		     pack_len) == -1)
-	DBUG_RETURN(1);
+      const KEY_PART_INFO* key_part=key_info->key_part;
+      const uchar* ptr=start_key->key;
+      for (uint i=0; i < key_info->key_parts; i++)
+      {
+        const Field* field=key_part->field;
+        if (key_part->null_bit) // nullable
+        {
+          if (ptr[0] != 0) // null
+          {
+            DBUG_PRINT("info", ("Generating ISNULL filter for nullable %s",
+                                field->field_name));
+            if (filter.isnull(key_part->fieldnr-1) == -1)
+              DBUG_RETURN(1);
+          }
+          else
+          {
+            DBUG_PRINT("info", ("Generating EQ filter for nullable %s",
+                                field->field_name));
+            if (filter.cmp(NdbScanFilter::COND_EQ, 
+                           key_part->fieldnr-1,
+                           ptr + 1, // skip null-indicator byte
+                           field->pack_length()) == -1)
+              DBUG_RETURN(1);
+          }
+        }
+        else
+        {
+          DBUG_PRINT("info", ("Generating EQ filter for non-nullable %s",
+                              field->field_name));
+          if (filter.cmp(NdbScanFilter::COND_EQ, 
+                         key_part->fieldnr-1,
+                         ptr,
+                         field->pack_length()) == -1)
+            DBUG_RETURN(1);
+        }
+        ptr+=key_part->store_length;
+        if (ptr - start_key->key >= start_key->length)
+        {
+          break;
+        }
+        key_part++;
+      }
+      break;
     }
-    key += key_part->store_length;
-  }      
+
+    DBUG_PRINT("info", ("Unknown hash index scan"));
+    // enable to catch new cases when optimizer changes
+    // DBUG_ASSERT(false);
+  }
+  while (0);
+
   // Add any pushed condition
   if (m_cond_stack &&
       (res= generate_scan_filter_from_cond(filter)))
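
To make the new case analysis in generate_scan_filter_from_key() easier to follow, here is a minimal standalone sketch of the decision logic only. The KeyRange struct, the READ_* flags and the classify() helper below are simplified stand-ins for MySQL's key_range and HA_READ_* definitions, not the real server code; the actual filter generation uses NdbScanFilter as shown in the hunk above.

// Simplified, standalone illustration of the case analysis in
// ha_ndbcluster_cond::generate_scan_filter_from_key() above.
// Structs and flag values are stand-ins, not the real MySQL headers.
#include <cstdio>
#include <cstring>

enum KeyFlag { READ_KEY_EXACT, READ_AFTER_KEY };   // stand-ins for HA_READ_*

struct KeyRange {                                   // stand-in for key_range
  const unsigned char* key;                         // packed key, null byte first
  unsigned length;
  KeyFlag flag;
};

// Returns a label for the filter the new code would generate.
static const char* classify(const KeyRange* start, const KeyRange* end,
                            unsigned key_parts, bool first_part_nullable)
{
  // Case "x is not null": lone range "null < x" on a single nullable part.
  if (start != 0 && start->flag == READ_AFTER_KEY && end == 0 &&
      key_parts == 1 && first_part_nullable && start->key[0] != 0)
    return "ISNOTNULL filter";

  // Case EQ range (may contain "is null" parts): identical start/end keys.
  if (start != 0 && end != 0 &&
      start->flag == READ_KEY_EXACT && end->flag == READ_AFTER_KEY &&
      start->length == end->length &&
      std::memcmp(start->key, end->key, start->length) == 0)
    return "per-part ISNULL/EQ filters";

  return "no key-derived filter (unknown hash index scan)";
}

int main()
{
  const unsigned char null_key[1] = { 1 };          // null-indicator byte set
  KeyRange start = { null_key, 1, READ_AFTER_KEY };
  std::printf("%s\n", classify(&start, 0, 1, true)); // -> ISNOTNULL filter
  return 0;
}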

=== modified file 'sql/ha_ndbcluster_cond.h'
--- a/sql/ha_ndbcluster_cond.h	2010-11-09 09:29:29 +0000
+++ b/sql/ha_ndbcluster_cond.h	2010-11-15 09:23:10 +0000
@@ -143,7 +143,7 @@ public:
   Ndb_item(Field *field, int column_no) : type(NDB_FIELD)
   {
     NDB_ITEM_FIELD_VALUE *field_value= new NDB_ITEM_FIELD_VALUE();
-    qualification.field_type= field->type();
+    qualification.field_type= field->real_type();
     field_value->field= field;
     field_value->column_no= column_no;
     value.field_value= field_value;
@@ -661,8 +661,8 @@ public:
   int generate_scan_filter_from_key(NdbInterpretedCode* code,
                                     NdbScanOperation::ScanOptions* options,
                                     const KEY* key_info, 
-                                    const uchar *key, 
-                                    uint key_len,
+                                    const key_range *start_key,
+                                    const key_range *end_key,
                                     uchar *buf);
 private:
   bool serialize_cond(const COND *cond, Ndb_cond_stack *ndb_cond,

=== modified file 'sql/ha_ndbcluster_glue.h'
--- a/sql/ha_ndbcluster_glue.h	2010-11-09 13:59:52 +0000
+++ b/sql/ha_ndbcluster_glue.h	2010-11-10 11:37:55 +0000
@@ -97,8 +97,16 @@ enum column_format_type {
 /* thd->binlog_query has new parameter "direct" */
 #define NDB_THD_BINLOG_QUERY_HAS_DIRECT
 
-#endif
+/* Global schema lock not available */
+#define NDB_NO_GLOBAL_SCHEMA_LOCK
+
+/* Missing functionality in injector -> no --ndb-log-empty-epochs */
+#define NDB_NO_LOG_EMPTY_EPOCHS
 
+/* No support for --ndb-wait_setup */
+#define NDB_NO_WAIT_SETUP
+
+#endif
 
 /* extract the bitmask of options from THD */
 static inline

=== modified file 'sql/ha_ndbinfo.cc'
--- a/sql/ha_ndbinfo.cc	2010-04-15 12:55:31 +0000
+++ b/sql/ha_ndbinfo.cc	2010-11-10 14:17:13 +0000
@@ -16,7 +16,8 @@
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
 */
 
-#include "mysql_priv.h"
+#include "ha_ndbcluster_glue.h"
+
 #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
 #include "ha_ndbinfo.h"
 #include "../storage/ndb/src/ndbapi/NdbInfo.hpp"

=== modified file 'sql/mysqld.cc'
--- a/sql/mysqld.cc	2010-10-29 20:45:08 +0000
+++ b/sql/mysqld.cc	2010-11-15 10:26:48 +0000
@@ -489,8 +489,8 @@ handlerton *myisam_hton;
 handlerton *partition_hton;
 
 #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
-ulong opt_ndb_wait_setup;
-int(*ndb_wait_setup_func)(ulong)= 0;
+extern ulong opt_ndb_wait_setup;
+extern int(*ndb_wait_setup_func)(ulong);
 #endif
 #ifndef MCP_BUG52305
 uint opt_server_id_bits= 0;

=== modified file 'storage/ndb/CMakeLists.txt'
--- a/storage/ndb/CMakeLists.txt	2010-11-10 08:01:00 +0000
+++ b/storage/ndb/CMakeLists.txt	2010-11-15 10:26:48 +0000
@@ -25,13 +25,6 @@ IF(SOURCE_SUBLIBS)
 ELSE()
   INCLUDE(${CMAKE_CURRENT_SOURCE_DIR}/ndb_configure.cmake)
 
-  IF(NOT DEFINED WITH_ZLIB)
-    # Hardcode use of the bundled zlib
-    MESSAGE(STATUS "using bundled zlib (hardcoded)")
-    SET(ZLIB_LIBRARY zlib)
-    SET(ZLIB_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/zlib)
-  ENDIF()
-
   INCLUDE_DIRECTORIES(
     # MySQL Server includes
     ${CMAKE_SOURCE_DIR}/include
@@ -50,7 +43,8 @@ ELSE()
     ${CMAKE_CURRENT_BINARY_DIR}/include
 
     # Util library includes
-    ${ZLIB_INCLUDE_DIR})
+    ${ZLIB_INCLUDE_DIR}
+    ${READLINE_INCLUDE_DIR})
 
   # The root of storage/ndb/
   SET(NDB_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
@@ -132,5 +126,5 @@ IF(EXISTS ${CMAKE_SOURCE_DIR}/storage/my
 ELSE()
   # New plugin support, cross-platform
   MYSQL_ADD_PLUGIN(ndbcluster ${NDBCLUSTER_SOURCES} STORAGE_ENGINE
-    DEFAULT MODULE_OUTPUT_NAME ha_ndb)
+    DEFAULT MODULE_OUTPUT_NAME ha_ndb LINK_LIBRARIES ndbclient)
 ENDIF()

=== added file 'storage/ndb/cmake/ndb_require_variable.cmake'
--- a/storage/ndb/cmake/ndb_require_variable.cmake	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/cmake/ndb_require_variable.cmake	2010-11-10 09:42:49 +0000
@@ -0,0 +1,27 @@
+# Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+
+# NDB_REQUIRE_VARIABLE
+#
+# Check that the variable with given name is defined
+#
+MACRO(NDB_REQUIRE_VARIABLE variable_name)
+  MESSAGE(STATUS "Checking variable ${variable_name} required by NDB")
+  IF(NOT DEFINED "${variable_name}")
+    MESSAGE(FATAL_ERROR "The variable ${variable_name} is required "
+                         "to build NDB")
+  ENDIF()
+ENDMACRO()
\ No newline at end of file

=== modified file 'storage/ndb/include/kernel/signaldata/TcKeyReq.hpp'
--- a/storage/ndb/include/kernel/signaldata/TcKeyReq.hpp	2010-02-03 15:14:07 +0000
+++ b/storage/ndb/include/kernel/signaldata/TcKeyReq.hpp	2010-11-15 10:26:48 +0000
@@ -20,6 +20,7 @@
 #define TC_KEY_REQ_H
 
 #include "SignalData.hpp"
+#include <transporter/TransporterDefinitions.hpp>
 
 /**
  * @class TcKeyReq
@@ -75,7 +76,8 @@ public:
   STATIC_CONST( SignalLength = 25 );
   STATIC_CONST( MaxKeyInfo = 8 );
   STATIC_CONST( MaxAttrInfo = 5 );
-  STATIC_CONST( MaxTotalAttrInfo = 0xFFFF );
+  STATIC_CONST( MaxTotalAttrInfo = ((MAX_SEND_MESSAGE_BYTESIZE / 4) - 
+                                    SignalLength ));
 
   /**
    * Long signal variant of TCKEYREQ
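
The new MaxTotalAttrInfo bound is derived from the transporter's maximum send size instead of the old hardcoded 0xFFFF. A rough back-of-the-envelope check, assuming MAX_SEND_MESSAGE_BYTESIZE is 32768 bytes (an assumption here, the real constant comes from transporter/TransporterDefinitions.hpp):

// Illustration only: MAX_SEND_MESSAGE_BYTESIZE == 32768 is an assumed value.
#include <cstdio>

int main()
{
  const unsigned max_send_bytes = 32768;                 // assumed
  const unsigned signal_length  = 25;                    // TcKeyReq::SignalLength
  const unsigned max_total_attr_info =
      (max_send_bytes / 4) - signal_length;              // in 32-bit words
  std::printf("MaxTotalAttrInfo = %u words (was 0xFFFF = 65535)\n",
              max_total_attr_info);                      // prints 8167
  return 0;
}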

=== modified file 'storage/ndb/include/ndb_global.h'
--- a/storage/ndb/include/ndb_global.h	2010-10-28 07:45:21 +0000
+++ b/storage/ndb/include/ndb_global.h	2010-11-11 13:19:26 +0000
@@ -46,7 +46,6 @@
 #define NDB_WIN 1
 #define PATH_MAX 256
 #define DIR_SEPARATOR "\\"
-#define MYSQLCLUSTERDIR "c:\\mysql\\mysql-cluster"
 #define HAVE_STRCASECMP
 #pragma warning(disable: 4503 4786)
 #else

=== modified file 'storage/ndb/include/portlib/ndb_prefetch.h'
--- a/storage/ndb/include/portlib/ndb_prefetch.h	2010-11-09 10:45:29 +0000
+++ b/storage/ndb/include/portlib/ndb_prefetch.h	2010-11-11 15:03:47 +0000
@@ -20,6 +20,14 @@
 
 #ifdef HAVE_SUN_PREFETCH_H
 #include <sun_prefetch.h>
+#if (defined(__SUNPRO_C) && __SUNPRO_C >= 0x590) \
+    || (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x590)
+/* Universal sun_prefetch* macros available with Sun Studio 5.9 */
+#define USE_SUN_PREFETCH
+#elif defined(__sparc)
+/* Use sparc_prefetch* macros with older Sun Studio on sparc */
+#define USE_SPARC_PREFETCH
+#endif
 #endif
 
 #ifdef HAVE_SUN_PREFETCH_H
@@ -31,7 +39,9 @@ void NDB_PREFETCH_READ(void* addr)
 {
 #if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR > 10)
   __builtin_prefetch(addr, 0, 3);
-#elif defined(HAVE_SUN_PREFETCH_H)
+#elif defined(USE_SUN_PREFETCH)
+  sun_prefetch_read_once(addr);
+#elif defined(USE_SPARC_PREFETCH)
   sparc_prefetch_read_once(addr);
 #else
   (void)addr;
@@ -43,8 +53,10 @@ void NDB_PREFETCH_WRITE(void* addr)
 {
 #if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR > 10)
   __builtin_prefetch(addr, 1, 3);
-#elif defined(HAVE_SUN_PREFETCH_H)
-  sparc_prefetch_write_once(addr);
+#elif defined(USE_SUN_PREFETCH)
+  sun_prefetch_write_once(addr);
+#elif defined(USE_SPARC_PREFETCH)
+  sun_prefetch_write_once(addr);
 #else
   (void)addr;
 #endif

=== modified file 'storage/ndb/ndb_configure.cmake'
--- a/storage/ndb/ndb_configure.cmake	2010-11-09 10:45:29 +0000
+++ b/storage/ndb/ndb_configure.cmake	2010-11-10 09:42:49 +0000
@@ -22,6 +22,7 @@ INCLUDE(CheckFunctionExists)
 INCLUDE(CheckIncludeFiles)
 INCLUDE(CheckCSourceCompiles)
 INCLUDE(CheckCXXSourceRuns)
+INCLUDE(ndb_require_variable)
 
 CHECK_FUNCTION_EXISTS(posix_memalign HAVE_POSIX_MEMALIGN)
 CHECK_FUNCTION_EXISTS(clock_gettime HAVE_CLOCK_GETTIME)
@@ -136,3 +137,13 @@ CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DI
 # generated ndb_config.h
 ADD_DEFINITIONS(-DHAVE_NDB_CONFIG_H)
 
+# check zlib
+IF(NOT DEFINED WITH_ZLIB)
+  # Hardcode use of the bundled zlib if not set by MySQL
+  MESSAGE(STATUS "Using bundled zlib (hardcoded)")
+  SET(ZLIB_LIBRARY zlib)
+  SET(ZLIB_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/zlib)
+ENDIF()
+NDB_REQUIRE_VARIABLE(ZLIB_LIBRARY)
+NDB_REQUIRE_VARIABLE(ZLIB_INCLUDE_DIR)
+

=== modified file 'storage/ndb/src/common/debugger/CMakeLists.txt'
--- a/storage/ndb/src/common/debugger/CMakeLists.txt	2009-06-03 15:38:04 +0000
+++ b/storage/ndb/src/common/debugger/CMakeLists.txt	2010-11-11 15:08:52 +0000
@@ -24,4 +24,4 @@ ADD_LIBRARY(ndbtrace STATIC
             BlockNames.cpp
             EventLogger.cpp)
 
-TARGET_LINK_LIBRARIES(ndbtrace ndbgeneral)
+TARGET_LINK_LIBRARIES(ndbtrace ndblogger ndberror ndbgeneral)

=== modified file 'storage/ndb/src/common/logger/CMakeLists.txt'
--- a/storage/ndb/src/common/logger/CMakeLists.txt	2010-10-19 15:03:27 +0000
+++ b/storage/ndb/src/common/logger/CMakeLists.txt	2010-11-11 15:08:52 +0000
@@ -24,3 +24,4 @@ ADD_LIBRARY(ndblogger STATIC
 	    SysLogHandler.cpp
             EventLogHandler.cpp
             FileLogHandler.cpp)
+TARGET_LINK_LIBRARIES(ndblogger ndbgeneral)

=== modified file 'storage/ndb/src/common/util/CMakeLists.txt'
--- a/storage/ndb/src/common/util/CMakeLists.txt	2010-10-29 08:40:49 +0000
+++ b/storage/ndb/src/common/util/CMakeLists.txt	2010-11-11 15:08:52 +0000
@@ -55,7 +55,7 @@ ADD_LIBRARY(ndbgeneral STATIC
 	    SparseBitmask.cpp
             require.c
 )
-TARGET_LINK_LIBRARIES(ndbgeneral ${ZLIB_LIBRARY} mysys)
+TARGET_LINK_LIBRARIES(ndbgeneral ndbtrace ${ZLIB_LIBRARY} mysys)
 
 ADD_EXECUTABLE(BaseString-t BaseString.cpp)
 SET_TARGET_PROPERTIES(BaseString-t

=== modified file 'storage/ndb/src/common/util/SparseBitmask.cpp'
--- a/storage/ndb/src/common/util/SparseBitmask.cpp	2010-08-28 09:37:09 +0000
+++ b/storage/ndb/src/common/util/SparseBitmask.cpp	2010-11-11 09:46:05 +0000
@@ -90,6 +90,7 @@ TAPTEST(SparseBitmask)
   return 1; // OK
 }
 
+#endif
+
 template class Vector<unsigned>;
 
-#endif

=== modified file 'storage/ndb/src/kernel/CMakeLists.txt'
--- a/storage/ndb/src/kernel/CMakeLists.txt	2010-11-10 08:01:00 +0000
+++ b/storage/ndb/src/kernel/CMakeLists.txt	2010-11-15 10:26:48 +0000
@@ -55,12 +55,12 @@ ENDIF()
 
 ADD_EXECUTABLE(ndbd
   main.cpp ndbd.cpp angel.cpp SimBlockList.cpp ${NDBD_EXTRA_SRC})
-TARGET_LINK_LIBRARIES(ndbd ${NDBD_LIBS} ndbsched)
+TARGET_LINK_LIBRARIES(ndbd ${NDBD_LIBS} ndbsched ${LIBDL})
 INSTALL(TARGETS ndbd DESTINATION libexec)
 
 IF(NDB_BUILD_NDBMTD)
   ADD_EXECUTABLE(ndbmtd
     main.cpp ndbd.cpp angel.cpp SimBlockList.cpp ${NDBD_EXTRA_SRC})
-  TARGET_LINK_LIBRARIES(ndbmtd ${NDBD_LIBS} ndbsched_mt)
+  TARGET_LINK_LIBRARIES(ndbmtd ${NDBD_LIBS} ndbsched_mt ${LIBDL})
   INSTALL(TARGETS ndbmtd DESTINATION libexec)
 ENDIF()

=== modified file 'storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp'
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp	2010-10-15 14:42:00 +0000
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp	2010-11-15 10:26:48 +0000
@@ -1246,16 +1246,10 @@ void Cmvmi::execTAMPER_ORD(Signal* signa
   {
     jam();
     signal->theData[0] = 0;
-    sendSignal(QMGR_REF, GSN_NDB_TAMPER, signal, 1, JBB);
-    sendSignal(NDBCNTR_REF, GSN_NDB_TAMPER, signal, 1, JBB);
-    sendSignal(NDBFS_REF, GSN_NDB_TAMPER, signal, 1, JBB);
-    sendSignal(DBACC_REF, GSN_NDB_TAMPER, signal, 1, JBB);
-    sendSignal(DBTUP_REF, GSN_NDB_TAMPER, signal, 1, JBB);
-    sendSignal(DBLQH_REF, GSN_NDB_TAMPER, signal, 1, JBB);
-    sendSignal(DBDICT_REF, GSN_NDB_TAMPER, signal, 1, JBB);
-    sendSignal(DBDIH_REF, GSN_NDB_TAMPER, signal, 1, JBB);
-    sendSignal(DBTC_REF, GSN_NDB_TAMPER, signal, 1, JBB);
-    sendSignal(CMVMI_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+    for (Uint32 i = 0; blocks[i] != 0; i++)
+    {
+      sendSignal(blocks[i], GSN_NDB_TAMPER, signal, 1, JBB);
+    }
     return;
   }
 

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2010-10-29 20:51:26 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2010-11-15 10:26:48 +0000
@@ -9815,8 +9815,7 @@ bool Dbtc::testFragmentDrop(Signal* sign
     signal->header.theLength = newLen + 3;
     signal->header.m_noOfSections = 0;
 
-    EXECUTE_DIRECT(DBTC, GSN_SIGNAL_DROPPED_REP, signal,
-                   newLen + 3);
+    executeFunction(GSN_SIGNAL_DROPPED_REP, signal);
     return true;
   }
   return false;

=== modified file 'storage/ndb/src/kernel/vm/SimulatedBlock.cpp'
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp	2010-11-04 09:41:08 +0000
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp	2010-11-15 10:26:48 +0000
@@ -2322,9 +2322,12 @@ SimulatedBlock::assembleFragments(Signal
       signal->header.m_fragmentInfo = 3;
 
 
+      /**
+       * NOTE: Don't use EXECUTE_DIRECT as it 
+       *       sets sendersBlockRef to reference()
+       */
       /* Perform dropped signal handling, in this thread, now */
-      EXECUTE_DIRECT(theNumber, GSN_SIGNAL_DROPPED_REP, 
-                     signal, signal->header.theLength);
+      executeFunction(GSN_SIGNAL_DROPPED_REP, signal);
       
       /* return false to caller - they should not process the signal */
       return false;

=== modified file 'storage/ndb/src/kernel/vm/SimulatedBlock.hpp'
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp	2010-11-10 08:01:00 +0000
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp	2010-11-15 10:26:48 +0000
@@ -1276,17 +1276,6 @@ SimulatedBlock::EXECUTE_DIRECT(Uint32 bl
   m_currentGsn = tGsn;
   subTime(tGsn, diff);
 #endif
-#ifdef VM_TRACE
-  if(globalData.testOn){
-    signal->header.theVerId_signalNumber = gsn;
-    signal->header.theReceiversBlockNumber = numberToBlock(block, instanceNo);
-    signal->header.theSendersBlockRef = reference();
-    globalSignalLoggers.executeDirect(signal->header,
-				      1,        // out
-				      &signal->theData[0],
-                                      globalData.ownId);
-  }
-#endif
 }
 
 /**

=== modified file 'storage/ndb/src/mgmapi/CMakeLists.txt'
--- a/storage/ndb/src/mgmapi/CMakeLists.txt	2010-10-19 12:19:31 +0000
+++ b/storage/ndb/src/mgmapi/CMakeLists.txt	2010-11-11 15:08:52 +0000
@@ -27,4 +27,4 @@ ADD_LIBRARY(ndbmgmapi STATIC
             mgmapi_configuration.cpp
             LocalConfig.cpp
             ${CMAKE_SOURCE_DIR}/storage/ndb/src/kernel/error/ndbd_exit_codes.c)
-TARGET_LINK_LIBRARIES(ndbmgmapi ndbconf)
+TARGET_LINK_LIBRARIES(ndbmgmapi ndbconf ndbportlib)

=== modified file 'storage/ndb/src/mgmapi/Makefile.am'
--- a/storage/ndb/src/mgmapi/Makefile.am	2010-08-06 08:19:19 +0000
+++ b/storage/ndb/src/mgmapi/Makefile.am	2010-11-11 13:19:26 +0000
@@ -14,8 +14,6 @@
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
 
-MYSQLCLUSTERdir=        .
-
 EXTRA_DIST = CMakeLists.txt
 
 noinst_LTLIBRARIES = libmgmapi.la
@@ -25,8 +23,7 @@ libmgmapi_la_SOURCES = mgmapi.cpp ndb_lo
 
 INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/include/mgmapi
 
-DEFS_LOC = -DNDB_MGMAPI -DMYSQLCLUSTERDIR="\"$(MYSQLCLUSTERdir)\"" \
-           -DNO_DEBUG_MESSAGES
+DEFS_LOC = -DNDB_MGMAPI
 
 include $(top_srcdir)/storage/ndb/config/common.mk.am
 include $(top_srcdir)/storage/ndb/config/type_util.mk.am

=== modified file 'storage/ndb/src/mgmclient/CMakeLists.txt'
--- a/storage/ndb/src/mgmclient/CMakeLists.txt	2010-11-09 16:03:56 +0000
+++ b/storage/ndb/src/mgmclient/CMakeLists.txt	2010-11-10 09:42:49 +0000
@@ -13,8 +13,7 @@
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 
-INCLUDE_DIRECTORIES(${NDB_SOURCE_DIR}/src/common/mgmcommon
-		    ${READLINE_INCLUDE_DIR})
+INCLUDE_DIRECTORIES(${NDB_SOURCE_DIR}/src/common/mgmcommon)
 
 ADD_LIBRARY(ndbmgmclient STATIC
             CommandInterpreter.cpp)

=== modified file 'storage/ndb/src/mgmclient/CommandInterpreter.cpp'
--- a/storage/ndb/src/mgmclient/CommandInterpreter.cpp	2010-10-13 17:17:01 +0000
+++ b/storage/ndb/src/mgmclient/CommandInterpreter.cpp	2010-11-11 09:46:05 +0000
@@ -3183,3 +3183,4 @@ err:
 }
 
 template class Vector<char const*>;
+template class Vector<int>;

=== modified file 'storage/ndb/src/mgmsrv/CMakeLists.txt'
--- a/storage/ndb/src/mgmsrv/CMakeLists.txt	2010-11-09 16:17:44 +0000
+++ b/storage/ndb/src/mgmsrv/CMakeLists.txt	2010-11-11 15:08:52 +0000
@@ -16,14 +16,30 @@
 INCLUDE_DIRECTORIES(
   ${NDB_SOURCE_DIR}/src/mgmapi
   ${NDB_SOURCE_DIR}/src/ndbapi
-  ${NDB_SOURCE_DIR}/src/mgmclient
-  ${READLINE_INCLUDE_DIR})
+  ${NDB_SOURCE_DIR}/src/mgmclient)
 
 ADD_LIBRARY(ndbconf
                Config.cpp
                ConfigInfo.cpp
                InitConfigFileParser.cpp
 )
+TARGET_LINK_LIBRARIES(ndbconf ndbmgmapi)
+
+# Define MYSQLCLUSTERDIR, the default location
+# of ndb_mgmd config files
+IF(NOT DEFINED DEFAULT_MYSQL_HOME)
+  # MySQL does not define DEFAULT_MYSQL_HOME in pre 5.5 version, fall
+  # back to our old defaults and be backwards compatible
+  IF(WIN32)
+    SET(DEFAULT_MYSQL_HOME "C:/mysql" )
+  ELSE()
+    SET(DEFAULT_MYSQL_HOME ${CMAKE_INSTALL_PREFIX})
+  ENDIF()
+ENDIF()
+SET(clusterdir ${DEFAULT_MYSQL_HOME}/mysql-cluster)
+ADD_DEFINITIONS(-DMYSQLCLUSTERDIR=\"${clusterdir}\")
+MESSAGE(STATUS "Using MYSQLCLUSTERDIR: '${clusterdir}'")
+
 
 IF(WIN32)
   # Add the rseource files for logging to event log

=== modified file 'storage/ndb/src/mgmsrv/Makefile.am'
--- a/storage/ndb/src/mgmsrv/Makefile.am	2010-08-06 08:19:19 +0000
+++ b/storage/ndb/src/mgmsrv/Makefile.am	2010-11-11 13:19:26 +0000
@@ -16,11 +16,6 @@
 
 EXTRA_DIST = CMakeLists.txt
 
-MYSQLDATAdir =		$(localstatedir)
-MYSQLSHAREdir =		$(pkgdatadir)
-MYSQLBASEdir=		$(prefix)
-MYSQLCLUSTERdir=        $(prefix)/mysql-cluster
-
 ndbbin_PROGRAMS = ndb_mgmd
 
 ndb_mgmd_SOURCES = \
@@ -55,10 +50,9 @@ LDADD_LOC = $(top_builddir)/storage/ndb/
             @NDB_SCI_LIBS@ \
 	    @TERMCAP_LIB@
 
-DEFS_LOC =		-DDEFAULT_MYSQL_HOME="\"$(MYSQLBASEdir)\"" \
-			-DMYSQL_DATADIR="\"$(MYSQLDATAdir)\"" \
-			-DSHAREDIR="\"$(MYSQLSHAREdir)\"" \
-			-DMYSQLCLUSTERDIR="\"$(MYSQLCLUSTERdir)\""
+# Define MYSQLCLUSTERDIR, the default location
+# of ndb_mgmd config files
+DEFS_LOC = -DMYSQLCLUSTERDIR="\"$(prefix)/mysql-cluster\""
 
 include $(top_srcdir)/storage/ndb/config/common.mk.am
 include $(top_srcdir)/storage/ndb/config/type_ndbapi.mk.am

=== modified file 'storage/ndb/src/mgmsrv/testConfig.cpp'
--- a/storage/ndb/src/mgmsrv/testConfig.cpp	2010-08-27 12:12:51 +0000
+++ b/storage/ndb/src/mgmsrv/testConfig.cpp	2010-11-11 09:46:05 +0000
@@ -471,3 +471,6 @@ TAPTEST(MgmConfig)
   ndb_end(0);
   return 1; // OK
 }
+
+template class Vector<const char*>;
+

=== modified file 'storage/ndb/src/ndbapi/NdbTransaction.cpp'
--- a/storage/ndb/src/ndbapi/NdbTransaction.cpp	2010-11-10 08:01:00 +0000
+++ b/storage/ndb/src/ndbapi/NdbTransaction.cpp	2010-11-15 10:26:48 +0000
@@ -989,8 +989,7 @@ NdbTransaction::doSend()
           query == lastLookupQuery && theFirstExecOpInList == NULL;
         const int tReturnCode = query->doSend(theDBnode, lastFlag);
         if (tReturnCode == -1) {
-          theReturnStatus = ReturnFailure;
-          break;
+          goto fail;
         }
         last = query;
         query = query->getNext();
@@ -1008,9 +1007,8 @@ NdbTransaction::doSend()
       const Uint32 lastFlag = ((tNext == NULL) ? 1 : 0);
       const int tReturnCode = tOp->doSend(theDBnode, lastFlag);
       if (tReturnCode == -1) {
-        theReturnStatus = ReturnFailure;
-        break;
-      }
+        goto fail;
+      }//if
       tOp = tNext;
     }
 
@@ -1052,10 +1050,12 @@ NdbTransaction::doSend()
     abort();
     break;
   }//switch
-  setOperationErrorCodeAbort(4002);
+
   theReleaseOnClose = true;
   theTransactionIsStarted = false;
   theCommitStatus = Aborted;
+fail:
+  setOperationErrorCodeAbort(4002);
   DBUG_RETURN(-1);
 }//NdbTransaction::doSend()
 

=== modified file 'storage/ndb/test/ndbapi/testDict.cpp'
--- a/storage/ndb/test/ndbapi/testDict.cpp	2010-09-30 14:27:18 +0000
+++ b/storage/ndb/test/ndbapi/testDict.cpp	2010-11-10 10:14:04 +0000
@@ -1875,7 +1875,15 @@ int getColumnMaxLength(const NdbDictiona
       return 0;
     }
 
-    length= ((1 << attrDesc.AttributeSize) * c->getLength()) >> 3;
+    if (attrDesc.AttributeSize == 0)
+    {
+      // bits...
+      length = 4 * ((c->getLength() + 31) / 32);
+    }
+    else
+    {
+      length = ((1 << attrDesc.AttributeSize) * c->getLength()) >> 3;
+    }
   }
 
   return length;
@@ -1995,6 +2003,8 @@ int runFailAddFragment(NDBT_Context* ctx
       NdbSleep_MilliSleep(SAFTY); // Hope that snapshot has arrived
       CHECK2(pDic->createTable(tab) != 0,
              "failed to fail after error insert " << errval);
+      CHECK2(restarter.insertErrorInNode(nodeId, 0) == 0,
+             "failed to clean error insert value");
       CHECK(restarter.dumpStateAllNodes(&dump2, 1) == 0);
       NdbSleep_MilliSleep(SAFTY); // Hope that snapshot has arrived
       CHECK2(pDic->createTable(tab) == 0,
@@ -2014,6 +2024,8 @@ int runFailAddFragment(NDBT_Context* ctx
       NdbSleep_MilliSleep(SAFTY); // Hope that snapshot has arrived
       CHECK2(pDic->createTable(tab) != 0,
              "failed to fail after error insert " << errval);
+      CHECK2(restarter.insertErrorInNode(nodeId, 0) == 0,
+             "failed to clean error insert value");
       CHECK(restarter.dumpStateAllNodes(&dump2, 1) == 0);
       NdbSleep_MilliSleep(SAFTY); // Hope that snapshot has arrived
       CHECK2(pDic->createTable(tab) == 0,
@@ -2037,6 +2049,8 @@ int runFailAddFragment(NDBT_Context* ctx
 
       CHECK2(pDic->createIndex(idx) != 0,
              "failed to fail after error insert " << errval);
+      CHECK2(restarter.insertErrorInNode(nodeId, 0) == 0,
+             "failed to clean error insert value");
       CHECK(restarter.dumpStateAllNodes(&dump2, 1) == 0);
       NdbSleep_MilliSleep(SAFTY); // Hope that snapshot has arrived
       CHECK2(pDic->createIndex(idx) == 0,
@@ -7098,6 +7112,8 @@ loop:
       NdbDictionary::HashMap check;
       CHECK2(res != 0, "create hashmap existed");
       
+      CHECK2(restarter.insertErrorInNode(nodeId, 0) == 0,
+             "failed to clear error insert");
       CHECK(restarter.dumpStateAllNodes(&dump2, 1) == 0);
     }
   }
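
The new bit-column branch in getColumnMaxLength() rounds the length up to whole 32-bit words instead of truncating; for example, with a Bit(33) column the old formula yields 4 bytes while the new one yields 8. A worked example in plain C++ (not NDB API code):

// Worked example of the bit-column length fix above.
#include <cstdio>

int main()
{
  const int bits      = 33;                        // e.g. a Bit(33) column
  const int attr_size = 0;                         // AttributeSize == 0 => bits
  const int old_len   = ((1 << attr_size) * bits) >> 3;  // truncates to 4
  const int new_len   = 4 * ((bits + 31) / 32);           // rounds up to 8
  std::printf("old=%d bytes, new=%d bytes\n", old_len, new_len);
  return 0;
}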

=== modified file 'storage/ndb/test/ndbapi/testNdbApi.cpp'
--- a/storage/ndb/test/ndbapi/testNdbApi.cpp	2010-11-05 10:26:02 +0000
+++ b/storage/ndb/test/ndbapi/testNdbApi.cpp	2010-11-10 12:28:34 +0000
@@ -351,6 +351,7 @@ int runTestGetValue(NDBT_Context* ctx, N
       case 880: // TUP - Read too much
       case 823: // TUP - Too much AI
       case 4257: // NDBAPI - Too much AI
+      case 4002: // NDBAPI - send problem
 	// OK errors
 	ERR(pCon->getNdbError());
 	break;

=== modified file 'storage/ndb/tools/CMakeLists.txt'
--- a/storage/ndb/tools/CMakeLists.txt	2010-10-28 08:57:25 +0000
+++ b/storage/ndb/tools/CMakeLists.txt	2010-11-11 13:19:26 +0000
@@ -50,7 +50,6 @@ TARGET_LINK_LIBRARIES(ndb_config ndbmgmc
 SET(options "-I${CMAKE_SOURCE_DIR}/storage/ndb/src/mgmapi")
 SET(options "${options} -I${CMAKE_SOURCE_DIR}/storage/ndb/src/mgmsrv")
 SET(options "${options} -I${CMAKE_SOURCE_DIR}/storage/ndb/include/mgmcommon")
-#SET(options "${options} -DMYSQLCLUSTERDIR=\"\\\"\\\"\"")
 SET_TARGET_PROPERTIES(ndb_config PROPERTIES
                       COMPILE_FLAGS "${options}")
 

=== modified file 'storage/ndb/tools/Makefile.am'
--- a/storage/ndb/tools/Makefile.am	2010-08-06 08:19:19 +0000
+++ b/storage/ndb/tools/Makefile.am	2010-11-11 13:19:26 +0000
@@ -63,8 +63,7 @@ ndb_config_SOURCES = ndb_config.cpp \
 
 ndb_config_CXXFLAGS = -I$(top_srcdir)/storage/ndb/src/mgmapi \
                       -I$(top_srcdir)/storage/ndb/src/mgmsrv \
-                      -I$(top_srcdir)/storage/ndb/include/mgmcommon \
-                      -DMYSQLCLUSTERDIR="\"\""
+                      -I$(top_srcdir)/storage/ndb/include/mgmcommon
 
 ndb_restore_LDADD = $(top_builddir)/storage/ndb/src/common/util/libndbazio.la \
                     $(LDADD)

No bundle (reason: revision is a merge).