List: Commits
From: jonas oreland  Date: March 30 2011 7:32am
Subject: bzr commit into mysql-5.1-telco-7.2 branch (jonas:4141)
#At file:///home/jonas/src/telco-7.2/ based on revid:jonas@stripped

 4141 jonas oreland	2011-03-30 [merge]
      ndb - merge 71 to 720

    added:
      storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/ColumnType.java
    modified:
      mysql-test/r/partition.result
      mysql-test/r/subselect4.result
      mysql-test/suite/ndb/r/ndb_restore_compat_downward.result
      mysql-test/suite/ndb/r/ndb_restore_misc.result
      mysql-test/t/subselect4.test
      sql/ha_ndbcluster.cc
      sql/handler.cc
      sql/item_cmpfunc.cc
      sql/item_subselect.cc
      sql/opt_range.cc
      sql/opt_range.h
      sql/sql_base.cc
      sql/sql_select.cc
      storage/ndb/clusterj/clusterj-api/Makefile.am
      storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/ColumnMetadata.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/AbstractDomainFieldHandlerImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainFieldHandlerImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/Column.java
      storage/ndb/clusterj/clusterj-jdbc/Makefile.am
      storage/ndb/clusterj/clusterj-jdbc/src/main/antlr3/com/mysql/clusterj/jdbc/antlr/MySQL51Parser.g
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/DynamicObjectTest.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ColumnImpl.java
      storage/ndb/include/kernel/ndb_limits.h
      storage/ndb/include/mgmapi/mgmapi_config_parameters.h
      storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
      storage/ndb/src/kernel/vm/Configuration.cpp
      storage/ndb/src/mgmsrv/ConfigInfo.cpp
      storage/ndb/src/ndbapi/ObjectMap.cpp
      storage/ndb/src/ndbapi/ObjectMap.hpp
      storage/ndb/test/ndbapi/testScan.cpp
      storage/ndb/test/run-test/daily-basic-tests.txt
      storage/ndb/tools/restore/consumer_restore.cpp
=== modified file 'mysql-test/r/partition.result'
--- a/mysql-test/r/partition.result	2010-03-22 12:30:27 +0000
+++ b/mysql-test/r/partition.result	2011-03-23 13:15:16 +0000
@@ -1563,8 +1563,8 @@ insert into t1 values (18446744073709551
 select * from t1;
 a
 18446744073709551612
-18446744073709551613
 18446744073709551614
+18446744073709551613
 18446744073709551615
 select * from t1 where a = 18446744073709551615;
 a
@@ -1573,8 +1573,8 @@ delete from t1 where a = 184467440737095
 select * from t1;
 a
 18446744073709551612
-18446744073709551613
 18446744073709551614
+18446744073709551613
 drop table t1;
 CREATE TABLE t1 (
 num int(11) NOT NULL, cs int(11) NOT NULL)

=== modified file 'mysql-test/r/subselect4.result'
--- a/mysql-test/r/subselect4.result	2010-08-05 10:42:14 +0000
+++ b/mysql-test/r/subselect4.result	2011-03-24 09:27:05 +0000
@@ -77,6 +77,92 @@ Note	1249	Select 2 was reduced during op
 CREATE VIEW v1 AS SELECT 1 LIKE ( 1 IN ( SELECT 1 ) );
 CREATE VIEW v2 AS SELECT 1 LIKE '%' ESCAPE ( 1 IN ( SELECT 1 ) );
 DROP VIEW v1, v2;
+# 
+# Bug#51070: Query with a NOT IN subquery predicate returns a wrong
+# result set
+# 
+CREATE TABLE t1 ( a INT, b INT );
+INSERT INTO t1 VALUES ( 1, NULL ), ( 2, NULL );
+CREATE TABLE t2 ( c INT, d INT );
+INSERT INTO t2 VALUES ( NULL, 3 ), ( NULL, 4 );
+CREATE TABLE t3 ( e INT, f INT );
+INSERT INTO t3 VALUES ( NULL, NULL ), ( NULL, NULL );
+CREATE TABLE t4 ( a INT );
+INSERT INTO t4 VALUES (1), (2), (3);
+CREATE TABLE t5 ( a INT );
+INSERT INTO t5 VALUES (NULL), (2);
+EXPLAIN
+SELECT * FROM t1 WHERE ( a, b ) NOT IN ( SELECT c, d FROM t2 );
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+x	PRIMARY	x	x	x	x	x	x	x	x
+x	DEPENDENT SUBQUERY	x	x	x	x	x	x	x	x
+SELECT * FROM t1 WHERE ( a, b ) NOT IN ( SELECT c, d FROM t2 );
+a	b
+EXPLAIN
+SELECT * FROM t1 WHERE ( a, b ) NOT IN ( SELECT c, d FROM t2 ) IS NULL;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	PRIMARY	t1	ALL	NULL	NULL	NULL	NULL	2	Using where
+2	DEPENDENT SUBQUERY	t2	ALL	NULL	NULL	NULL	NULL	2	Using where
+SELECT * FROM t1 WHERE ( a, b ) NOT IN ( SELECT c, d FROM t2 ) IS NULL;
+a	b
+1	NULL
+2	NULL
+SELECT * FROM t1 WHERE ( a, b ) IN ( SELECT c, d FROM t2 ) IS NULL;
+a	b
+1	NULL
+2	NULL
+SELECT * FROM t1 WHERE ( a, b ) NOT IN ( SELECT c, d FROM t2 ) IS UNKNOWN;
+a	b
+1	NULL
+2	NULL
+SELECT * FROM t1 WHERE (( a, b ) NOT IN ( SELECT c, d FROM t2 )) IS UNKNOWN;
+a	b
+1	NULL
+2	NULL
+SELECT * FROM t1 WHERE 1 = 1 AND ( a, b ) NOT IN ( SELECT c, d FROM t2 );
+a	b
+EXPLAIN
+SELECT * FROM t1 WHERE ( a, b ) NOT IN ( SELECT e, f FROM t3 );
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+x	PRIMARY	x	x	x	x	x	x	x	x
+x	DEPENDENT SUBQUERY	x	x	x	x	x	x	x	x
+SELECT * FROM t1 WHERE ( a, b ) NOT IN ( SELECT e, f FROM t3 );
+a	b
+EXPLAIN
+SELECT * FROM t2 WHERE ( c, d ) NOT IN ( SELECT a, b FROM t1 );
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+x	PRIMARY	x	x	x	x	x	x	x	x
+x	DEPENDENT SUBQUERY	x	x	x	x	x	x	x	x
+SELECT * FROM t2 WHERE ( c, d ) NOT IN ( SELECT a, b FROM t1 );
+c	d
+EXPLAIN
+SELECT * FROM t3 WHERE ( e, f ) NOT IN ( SELECT c, d FROM t2 );
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+x	PRIMARY	x	x	x	x	x	x	x	x
+x	DEPENDENT SUBQUERY	x	x	x	x	x	x	x	x
+SELECT * FROM t3 WHERE ( e, f ) NOT IN ( SELECT c, d FROM t2 );
+e	f
+EXPLAIN
+SELECT * FROM t2 WHERE ( c, d ) NOT IN ( SELECT e, f FROM t3 );
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+x	PRIMARY	x	x	x	x	x	x	x	x
+x	DEPENDENT SUBQUERY	x	x	x	x	x	x	x	x
+SELECT * FROM t2 WHERE ( c, d ) NOT IN ( SELECT e, f FROM t3 );
+c	d
+SELECT * FROM t1 WHERE ( a, b ) NOT IN 
+( SELECT c, d FROM t2 WHERE c = 1 AND c <> 1 );
+a	b
+1	NULL
+2	NULL
+SELECT * FROM t1 WHERE b NOT IN ( SELECT c FROM t2 WHERE c = 1 );
+a	b
+1	NULL
+2	NULL
+SELECT * FROM t1 WHERE NULL NOT IN ( SELECT c FROM t2 WHERE c = 1 AND c <> 1 );
+a	b
+1	NULL
+2	NULL
+DROP TABLE t1, t2, t3, t4, t5;
 #
 # End of 5.1 tests.
 #

=== modified file 'mysql-test/suite/ndb/r/ndb_restore_compat_downward.result'
--- a/mysql-test/suite/ndb/r/ndb_restore_compat_downward.result	2011-02-22 01:15:42 +0000
+++ b/mysql-test/suite/ndb/r/ndb_restore_compat_downward.result	2011-03-24 10:00:24 +0000
@@ -46,7 +46,7 @@ SYSTEM_VALUES_ID	VALUE
 1	3
 SELECT * FROM mysql.ndb_apply_status WHERE server_id=0;
 server_id	epoch	log_name	start_pos	end_pos
-0	151		0	0
+0	152		0	0
 ForceVarPart: 1
 ForceVarPart: 1
 ForceVarPart: 1
@@ -110,7 +110,7 @@ SYSTEM_VALUES_ID	VALUE
 1	5
 SELECT * FROM mysql.ndb_apply_status WHERE server_id=0;
 server_id	epoch	log_name	start_pos	end_pos
-0	331		0	0
+0	332		0	0
 SELECT * FROM DESCRIPTION ORDER BY USERNAME;
 USERNAME	ADDRESS
 Guangbao Ni	Suite 503, 5F NCI Tower, A12 Jianguomenwai Avenue Chaoyang District, Beijing, 100022  PRC

=== modified file 'mysql-test/suite/ndb/r/ndb_restore_misc.result'
--- a/mysql-test/suite/ndb/r/ndb_restore_misc.result	2011-02-22 03:29:24 +0000
+++ b/mysql-test/suite/ndb/r/ndb_restore_misc.result	2011-03-24 10:00:24 +0000
@@ -575,10 +575,10 @@ c1	c2	c3
 drop table t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t11_c;
 select epoch from mysql.ndb_apply_status where server_id=0;
 epoch
-331
+332
 select epoch from mysql.ndb_apply_status where server_id=0;
 epoch
-151
+152
 select epoch > (1 << 32) from mysql.ndb_apply_status where server_id=0;
 epoch > (1 << 32)
 1

=== modified file 'mysql-test/t/subselect4.test'
--- a/mysql-test/t/subselect4.test	2010-08-05 10:42:14 +0000
+++ b/mysql-test/t/subselect4.test	2011-03-24 09:27:05 +0000
@@ -74,6 +74,68 @@ CREATE VIEW v1 AS SELECT 1 LIKE ( 1 IN (
 CREATE VIEW v2 AS SELECT 1 LIKE '%' ESCAPE ( 1 IN ( SELECT 1 ) );
 DROP VIEW v1, v2;
 
+--echo # 
+--echo # Bug#51070: Query with a NOT IN subquery predicate returns a wrong
+--echo # result set
+--echo # 
+CREATE TABLE t1 ( a INT, b INT );
+INSERT INTO t1 VALUES ( 1, NULL ), ( 2, NULL );
+
+CREATE TABLE t2 ( c INT, d INT );
+INSERT INTO t2 VALUES ( NULL, 3 ), ( NULL, 4 );
+
+CREATE TABLE t3 ( e INT, f INT );
+INSERT INTO t3 VALUES ( NULL, NULL ), ( NULL, NULL );
+
+CREATE TABLE t4 ( a INT );
+INSERT INTO t4 VALUES (1), (2), (3);
+
+CREATE TABLE t5 ( a INT );
+INSERT INTO t5 VALUES (NULL), (2);
+
+--replace_column 1 x 3 x 4 x 5 x 6 x 7 x 8 x 9 x 10 x
+EXPLAIN
+SELECT * FROM t1 WHERE ( a, b ) NOT IN ( SELECT c, d FROM t2 );
+SELECT * FROM t1 WHERE ( a, b ) NOT IN ( SELECT c, d FROM t2 );
+
+EXPLAIN
+SELECT * FROM t1 WHERE ( a, b ) NOT IN ( SELECT c, d FROM t2 ) IS NULL;
+SELECT * FROM t1 WHERE ( a, b ) NOT IN ( SELECT c, d FROM t2 ) IS NULL;
+SELECT * FROM t1 WHERE ( a, b ) IN ( SELECT c, d FROM t2 ) IS NULL;
+SELECT * FROM t1 WHERE ( a, b ) NOT IN ( SELECT c, d FROM t2 ) IS UNKNOWN;
+SELECT * FROM t1 WHERE (( a, b ) NOT IN ( SELECT c, d FROM t2 )) IS UNKNOWN;
+
+SELECT * FROM t1 WHERE 1 = 1 AND ( a, b ) NOT IN ( SELECT c, d FROM t2 );
+
+--replace_column 1 x 3 x 4 x 5 x 6 x 7 x 8 x 9 x 10 x
+EXPLAIN
+SELECT * FROM t1 WHERE ( a, b ) NOT IN ( SELECT e, f FROM t3 );
+SELECT * FROM t1 WHERE ( a, b ) NOT IN ( SELECT e, f FROM t3 );
+
+--replace_column 1 x 3 x 4 x 5 x 6 x 7 x 8 x 9 x 10 x
+EXPLAIN
+SELECT * FROM t2 WHERE ( c, d ) NOT IN ( SELECT a, b FROM t1 );
+SELECT * FROM t2 WHERE ( c, d ) NOT IN ( SELECT a, b FROM t1 );
+
+--replace_column 1 x 3 x 4 x 5 x 6 x 7 x 8 x 9 x 10 x
+EXPLAIN
+SELECT * FROM t3 WHERE ( e, f ) NOT IN ( SELECT c, d FROM t2 );
+SELECT * FROM t3 WHERE ( e, f ) NOT IN ( SELECT c, d FROM t2 );
+
+--replace_column 1 x 3 x 4 x 5 x 6 x 7 x 8 x 9 x 10 x
+EXPLAIN
+SELECT * FROM t2 WHERE ( c, d ) NOT IN ( SELECT e, f FROM t3 );
+SELECT * FROM t2 WHERE ( c, d ) NOT IN ( SELECT e, f FROM t3 );
+
+SELECT * FROM t1 WHERE ( a, b ) NOT IN 
+  ( SELECT c, d FROM t2 WHERE c = 1 AND c <> 1 );
+
+SELECT * FROM t1 WHERE b NOT IN ( SELECT c FROM t2 WHERE c = 1 );
+
+SELECT * FROM t1 WHERE NULL NOT IN ( SELECT c FROM t2 WHERE c = 1 AND c <> 1 );
+
+DROP TABLE t1, t2, t3, t4, t5;
+
 
 --echo #
 --echo # End of 5.1 tests.

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2011-03-16 06:54:54 +0000
+++ b/sql/ha_ndbcluster.cc	2011-03-29 19:03:46 +0000
@@ -4994,6 +4994,7 @@ int ha_ndbcluster::end_bulk_delete()
                           &ignore_count) != 0)
     {
       no_uncommitted_rows_execute_failure();
+      m_is_bulk_delete = false;
       DBUG_RETURN(ndb_err(m_thd_ndb->trans));
     }
     assert(m_rows_deleted >= ignore_count);
@@ -5575,7 +5576,11 @@ int ha_ndbcluster::index_first(uchar *bu
   // Start the ordered index scan and fetch the first row
 
   // Only HA_READ_ORDER indexes get called by index_first
+#ifdef MCP_BUG11764737
   const int error= ordered_index_scan(0, 0, TRUE, FALSE, buf, NULL);
+#else
+  const int error= ordered_index_scan(0, 0, m_sorted, FALSE, buf, NULL);
+#endif
   table->status=error ? STATUS_NOT_FOUND: 0;
   DBUG_RETURN(error);
 }
@@ -5585,7 +5590,11 @@ int ha_ndbcluster::index_last(uchar *buf
 {
   DBUG_ENTER("ha_ndbcluster::index_last");
   ha_statistic_increment(&SSV::ha_read_last_count);
+#ifdef MCP_BUG11764737
   const int error= ordered_index_scan(0, 0, TRUE, TRUE, buf, NULL);
+#else
+  const int error= ordered_index_scan(0, 0, m_sorted, TRUE, buf, NULL);
+#endif
   table->status=error ? STATUS_NOT_FOUND: 0;
   DBUG_RETURN(error);
 }
@@ -6242,14 +6251,6 @@ int ha_ndbcluster::reset()
 
   assert(m_is_bulk_delete == false);
   m_is_bulk_delete = false;
-
-  /* 
-    Setting pushed_code=NULL here is a temporary fix for bug #58553. This
-    should not be needed any longer if http://lists.mysql.com/commits/125336 
-    is merged into this branch.
-  */
-  pushed_cond= NULL;
-
   DBUG_RETURN(0);
 }
 

=== modified file 'sql/handler.cc'
--- a/sql/handler.cc	2011-02-04 14:49:48 +0000
+++ b/sql/handler.cc	2011-03-24 10:00:09 +0000
@@ -4917,6 +4917,7 @@ int handler::ha_reset()
   free_io_cache(table);
   /* reset the bitmaps to point to defaults */
   table->default_column_bitmaps();
+  pushed_cond= NULL;
   DBUG_RETURN(reset());
 }
 

=== modified file 'sql/item_cmpfunc.cc'
--- a/sql/item_cmpfunc.cc	2011-01-14 08:54:47 +0000
+++ b/sql/item_cmpfunc.cc	2011-03-24 09:27:05 +0000
@@ -1752,6 +1752,76 @@ bool Item_in_optimizer::fix_fields(THD *
 }
 
 
+/**
+   The implementation of optimized \<outer expression\> [NOT] IN \<subquery\>
+   predicates. The implementation works as follows.
+
+   For the current value of the outer expression
+   
+   - If it contains only NULL values, and the original (before rewrite by the
+     Item_in_subselect rewrite methods) inner subquery is non-correlated and
+     was previously executed, there is no need to re-execute it; the
+     previous return value is returned.
+
+   - If it contains NULL values, check if there is a partial match for the
+     inner query block by evaluating it. For clarity we repeat here the
+     transformation previously performed on the sub-query. The expression
+
+     <tt>
+     ( oc_1, ..., oc_n ) 
+     \<in predicate\>
+     ( SELECT ic_1, ..., ic_n
+       FROM \<table\>
+       WHERE \<inner where\> 
+     )
+     </tt>
+
+     was transformed into
+     
+     <tt>
+     ( oc_1, ..., oc_n ) 
+     \<in predicate\>
+     ( SELECT ic_1, ..., ic_n 
+       FROM \<table\> 
+       WHERE \<inner where\> AND ... ( ic_k = oc_k OR ic_k IS NULL ) 
+       HAVING ... NOT ic_k IS NULL
+     )
+     </tt>
+
+     The evaluation will now proceed according to special rules set up
+     elsewhere. These rules include:
+
+     - The HAVING NOT \<inner column\> IS NULL conditions added by the
+       aforementioned rewrite methods will detect whether they evaluated (and
+       rejected) a NULL value and if so, will cause the subquery to evaluate
+       to NULL. 
+
+     - The added WHERE and HAVING conditions are present only for those inner
+       columns that correspond to outer columns that are not NULL at the moment.
+     
+     - If there is an eligible index for executing the subquery, the special
+       access method "Full scan on NULL key" is employed which ensures that
+       the inner query will detect if there are NULL values resulting from the
+       inner query. This access method will quietly resort to table scan if it
+       needs to find NULL values as well.
+
+     - Under these conditions, the sub-query need only be evaluated in order to
+       find out whether it produced any rows.
+     
+       - If it did, we know that there was a partial match since there are
+         NULL values in the outer row expression.
+
+       - If it did not, the result is FALSE or UNKNOWN. If at least one of the
+         HAVING sub-predicates rejected a NULL value corresponding to an outer
+         non-NULL, and hence the inner query block returns UNKNOWN upon
+         evaluation, there was a partial match and the result is UNKNOWN.
+
+   - If it contains no NULL values, the call is forwarded to the inner query
+     block.
+
+     @see Item_in_subselect::val_bool()
+     @see Item_is_not_null_test::val_int()
+ */
 longlong Item_in_optimizer::val_int()
 {
   bool tmp;
@@ -1805,7 +1875,7 @@ longlong Item_in_optimizer::val_int()
           all_left_cols_null= false;
       }
 
-      if (!((Item_in_subselect*)args[1])->is_correlated && 
+      if (!item_subs->is_correlated && 
           all_left_cols_null && result_for_null_param != UNKNOWN)
       {
         /* 
@@ -1819,8 +1889,11 @@ longlong Item_in_optimizer::val_int()
       else 
       {
         /* The subquery has to be evaluated */
-        (void) args[1]->val_bool_result();
-        null_value= !item_subs->engine->no_rows();
+        (void) item_subs->val_bool_result();
+        if (item_subs->engine->no_rows())
+          null_value= item_subs->null_value;
+        else
+          null_value= TRUE;
         if (all_left_cols_null)
           result_for_null_param= null_value;
       }
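
The following is only an illustration (not part of this changeset) of roughly how
the rewrite documented in the comment above applies to the Bug#51070 tables
t1(a,b) and t2(c,d) from subselect4.test; the guard below uses the concrete outer
value a = 1, and the rewritten form is a paraphrase of the comment, not the exact
internal item tree:

    SELECT * FROM t1 WHERE ( a, b ) NOT IN ( SELECT c, d FROM t2 );

    # outer row (1, NULL): a is not NULL, b is NULL, so only c gets guards
    SELECT c, d FROM t2
    WHERE ( c = 1 OR c IS NULL )
    HAVING NOT c IS NULL;

If this evaluation returns rows there is a partial match and the NOT IN predicate
is UNKNOWN for that outer row; if it returns no rows but the HAVING guard rejected
a NULL in c, the result is likewise UNKNOWN. In both cases the outer row is
filtered out, which matches the empty result sets in the new test output above.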

=== modified file 'sql/item_subselect.cc'
--- a/sql/item_subselect.cc	2011-01-14 13:36:47 +0000
+++ b/sql/item_subselect.cc	2011-03-24 09:27:05 +0000
@@ -50,7 +50,7 @@ Item_subselect::Item_subselect():
     item value is NULL if select_subselect not changed this value
     (i.e. some rows will be found returned)
   */
-  null_value= 1;
+  null_value= TRUE;
 }
 
 
@@ -430,9 +430,9 @@ void Item_maxmin_subselect::print(String
 
 void Item_singlerow_subselect::reset()
 {
-  null_value= 1;
+  null_value= TRUE;
   if (value)
-    value->null_value= 1;
+    value->null_value= TRUE;
 }
 
 
@@ -577,7 +577,7 @@ double Item_singlerow_subselect::val_rea
   DBUG_ASSERT(fixed == 1);
   if (!exec() && !value->null_value)
   {
-    null_value= 0;
+    null_value= FALSE;
     return value->val_real();
   }
   else
@@ -592,7 +592,7 @@ longlong Item_singlerow_subselect::val_i
   DBUG_ASSERT(fixed == 1);
   if (!exec() && !value->null_value)
   {
-    null_value= 0;
+    null_value= FALSE;
     return value->val_int();
   }
   else
@@ -606,7 +606,7 @@ String *Item_singlerow_subselect::val_st
 {
   if (!exec() && !value->null_value)
   {
-    null_value= 0;
+    null_value= FALSE;
     return value->val_str(str);
   }
   else
@@ -621,7 +621,7 @@ my_decimal *Item_singlerow_subselect::va
 {
   if (!exec() && !value->null_value)
   {
-    null_value= 0;
+    null_value= FALSE;
     return value->val_decimal(decimal_value);
   }
   else
@@ -636,7 +636,7 @@ bool Item_singlerow_subselect::val_bool(
 {
   if (!exec() && !value->null_value)
   {
-    null_value= 0;
+    null_value= FALSE;
     return value->val_bool();
   }
   else
@@ -654,7 +654,7 @@ Item_exists_subselect::Item_exists_subse
   bool val_bool();
   init(select_lex, new select_exists_subselect(this));
   max_columns= UINT_MAX;
-  null_value= 0; //can't be NULL
+  null_value= FALSE; //can't be NULL
   maybe_null= 0; //can't be NULL
   value= 0;
   DBUG_VOID_RETURN;
@@ -817,15 +817,14 @@ double Item_in_subselect::val_real()
   */
   DBUG_ASSERT(0);
   DBUG_ASSERT(fixed == 1);
-  null_value= 0;
+  null_value= was_null= FALSE;
   if (exec())
   {
     reset();
-    null_value= 1;
     return 0;
   }
   if (was_null && !value)
-    null_value= 1;
+    null_value= TRUE;
   return (double) value;
 }
 
@@ -838,15 +837,14 @@ longlong Item_in_subselect::val_int()
   */
   DBUG_ASSERT(0);
   DBUG_ASSERT(fixed == 1);
-  null_value= 0;
+  null_value= was_null= FALSE;
   if (exec())
   {
     reset();
-    null_value= 1;
     return 0;
   }
   if (was_null && !value)
-    null_value= 1;
+    null_value= TRUE;
   return value;
 }
 
@@ -859,16 +857,15 @@ String *Item_in_subselect::val_str(Strin
   */
   DBUG_ASSERT(0);
   DBUG_ASSERT(fixed == 1);
-  null_value= 0;
+  null_value= was_null= FALSE;
   if (exec())
   {
     reset();
-    null_value= 1;
     return 0;
   }
   if (was_null && !value)
   {
-    null_value= 1;
+    null_value= TRUE;
     return 0;
   }
   str->set((ulonglong)value, &my_charset_bin);
@@ -879,20 +876,14 @@ String *Item_in_subselect::val_str(Strin
 bool Item_in_subselect::val_bool()
 {
   DBUG_ASSERT(fixed == 1);
-  null_value= 0;
+  null_value= was_null= FALSE;
   if (exec())
   {
     reset();
-    /* 
-      Must mark the IN predicate as NULL so as to make sure an enclosing NOT
-      predicate will return FALSE. See the comments in 
-      subselect_uniquesubquery_engine::copy_ref_key for further details.
-    */
-    null_value= 1;
     return 0;
   }
   if (was_null && !value)
-    null_value= 1;
+    null_value= TRUE;
   return value;
 }
 
@@ -903,16 +894,15 @@ my_decimal *Item_in_subselect::val_decim
     method should not be used
   */
   DBUG_ASSERT(0);
-  null_value= 0;
+  null_value= was_null= FALSE;
   DBUG_ASSERT(fixed == 1);
   if (exec())
   {
     reset();
-    null_value= 1;
     return 0;
   }
   if (was_null && !value)
-    null_value= 1;
+    null_value= TRUE;
   int2my_decimal(E_DEC_FATAL_ERROR, value, 0, decimal_value);
   return decimal_value;
 }

=== modified file 'sql/opt_range.cc'
--- a/sql/opt_range.cc	2010-10-06 10:06:47 +0000
+++ b/sql/opt_range.cc	2011-03-23 13:15:16 +0000
@@ -1097,7 +1097,12 @@ SQL_SELECT::~SQL_SELECT()
 #undef index					// Fix for Unixware 7
 
 QUICK_SELECT_I::QUICK_SELECT_I()
+#ifdef MCP_BUG11764737
   :max_used_key_length(0),
+#else
+  :sorted(false),
+   max_used_key_length(0),
+#endif
    used_key_parts(0)
 {}
 
@@ -1109,7 +1114,9 @@ QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(T
   DBUG_ENTER("QUICK_RANGE_SELECT::QUICK_RANGE_SELECT");
 
   in_ror_merged_scan= 0;
+#ifdef MCP_BUG11764737
   sorted= 0;
+#endif
   index= key_nr;
   head=  table;
   key_part_info= head->key_info[index].key_part;
@@ -4983,7 +4990,11 @@ QUICK_SELECT_I *TRP_ROR_INTERSECT::make_
     {
       if (!(quick= get_quick_select(param, (*first_scan)->idx,
                                     (*first_scan)->sel_arg, alloc)) ||
+#ifdef MCP_BUG11764737
           quick_intrsect->push_quick_back(quick))
+#else
+          (quick->sorted= 1, quick_intrsect->push_quick_back(quick)))
+#endif
       {
         delete quick_intrsect;
         DBUG_RETURN(NULL);
@@ -4998,6 +5009,9 @@ QUICK_SELECT_I *TRP_ROR_INTERSECT::make_
         DBUG_RETURN(NULL);
       }
       quick->file= NULL; 
+#ifndef MCP_BUG11764737
+      quick->sorted= 1;
+#endif
       quick_intrsect->cpk_quick= quick;
     }
     quick_intrsect->records= records;
@@ -5024,7 +5038,11 @@ QUICK_SELECT_I *TRP_ROR_UNION::make_quic
     for (scan= first_ror; scan != last_ror; scan++)
     {
       if (!(quick= (*scan)->make_quick(param, FALSE, &quick_roru->alloc)) ||
+#ifdef MCP_BUG11764737
           quick_roru->push_quick_back(quick))
+#else
+          (quick->sorted= 1, quick_roru->push_quick_back(quick)))
+#endif
         DBUG_RETURN(NULL);
     }
     quick_roru->records= records;
@@ -8454,7 +8472,11 @@ int QUICK_RANGE_SELECT::reset()
   in_range= FALSE;
   cur_range= (QUICK_RANGE**) ranges.buffer;
 
+#ifdef MCP_BUG11764737
   if (file->inited == handler::NONE && (error= file->ha_index_init(index,1)))
+#else
+  if (file->inited == handler::NONE && (error= file->ha_index_init(index,sorted)))
+#endif
     DBUG_RETURN(error);
  
   /* Do not allocate the buffers twice. */
@@ -8671,7 +8693,11 @@ int QUICK_RANGE_SELECT::get_next_prefix(
     result= file->read_range_first(last_range->min_keypart_map ? &start_key : 0,
 				   last_range->max_keypart_map ? &end_key : 0,
                                    test(last_range->flag & EQ_RANGE),
+#ifdef MCP_BUG11764737
 				   TRUE);
+#else
+				   sorted);
+#endif
     if (last_range->flag == (UNIQUE_RANGE | EQ_RANGE))
       last_range= 0;			// Stop searching
 
@@ -8784,6 +8810,10 @@ QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUI
   multi_range= NULL;
   multi_range_buff= NULL;
 
+#ifndef MCP_BUG11764737
+  sorted= 1;      // 'sorted' as internals use index_last/_prev
+#endif
+
   QUICK_RANGE **pr= (QUICK_RANGE**)ranges.buffer;
   QUICK_RANGE **end_range= pr + ranges.elements;
   for (; pr!=end_range; pr++)
@@ -8802,6 +8832,16 @@ QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUI
 }
 
 
+#ifndef MCP_BUG11764737
+int QUICK_SELECT_DESC::reset(void)
+{
+  sorted= 1; // 'sorted' index access is required by internals
+  rev_it.rewind();
+  return QUICK_RANGE_SELECT::reset();
+}
+#endif
+
+
 int QUICK_SELECT_DESC::get_next()
 {
   DBUG_ENTER("QUICK_SELECT_DESC::get_next");
@@ -10208,11 +10248,21 @@ TRP_GROUP_MIN_MAX::make_quick(PARAM *par
     if (quick_prefix_records == HA_POS_ERROR)
       quick->quick_prefix_select= NULL; /* Can't construct a quick select. */
     else
+    {
       /* Make a QUICK_RANGE_SELECT to be used for group prefix retrieval. */
       quick->quick_prefix_select= get_quick_select(param, param_idx,
                                                    index_tree,
                                                    &quick->alloc);
 
+#ifndef MCP_BUG11764737
+      if (!quick->quick_prefix_select)
+      {
+        delete quick;
+        DBUG_RETURN(NULL);
+      }
+      quick->quick_prefix_select->sorted= 1;
+#endif
+    }
     /*
       Extract the SEL_ARG subtree that contains only ranges for the MIN/MAX
       attribute, and create an array of QUICK_RANGES to be used by the
@@ -10603,6 +10653,10 @@ int QUICK_GROUP_MIN_MAX_SELECT::reset(vo
   DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::reset");
 
   head->set_keyread(TRUE); /* We need only the key attributes */
+  /*
+    Request ordered index access as usage of ::index_last(), 
+    ::index_first() within QUICK_GROUP_MIN_MAX_SELECT depends on it.
+  */
   if ((result= file->ha_index_init(index,1)))
     DBUG_RETURN(result);
   if (quick_prefix_select && quick_prefix_select->reset())

=== modified file 'sql/opt_range.h'
--- a/sql/opt_range.h	2010-10-06 10:06:47 +0000
+++ b/sql/opt_range.h	2011-03-23 13:15:16 +0000
@@ -762,7 +762,11 @@ public:
   int get_type() { return QS_TYPE_RANGE_DESC; }
 private:
   bool range_reads_after_key(QUICK_RANGE *range);
+#ifdef MCP_BUG11764737
   int reset(void) { rev_it.rewind(); return QUICK_RANGE_SELECT::reset(); }
+#else
+  int reset(void);
+#endif
   List<QUICK_RANGE> rev_ranges;
   List_iterator<QUICK_RANGE> rev_it;
   uint used_key_parts;

=== modified file 'sql/sql_base.cc'
--- a/sql/sql_base.cc	2010-10-22 14:13:23 +0000
+++ b/sql/sql_base.cc	2011-03-24 10:00:09 +0000
@@ -3001,6 +3001,11 @@ TABLE *open_table(THD *thd, TABLE_LIST *
   table->insert_values= 0;
   table->fulltext_searched= 0;
   table->file->ft_handler= 0;
+  /*
+    Check that there is no reference to a condition from an earlier query
+    (cf. Bug#58553). 
+  */
+  DBUG_ASSERT(table->file->pushed_cond == NULL);
   table->reginfo.impossible_range= 0;
   /* Catch wrong handling of the auto_increment_field_not_null. */
   DBUG_ASSERT(!table->auto_increment_field_not_null);

=== modified file 'sql/sql_select.cc'
--- a/sql/sql_select.cc	2011-02-23 10:42:16 +0000
+++ b/sql/sql_select.cc	2011-03-24 10:00:09 +0000
@@ -1743,6 +1743,19 @@ JOIN::save_join_tab()
 }
 
 
+#ifndef MCP_BUG11764737
+static void
+disable_sorted_access(JOIN_TAB* join_tab)
+{
+  join_tab->sorted= 0;
+  if (join_tab->select && join_tab->select->quick)
+  {
+    join_tab->select->quick->sorted= 0;
+  }
+}
+#endif
+
+
 /**
   Exec select.
 
@@ -1912,7 +1925,11 @@ JOIN::exec()
     DBUG_PRINT("info", ("%s", thd->proc_info));
     if (!curr_join->sort_and_group &&
         curr_join->const_tables != curr_join->tables)
+#ifdef MCP_BUG11764737
       curr_join->join_tab[curr_join->const_tables].sorted= 0;
+#else
+      disable_sorted_access(&curr_join->join_tab[curr_join->const_tables]);
+#endif
     if ((tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0)))
     {
       error= tmp_error;
@@ -2076,7 +2093,11 @@ JOIN::exec()
       curr_join->group_list= 0;
       if (!curr_join->sort_and_group &&
           curr_join->const_tables != curr_join->tables)
+#ifdef MCP_BUG11764737
         curr_join->join_tab[curr_join->const_tables].sorted= 0;
+#else
+        disable_sorted_access(&curr_join->join_tab[curr_join->const_tables]);
+#endif
       if (setup_sum_funcs(curr_join->thd, curr_join->sum_funcs) ||
 	  (tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table,
 				0)))
@@ -6540,7 +6561,6 @@ make_join_select(JOIN *join,SQL_SELECT *
           tab->select_cond=sel->cond=tmp;
           /* Push condition to storage engine if this is enabled
              and the condition is not guarded */
-          tab->table->file->pushed_cond= NULL;
 	  if (thd->variables.engine_condition_pushdown)
           {
             COND *push_cond= 
@@ -6846,9 +6866,15 @@ make_join_readinfo(JOIN *join, ulonglong
   uint i;
   bool statistics= test(!(join->select_options & SELECT_DESCRIBE));
   bool ordered_set= 0;
-  bool sorted= 1;
   DBUG_ENTER("make_join_readinfo");
 
+#ifdef MCP_BUG11764737
+  bool sorted= 1;
+#else
+  /* First table sorted if ORDER or GROUP BY was specified */
+  bool sorted= (join->order || join->group_list);
+#endif
+
   for (i=join->const_tables ; i < join->tables ; i++)
   {
     JOIN_TAB *tab=join->join_tab+i;
@@ -12156,7 +12182,12 @@ join_read_key(JOIN_TAB *tab)
 
   if (!table->file->inited)
   {
+#ifdef MCP_BUG11764737
+    table->file->ha_index_init(tab->ref.key, 0);
+#else
+    DBUG_ASSERT(!tab->sorted);  // Don't expect sort req. for single row.
     table->file->ha_index_init(tab->ref.key, tab->sorted);
+#endif
   }
   if (cmp_buffer_with_ref(tab) ||
       (table->status & (STATUS_GARBAGE | STATUS_NO_PARENT | STATUS_NULL_ROW)))
@@ -12428,7 +12459,11 @@ join_read_last(JOIN_TAB *tab)
   tab->read_record.index=tab->index;
   tab->read_record.record=table->record[0];
   if (!table->file->inited)
+#ifdef MCP_BUG11764737
     table->file->ha_index_init(tab->index, 1);
+#else
+    table->file->ha_index_init(tab->index, tab->sorted);
+#endif
   if ((error= tab->table->file->index_last(tab->table->record[0])))
     return report_error(table, error);
   return 0;
@@ -12452,7 +12487,11 @@ join_ft_read_first(JOIN_TAB *tab)
   TABLE *table= tab->table;
 
   if (!table->file->inited)
+#ifdef MCP_BUG11764737
     table->file->ha_index_init(tab->ref.key, 1);
+#else
+    table->file->ha_index_init(tab->ref.key, tab->sorted);
+#endif
 #if NOT_USED_YET
   /* as ft-key doesn't use store_key's, see also FT_SELECT::init() */
   if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))

=== modified file 'storage/ndb/clusterj/clusterj-api/Makefile.am'
--- a/storage/ndb/clusterj/clusterj-api/Makefile.am	2011-02-02 09:52:33 +0000
+++ b/storage/ndb/clusterj/clusterj-api/Makefile.am	2011-03-23 22:59:52 +0000
@@ -67,6 +67,7 @@ clusterj_api_java = \
   $(clusterj_api_src)/com/mysql/clusterj/Constants.java \
   $(clusterj_api_src)/com/mysql/clusterj/LockMode.java \
   $(clusterj_api_src)/com/mysql/clusterj/ColumnMetadata.java \
+  $(clusterj_api_src)/com/mysql/clusterj/ColumnType.java \
   $(clusterj_api_src)/com/mysql/clusterj/DynamicObject.java \
   $(clusterj_api_src)/com/mysql/clusterj/DynamicObjectDelegate.java
 

=== modified file 'storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/ColumnMetadata.java'
--- a/storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/ColumnMetadata.java	2010-12-21 00:52:28 +0000
+++ b/storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/ColumnMetadata.java	2011-03-23 22:41:01 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright 2010, Oracle and/or its affiliates. All rights reserved.
+   Copyright 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -27,7 +27,7 @@ public interface ColumnMetadata {
     /** Return the type of the column.
      * @return the type of the column
      */
-    Type type();
+    ColumnType columnType();
 
     /** Return the java type of the column.
      * @return the java type of the column
@@ -76,39 +76,4 @@ public interface ColumnMetadata {
      */
     String charsetName();
 
-    public enum Type {
-
-        Bigint,          ///< 64 bit. 8 byte signed integer, can be used in array
-        Bigunsigned,     ///< 64 Bit. 8 byte signed integer, can be used in array
-        Binary,          ///< Len
-        Bit,             ///< Bit, length specifies no of bits
-        Blob,            ///< Binary large object (see NdbBlob)
-        Char,            ///< Len. A fixed array of 1-byte chars
-        Date,            ///< Precision down to 1 day(sizeof(Date) == 4 bytes )
-        Datetime,        ///< Precision down to 1 sec (sizeof(Datetime) == 8 bytes )
-        Double,          ///< 64-bit float. 8 byte float, can be used in array
-        Decimal,         ///< MySQL >= 5.0 signed decimal,  Precision, Scale
-        Decimalunsigned,
-        Float,           ///< 32-bit float. 4 bytes float, can be used in array
-        Int,             ///< 32 bit. 4 byte signed integer, can be used in array
-        Longvarchar,     ///< Length bytes: 2, little-endian
-        Longvarbinary,   ///< Length bytes: 2, little-endian
-        Mediumint,       ///< 24 bit. 3 byte signed integer, can be used in array
-        Mediumunsigned,  ///< 24 bit. 3 byte unsigned integer, can be used in array
-        Olddecimal,
-        Olddecimalunsigned,
-        Smallint,        ///< 16 bit. 2 byte signed integer, can be used in array
-        Smallunsigned,   ///< 16 bit. 2 byte unsigned integer, can be used in array
-        Text,            ///< Text blob
-        Time,            ///< Time without date
-        Timestamp,       ///< Unix time
-        Tinyint,         ///< 8 bit. 1 byte signed integer, can be used in array
-        Tinyunsigned,    ///< 8 bit. 1 byte unsigned integer, can be used in array
-        Undefined,
-        Unsigned,        ///< 32 bit. 4 byte unsigned integer, can be used in array
-        Varbinary,       ///< Length bytes: 1, Max: 255
-        Varchar,         ///< Length bytes: 1, Max: 255
-        Year             ///< Year 1901-2155 (1 byte)
-    }
-
 }

=== added file 'storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/ColumnType.java'
--- a/storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/ColumnType.java	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/ColumnType.java	2011-03-23 22:41:01 +0000
@@ -0,0 +1,56 @@
+/*
+   Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+package com.mysql.clusterj;
+
+/** This class enumerates the column types for columns in ndb.
+ *
+ */
+public enum ColumnType {
+
+    Bigint,          ///< 64 bit. 8 byte signed integer, can be used in array
+    Bigunsigned,     ///< 64 Bit. 8 byte signed integer, can be used in array
+    Binary,          ///< Length is fixed. A fixed array of 1-byte values
+    Bit,             ///< Bit, length specifies no of bits
+    Blob,            ///< Binary large object (see NdbBlob)
+    Char,            ///< Length is fixed. A fixed array of 1-byte chars
+    Date,            ///< Precision down to 1 day(sizeof(Date) == 4 bytes )
+    Datetime,        ///< Precision down to 1 sec (sizeof(Datetime) == 8 bytes )
+    Double,          ///< 64-bit float. 8 byte float, can be used in array
+    Decimal,         ///< MySQL >= 5.0 signed decimal,  Precision, Scale
+    Decimalunsigned,
+    Float,           ///< 32-bit float. 4 bytes float, can be used in array
+    Int,             ///< 32 bit. 4 byte signed integer, can be used in array
+    Longvarchar,     ///< Length bytes: 2, little-endian
+    Longvarbinary,   ///< Length bytes: 2, little-endian
+    Mediumint,       ///< 24 bit. 3 byte signed integer, can be used in array
+    Mediumunsigned,  ///< 24 bit. 3 byte unsigned integer, can be used in array
+    Olddecimal,
+    Olddecimalunsigned,
+    Smallint,        ///< 16 bit. 2 byte signed integer, can be used in array
+    Smallunsigned,   ///< 16 bit. 2 byte unsigned integer, can be used in array
+    Text,            ///< Text blob
+    Time,            ///< Time without date
+    Timestamp,       ///< Unix time
+    Tinyint,         ///< 8 bit. 1 byte signed integer, can be used in array
+    Tinyunsigned,    ///< 8 bit. 1 byte unsigned integer, can be used in array
+    Undefined,
+    Unsigned,        ///< 32 bit. 4 byte unsigned integer, can be used in array
+    Varbinary,       ///< Length bytes: 1, Max: 255
+    Varchar,         ///< Length bytes: 1, Max: 255
+    Year             ///< Year 1901-2155 (1 byte)
+}

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/AbstractDomainFieldHandlerImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/AbstractDomainFieldHandlerImpl.java	2011-03-22 01:48:09 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/AbstractDomainFieldHandlerImpl.java	2011-03-23 22:41:01 +0000
@@ -21,6 +21,7 @@ import com.mysql.clusterj.ClusterJDatast
 import com.mysql.clusterj.ClusterJFatalInternalException;
 import com.mysql.clusterj.ClusterJUserException;
 import com.mysql.clusterj.ColumnMetadata;
+import com.mysql.clusterj.ColumnType;
 import com.mysql.clusterj.core.spi.ValueHandler;
 import com.mysql.clusterj.core.spi.DomainTypeHandler;
 import com.mysql.clusterj.core.query.CandidateIndexImpl;
@@ -90,7 +91,7 @@ public abstract class AbstractDomainFiel
     protected boolean partitionKey;
 
     /** The Store Type for the column. */
-    protected Type storeColumnType = null;
+    protected ColumnType storeColumnType = null;
 
     /** Column names in the case of a field mapped to multiple columns, e.g. foreign keys */
     protected String[] columnNames;
@@ -2753,7 +2754,7 @@ public abstract class AbstractDomainFiel
         return scale;
     }
 
-    public Type type() {
+    public ColumnType columnType() {
         return this.storeColumnType;
     }
 

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainFieldHandlerImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainFieldHandlerImpl.java	2011-02-06 21:37:05 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainFieldHandlerImpl.java	2011-03-23 22:41:01 +0000
@@ -20,6 +20,7 @@ package com.mysql.clusterj.core.metadata
 import com.mysql.clusterj.core.spi.ValueHandler;
 import com.mysql.clusterj.ClusterJDatastoreException;
 import com.mysql.clusterj.ClusterJUserException;
+import com.mysql.clusterj.ColumnType;
 
 import com.mysql.clusterj.annotation.Column;
 import com.mysql.clusterj.annotation.Lob;
@@ -218,7 +219,7 @@ public class DomainFieldHandlerImpl exte
             } else if (type.equals(Long.class)) {
                 objectOperationHandlerDelegate = objectOperationHandlerObjectLong;
             } else if (type.equals(Short.class)) {
-                if (Type.Year.equals(storeColumnType)) {
+                if (ColumnType.Year.equals(storeColumnType)) {
                     objectOperationHandlerDelegate = objectOperationHandlerObjectShortYear;
                 } else {
                     objectOperationHandlerDelegate = objectOperationHandlerObjectShort;
@@ -230,7 +231,7 @@ public class DomainFieldHandlerImpl exte
             } else if (type.equals(long.class)) {
                 objectOperationHandlerDelegate = objectOperationHandlerLong;
             } else if (type.equals(short.class)) {
-                if (Type.Year.equals(storeColumnType)) {
+                if (ColumnType.Year.equals(storeColumnType)) {
                     objectOperationHandlerDelegate = objectOperationHandlerShortYear;
                 } else {
                     objectOperationHandlerDelegate = objectOperationHandlerShort;

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/Column.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/Column.java	2011-02-02 09:52:33 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/Column.java	2011-03-23 22:41:01 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -17,7 +17,7 @@
 
 package com.mysql.clusterj.core.store;
 
-import com.mysql.clusterj.ColumnMetadata.Type;
+import com.mysql.clusterj.ColumnType;
 
 /** Column metadata for ndb columns.
  *
@@ -33,7 +33,7 @@ public interface Column {
      * 
      * @return the store type
      */
-    public Type getType();
+    public ColumnType getType();
 
     /** Is this column a primary key column?
      * @return true if this column is a primary key column

=== modified file 'storage/ndb/clusterj/clusterj-jdbc/Makefile.am'
--- a/storage/ndb/clusterj/clusterj-jdbc/Makefile.am	2011-02-21 11:53:51 +0000
+++ b/storage/ndb/clusterj/clusterj-jdbc/Makefile.am	2011-03-25 08:40:51 +0000
@@ -73,12 +73,6 @@ clusterj_jdbc_java = \
 
 clusterj_jdbc_DATA = $(clusterj_jdbc_version_JAR)
 
-EXTRA_DIST = \
-  $(clusterj_jdbc_src)/com/mysql/clusterj/jdbc/*.java \
-  $(clusterj_jdbc_src)/com/mysql/clusterj/jdbc/antlr/*.java \
-  $(clusterj_jdbc_src)/com/mysql/clusterj/jdbc/antlr/node/*.java \
-  logging.properties pom.xml CMakeLists.txt
-
 $(MANIFEST): 
 	echo "Manifest-Version: 1.0"  > $(MANIFEST) 
 	echo "Export-Package: com.mysql.clusterj,com.mysql.clusterj.query,com.mysql.clusterj.annotation"  >> $(MANIFEST)
@@ -88,7 +82,8 @@ $(MANIFEST): 
 
 clusterj_jdbc_antlr = \
   $(top_srcdir)/storage/ndb/clusterj/clusterj-jdbc/src/main/antlr3/com/mysql/clusterj/jdbc/antlr/MySQL51Parser.g \
-  $(top_srcdir)/storage/ndb/clusterj/clusterj-jdbc/src/main/antlr3/com/mysql/clusterj/jdbc/antlr/MySQL51Lexer.g
+  $(top_srcdir)/storage/ndb/clusterj/clusterj-jdbc/src/main/antlr3/com/mysql/clusterj/jdbc/antlr/MySQL51Lexer.g \
+  $(top_srcdir)/storage/ndb/clusterj/clusterj-jdbc/src/main/antlr3/imports/MySQL51Functions.g
 
 clusterj_jdbc_antlr_src = \
   $(top_srcdir)/storage/ndb/clusterj/clusterj-jdbc/target/generated-sources/antlr3/com/mysql/clusterj/jdbc/antlr/MySQL51Parser.java \
@@ -134,6 +129,13 @@ clean-local:
 
 MOSTLYCLEANFILES = $(MANIFEST)
 
+EXTRA_DIST = \
+  $(clusterj_jdbc_src)/com/mysql/clusterj/jdbc/*.java \
+  $(clusterj_jdbc_src)/com/mysql/clusterj/jdbc/antlr/*.java \
+  $(clusterj_jdbc_src)/com/mysql/clusterj/jdbc/antlr/node/*.java \
+  logging.properties pom.xml CMakeLists.txt \
+  $(clusterj_jdbc_antlr)
+
 mostlyclean-local: clean-local
 	rm -rf $(top_srcdir)/storage/ndb/clusterj/clusterj-jdbc/target
 

=== modified file 'storage/ndb/clusterj/clusterj-jdbc/src/main/antlr3/com/mysql/clusterj/jdbc/antlr/MySQL51Parser.g'
--- a/storage/ndb/clusterj/clusterj-jdbc/src/main/antlr3/com/mysql/clusterj/jdbc/antlr/MySQL51Parser.g	2011-02-21 11:53:51 +0000
+++ b/storage/ndb/clusterj/clusterj-jdbc/src/main/antlr3/com/mysql/clusterj/jdbc/antlr/MySQL51Parser.g	2011-03-24 00:05:36 +0000
@@ -160,7 +160,7 @@ select
 boolean seenUnion = false;
 }
 	:	select_paren
-		(UNION (mod=ALL | mod=DISTINCT)? union_selects+=select {seenUnion=true;})*
+		(UNION (mod=ALL | mod=DISTINCT)? union_selects+=select_paren {seenUnion=true;})*
 		 	-> {seenUnion}? ^(UNION $mod? select_paren $union_selects+)
 			-> select_paren
 	;
@@ -277,9 +277,10 @@ select_options
 	|	STRAIGHT_JOIN
 	|	SQL_SMALL_RESULT
 	|	SQL_BIG_RESULT
-	|	SQL_BUFFER_RESULT
-	|	SQL_CACHE
-	|	SQL_NO_CACHE
+// the following cause parser warnings
+//	|	SQL_BUFFER_RESULT
+//	|	SQL_CACHE
+//	|	SQL_NO_CACHE
 	|	SQL_CALC_FOUND_ROWS
 	;
 	
@@ -315,9 +316,10 @@ $table_references.table_count = $table_r
 table_ref
 	:	(t1=table_factor -> $t1 )
 		(
-			(LEFT|RIGHT)=>(ltype=LEFT|ltype=RIGHT) outer=OUTER? JOIN t3=table_ref lrjoinCond=join_condition_both 
+			(LEFT|RIGHT)=>(ltype=LEFT|ltype=RIGHT) outer=OUTER? JOIN t3=table_ref lrjoinCond=join_condition_either 
 				-> ^($ltype {$tree} $t3 $lrjoinCond $outer?)
-		|	(type=INNER|type=CROSS)? JOIN t2=table_factor cond1=join_condition_both? 
+// join condition is not optional here
+		|	(type=INNER|type=CROSS)? JOIN t2=table_factor cond1=join_condition_either 
 				-> ^(JOIN {$tree} $t2 $cond1? $type?)
 		|	(	type=STRAIGHT_JOIN t2=table_factor 
 				(	(join_condition_on)=> cond2=join_condition_on	-> ^($type {$tree} $t2 $cond2)
@@ -342,7 +344,7 @@ join_condition_on
 	:	ON where_condition		-> ^(ON where_condition)
 	;
 
-join_condition_both
+join_condition_either
 	:	join_condition_on
 	|	USING LPAREN fields+=ident (COMMA fields+=ident)* RPAREN		-> ^(USING $fields+)
 	;
@@ -805,7 +807,8 @@ equalityOperator
 bitwiseOrExpr
   : lhs=bitwiseAndExpr 
     ( (op+=BITWISE_OR^ rhs+=bitwiseAndExpr)+
-    | ((NOT^)? IN^ (parenExprList | subselect))
+// force compiler to always recognize NOT IN regardless of whatever follows
+    | (((NOT^)? IN^)=>(NOT^)? IN^ (parenExprList | subselect))
     | LIKE^ unaryExpr (ESCAPE STRING)?  // STRING must be empty or one character long (or be "\\" if not in sql_mode NO_BACKSLASH_ESCAPES)
     | isOperator^
     )?
@@ -825,7 +828,8 @@ shiftExpr
    TODO: It cannot be on the left of a MINUS, because that expression makes no sense.
 */
 additiveExpr
-	:	lhs=multiplicativeExpr ((op+=PLUS^|op+=MINUS^) rhs+=multiplicativeExpr)*
+// force any PLUS or MINUS to be binary not unary for this rule
+    :   lhs=multiplicativeExpr ((PLUS|MINUS)=>(op+=PLUS^|op+=MINUS^) rhs+=multiplicativeExpr)*
 	;
 
 multOperator
@@ -1063,7 +1067,8 @@ delete
 boolean multiTableDelete = false;
 }
 	:	DELETE
-		(options{k=1;}: opts+=LOW_PRIORITY | opts+=QUICK | opts+=IGNORE)*		// the yacc parser accepts any combination and any number of these modifiers, so we do, too.
+// opts+=QUICK causes parser warnings
+		(options{k=1;}: opts+=LOW_PRIORITY | opts+=IGNORE)*		// the yacc parser accepts any combination and any number of these modifiers, so we do, too.
 		(	FROM 
 			t+=simple_table_ref_no_alias (DOT MULT {multiTableDelete = true;} )? (COMMA t+=simple_table_ref_no_alias (DOT MULT)? {multiTableDelete = true;} )*
 			(USING tr=table_references {multiTableDelete = true;})?
@@ -1072,12 +1077,12 @@ boolean multiTableDelete = false;
 				order_by?
 				(LIMIT NUMBER)?
 			)?
-								-> {multiTableDelete}? ^(DELETE ^(OPTIONS $opts+)? ^(TABLE $t)+ ^(USING $tr) ^(WHERE where_condition)?)
-								 				 	-> ^(DELETE<com.mysql.clusterj.jdbc.antlr.node.DeleteNode> ^(OPTIONS $opts+)? ^(TABLE $t) ^(WHERE where_condition)? order_by? ^(LIMIT NUMBER)?)
+				-> {multiTableDelete}? ^(DELETE<com.mysql.clusterj.jdbc.antlr.node.DeleteNode> ^(OPTIONS $opts+)? ^(TABLE $t)+ ^(USING $tr) ^(WHERE<com.mysql.clusterj.jdbc.antlr.node.WhereNode> where_condition)?)
+				-> ^(DELETE<com.mysql.clusterj.jdbc.antlr.node.DeleteNode> ^(OPTIONS $opts+)? ^(TABLE $t) ^(WHERE<com.mysql.clusterj.jdbc.antlr.node.WhereNode> where_condition)? order_by? ^(LIMIT NUMBER)?)
 								 
 		|	t+=simple_table_ref_no_alias (DOT MULT)? (COMMA t+=simple_table_ref_no_alias (DOT MULT)?)*
 			FROM tr=table_references
-			(WHERE where_condition)?			-> ^(DELETE ^(OPTIONS $opts+)? ^(TABLE $t)+ ^(FROM $tr) ^(WHERE where_condition)?)
+			(WHERE where_condition)?			-> ^(DELETE ^(OPTIONS $opts+)? ^(TABLE $t)+ ^(FROM $tr) ^(WHERE<com.mysql.clusterj.jdbc.antlr.node.WhereNode> where_condition)?)
 		)
 	;
 

=== modified file 'storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/DynamicObjectTest.java'
--- a/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/DynamicObjectTest.java	2011-02-02 09:52:33 +0000
+++ b/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/DynamicObjectTest.java	2011-03-23 22:41:01 +0000
@@ -17,15 +17,11 @@
 
 package testsuite.clusterj;
 
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
 import com.mysql.clusterj.ClusterJUserException;
+import com.mysql.clusterj.ColumnType;
 import com.mysql.clusterj.DynamicObject;
 import com.mysql.clusterj.ColumnMetadata;
 import com.mysql.clusterj.Query;
-import com.mysql.clusterj.ColumnMetadata.Type;
 import com.mysql.clusterj.annotation.PersistenceCapable;
 import com.mysql.clusterj.query.QueryBuilder;
 import com.mysql.clusterj.query.QueryDomainType;
@@ -42,7 +38,7 @@ public class DynamicObjectTest extends A
 
     private Object[] expectedTBasicNames = new Object[] {"id", "name", "age", "magic"};
 
-    private Object[] expectedTBasicTypes = new Object[] {Type.Int, Type.Varchar, Type.Int, Type.Int};
+    private Object[] expectedTBasicTypes = new Object[] {ColumnType.Int, ColumnType.Varchar, ColumnType.Int, ColumnType.Int};
 
     private Object[] expectedTBasicJavaTypes = new Object[] {Integer.class, String.class, Integer.class, Integer.class};
 
@@ -181,11 +177,6 @@ public class DynamicObjectTest extends A
     }
 
     private static class DynamicObjectPrivate extends DynamicObject {
-        public DynamicObjectPrivate() {}
-        @Override
-        public String table() {
-            return "DynamicObjectProtectedConstructor";
-        }        
     }
 
     public class DynamicObjectNonStatic extends DynamicObject {
@@ -220,7 +211,7 @@ public class DynamicObjectTest extends A
         ColumnMetadata[] metadata = tbasic.columnMetadata();
         for (int i = 0; i < metadata.length; ++i) {
             errorIfNotEqual("t_basic column " + i + " name", expectedTBasicNames[i], metadata[i].name());
-            errorIfNotEqual("t_basic column " + i + " type", expectedTBasicTypes[i], metadata[i].type());
+            errorIfNotEqual("t_basic column " + i + " type", expectedTBasicTypes[i], metadata[i].columnType());
             errorIfNotEqual("t_basic column " + i + " javaType", expectedTBasicJavaTypes[i], metadata[i].javaType());
             errorIfNotEqual("t_basic column " + i + " maximumLength", expectedTBasicMaximumLengths[i], metadata[i].maximumLength());
             errorIfNotEqual("t_basic column " + i + " charsetName", expectedTBasicCharsetNames [i], metadata[i].charsetName());

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ColumnImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ColumnImpl.java	2011-02-04 22:14:36 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ColumnImpl.java	2011-03-23 22:41:01 +0000
@@ -23,7 +23,7 @@ import com.mysql.ndbjtie.ndbapi.NdbDicti
 import com.mysql.clusterj.ClusterJDatastoreException;
 import com.mysql.clusterj.ClusterJFatalInternalException;
 
-import com.mysql.clusterj.ColumnMetadata.Type;
+import com.mysql.clusterj.ColumnType;
 
 import com.mysql.clusterj.core.store.Column;
 
@@ -57,7 +57,7 @@ class ColumnImpl implements Column {
     private int charsetNumber = 0;
 
     /** The ndb column type for the column */
-    private Type columnType;
+    private ColumnType columnType;
 
     /** The prefix length for variable size columns */
     private int prefixLength = -1;
@@ -238,43 +238,43 @@ class ColumnImpl implements Column {
         }
     }
 
-    public Type getType() {
+    public ColumnType getType() {
         return columnType;
     }
 
-    private Type convertType(int type) {
+    private ColumnType convertType(int type) {
         switch (type) {
-            case ColumnConst.Type.Bigint: return Type.Bigint;
-            case ColumnConst.Type.Bigunsigned: return Type.Bigunsigned;
-            case ColumnConst.Type.Binary: return Type.Binary;
-            case ColumnConst.Type.Bit: return Type.Bit;
-            case ColumnConst.Type.Blob: return Type.Blob;
-            case ColumnConst.Type.Char: return Type.Char;
-            case ColumnConst.Type.Date: return Type.Date;
-            case ColumnConst.Type.Datetime: return Type.Datetime;
-            case ColumnConst.Type.Decimal: return Type.Decimal;
-            case ColumnConst.Type.Decimalunsigned: return Type.Decimalunsigned;
-            case ColumnConst.Type.Double: return Type.Double;
-            case ColumnConst.Type.Float: return Type.Float;
-            case ColumnConst.Type.Int: return Type.Int;
-            case ColumnConst.Type.Longvarbinary: return Type.Longvarbinary;
-            case ColumnConst.Type.Longvarchar: return Type.Longvarchar;
-            case ColumnConst.Type.Mediumint: return Type.Mediumint;
-            case ColumnConst.Type.Mediumunsigned: return Type.Mediumunsigned;
-            case ColumnConst.Type.Olddecimal: return Type.Olddecimal;
-            case ColumnConst.Type.Olddecimalunsigned: return Type.Olddecimalunsigned;
-            case ColumnConst.Type.Smallint: return Type.Smallint;
-            case ColumnConst.Type.Smallunsigned: return Type.Smallunsigned;
-            case ColumnConst.Type.Text: return Type.Text;
-            case ColumnConst.Type.Time: return Type.Time;
-            case ColumnConst.Type.Timestamp: return Type.Timestamp;
-            case ColumnConst.Type.Tinyint: return Type.Tinyint;
-            case ColumnConst.Type.Tinyunsigned: return Type.Tinyunsigned;
-            case ColumnConst.Type.Undefined: return Type.Undefined;
-            case ColumnConst.Type.Unsigned: return Type.Unsigned;
-            case ColumnConst.Type.Varbinary: return Type.Varbinary;
-            case ColumnConst.Type.Varchar: return Type.Varchar;
-            case ColumnConst.Type.Year: return Type.Year;
+            case ColumnConst.Type.Bigint: return ColumnType.Bigint;
+            case ColumnConst.Type.Bigunsigned: return ColumnType.Bigunsigned;
+            case ColumnConst.Type.Binary: return ColumnType.Binary;
+            case ColumnConst.Type.Bit: return ColumnType.Bit;
+            case ColumnConst.Type.Blob: return ColumnType.Blob;
+            case ColumnConst.Type.Char: return ColumnType.Char;
+            case ColumnConst.Type.Date: return ColumnType.Date;
+            case ColumnConst.Type.Datetime: return ColumnType.Datetime;
+            case ColumnConst.Type.Decimal: return ColumnType.Decimal;
+            case ColumnConst.Type.Decimalunsigned: return ColumnType.Decimalunsigned;
+            case ColumnConst.Type.Double: return ColumnType.Double;
+            case ColumnConst.Type.Float: return ColumnType.Float;
+            case ColumnConst.Type.Int: return ColumnType.Int;
+            case ColumnConst.Type.Longvarbinary: return ColumnType.Longvarbinary;
+            case ColumnConst.Type.Longvarchar: return ColumnType.Longvarchar;
+            case ColumnConst.Type.Mediumint: return ColumnType.Mediumint;
+            case ColumnConst.Type.Mediumunsigned: return ColumnType.Mediumunsigned;
+            case ColumnConst.Type.Olddecimal: return ColumnType.Olddecimal;
+            case ColumnConst.Type.Olddecimalunsigned: return ColumnType.Olddecimalunsigned;
+            case ColumnConst.Type.Smallint: return ColumnType.Smallint;
+            case ColumnConst.Type.Smallunsigned: return ColumnType.Smallunsigned;
+            case ColumnConst.Type.Text: return ColumnType.Text;
+            case ColumnConst.Type.Time: return ColumnType.Time;
+            case ColumnConst.Type.Timestamp: return ColumnType.Timestamp;
+            case ColumnConst.Type.Tinyint: return ColumnType.Tinyint;
+            case ColumnConst.Type.Tinyunsigned: return ColumnType.Tinyunsigned;
+            case ColumnConst.Type.Undefined: return ColumnType.Undefined;
+            case ColumnConst.Type.Unsigned: return ColumnType.Unsigned;
+            case ColumnConst.Type.Varbinary: return ColumnType.Varbinary;
+            case ColumnConst.Type.Varchar: return ColumnType.Varchar;
+            case ColumnConst.Type.Year: return ColumnType.Year;
             default: throw new ClusterJFatalInternalException(
                     local.message("ERR_Unknown_Column_Type",
                     tableName, columnName, type));

=== modified file 'storage/ndb/include/kernel/ndb_limits.h'
--- a/storage/ndb/include/kernel/ndb_limits.h	2011-02-08 13:55:54 +0000
+++ b/storage/ndb/include/kernel/ndb_limits.h	2011-03-28 11:59:09 +0000
@@ -118,11 +118,6 @@
  * Maximum number of Parallel Scan queries on one hash index fragment
  */
 #define MAX_PARALLEL_SCANS_PER_FRAG 12
-/*
- * Maximum parallel ordered index scans per primary table fragment.
- * Implementation limit is (256 minus 12).
- */
-#define MAX_PARALLEL_INDEX_SCANS_PER_FRAG 32
 
 /**
  * Computed defines

=== modified file 'storage/ndb/include/mgmapi/mgmapi_config_parameters.h'
--- a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h	2011-02-15 11:41:27 +0000
+++ b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h	2011-03-28 11:59:09 +0000
@@ -180,6 +180,7 @@
 #define CFG_DB_LATE_ALLOC             615
 
 #define CFG_DB_2PASS_INR              616
+#define CFG_DB_PARALLEL_SCANS_PER_FRAG 617
 
 #define CFG_NODE_ARBIT_RANK           200
 #define CFG_NODE_ARBIT_DELAY          201

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2011-02-23 19:28:26 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2011-03-28 11:59:09 +0000
@@ -3240,6 +3240,7 @@ public:
   Uint32 c_max_redo_lag;
   Uint32 c_max_redo_lag_counter;
   Uint64 cTotalLqhKeyReqCount;
+  Uint32 c_max_parallel_scans_per_frag;
 
   inline bool getAllowRead() const {
     return getNodeState().startLevel < NodeState::SL_STOPPING_3;

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp	2011-02-15 11:41:27 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp	2011-03-28 11:59:09 +0000
@@ -88,6 +88,8 @@ void Dblqh::initData() 
   cTotalLqhKeyReqCount = 0;
   c_max_redo_lag = 30; // seconds
   c_max_redo_lag_counter = 3; // 3 strikes and you're out
+
+  c_max_parallel_scans_per_frag = 32;
 }//Dblqh::initData()
 
 void Dblqh::initRecords() 

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-02-23 22:59:16 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-03-29 19:03:46 +0000
@@ -1351,6 +1351,16 @@ void Dblqh::execREAD_CONFIG_REQ(Signal* 
   c_max_redo_lag_counter = 3;
   ndb_mgm_get_int_parameter(p, CFG_DB_REDO_OVERCOMMIT_COUNTER,
                             &c_max_redo_lag_counter);
+
+  c_max_parallel_scans_per_frag = 32;
+  ndb_mgm_get_int_parameter(p, CFG_DB_PARALLEL_SCANS_PER_FRAG,
+                            &c_max_parallel_scans_per_frag);
+
+  if (c_max_parallel_scans_per_frag > (256 - MAX_PARALLEL_SCANS_PER_FRAG) / 2)
+  {
+    jam();
+    c_max_parallel_scans_per_frag = (256 - MAX_PARALLEL_SCANS_PER_FRAG) / 2;
+  }
   return;
 }//Dblqh::execSIZEALT_REP()
 
@@ -9752,6 +9762,8 @@ void Dblqh::closeScanRequestLab(Signal* 
       scanptr.p->scanCompletedStatus = ZTRUE;
       break;
     case ScanRecord::WAIT_CLOSE_SCAN:
+      jam();
+      scanptr.p->scanCompletedStatus = ZTRUE;
     case ScanRecord::WAIT_DELETE_STORED_PROC_ID_SCAN:
       jam();
       /*empty*/;
@@ -9874,7 +9886,9 @@ void Dblqh::scanLockReleasedLab(Signal* 
     jam();
     scanptr.p->scanReleaseCounter++;     
     scanReleaseLocksLab(signal);
-  } else {
+  }
+  else if (scanptr.p->scanCompletedStatus != ZTRUE)
+  {
     jam();
     /*
     We come here when we have been scanning for a long time and not been able
@@ -9884,7 +9898,12 @@ void Dblqh::scanLockReleasedLab(Signal* 
     */
     scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
     sendScanFragConf(signal, ZFALSE);
-  }//if
+  }
+  else
+  {
+    jam();
+    closeScanLab(signal);
+  }
 }//Dblqh::scanLockReleasedLab()
 
 bool
@@ -11331,7 +11350,19 @@ Uint32 Dblqh::initScanrec(const ScanFrag
    * !idx uses 1 - (MAX_PARALLEL_SCANS_PER_FRAG - 1)  =  1-11
    *  idx uses from MAX_PARALLEL_SCANS_PER_FRAG - MAX = 12-42)
    */
+
+  /**
+   * ACC only supports 12 parallel scans per fragment (hard limit)
+   * TUP/TUX does not have any such limit...but when scanning with keyinfo
+   *         (for take-over) no more than 255 such scans can be active
+   *         at a fragment (due to the 8-bit number in the scan-keyinfo protocol)
+   *
+   * TODO: Make TUP/TUX limits depend on scanKeyinfoFlag (possibly with
+   *       another config limit too)
+   */
+
   Uint32 start, stop;
+  Uint32 max_parallel_scans_per_frag = c_max_parallel_scans_per_frag;
   if (accScan)
   {
     start = 1;
@@ -11340,13 +11371,13 @@ Uint32 Dblqh::initScanrec(const ScanFrag
   else if (rangeScan)
   {
     start = MAX_PARALLEL_SCANS_PER_FRAG;
-    stop = start + MAX_PARALLEL_INDEX_SCANS_PER_FRAG - 1;
+    stop = start + max_parallel_scans_per_frag - 1;
   }
   else
   {
     ndbassert(tupScan);
-    start = MAX_PARALLEL_SCANS_PER_FRAG + MAX_PARALLEL_INDEX_SCANS_PER_FRAG;
-    stop = start + MAX_PARALLEL_INDEX_SCANS_PER_FRAG - 1;
+    start = MAX_PARALLEL_SCANS_PER_FRAG + max_parallel_scans_per_frag;
+    stop = start + max_parallel_scans_per_frag - 1;
   }
   ndbrequire((start < 32 * tFragPtr.p->m_scanNumberMask.Size) &&
              (stop < 32 * tFragPtr.p->m_scanNumberMask.Size));
@@ -11359,7 +11390,7 @@ Uint32 Dblqh::initScanrec(const ScanFrag
       jam();
       return ScanFragRef::ZTOO_MANY_ACTIVE_SCAN_ERROR;
     }
-    
+
     /**
      * Put on queue
      */

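The hunks above replace the compile-time MAX_PARALLEL_INDEX_SCANS_PER_FRAG with the configurable c_max_parallel_scans_per_frag and clamp it so that the last TUP scan number still fits below 256 alongside the 12 ACC slots. The following is a minimal standalone sketch of that partitioning and clamp; it is not part of the patch, and it assumes MAX_PARALLEL_SCANS_PER_FRAG stays at 12 and that the 7.2 default of 256 from ConfigInfo.cpp further down is in effect.

#include <cassert>
#include <cstdio>

// Hard ACC limit from ndb_limits.h (unchanged by this patch).
static const unsigned MAX_PARALLEL_SCANS_PER_FRAG = 12;

// Same clamp as execREAD_CONFIG_REQ: range and tup scans each get one
// block of scan numbers, and the last tup slot must stay below 256.
static unsigned clamp_config(unsigned configured)
{
  const unsigned limit = (256 - MAX_PARALLEL_SCANS_PER_FRAG) / 2; // 122
  return configured > limit ? limit : configured;
}

struct Range { unsigned start; unsigned stop; };

// Mirrors the start/stop selection in initScanrec().
static Range scan_number_range(bool accScan, bool rangeScan, unsigned per_frag)
{
  Range r;
  if (accScan) {
    r.start = 1;
    r.stop = MAX_PARALLEL_SCANS_PER_FRAG - 1;                  // 1 .. 11
  } else if (rangeScan) {
    r.start = MAX_PARALLEL_SCANS_PER_FRAG;                     // 12 ..
    r.stop = r.start + per_frag - 1;
  } else {                                                     // tup scan
    r.start = MAX_PARALLEL_SCANS_PER_FRAG + per_frag;
    r.stop = r.start + per_frag - 1;
  }
  return r;
}

int main()
{
  unsigned per_frag = clamp_config(256);   // 7.2 default, clamped
  assert(per_frag == 122);                 // (256 - 12) / 2

  Range acc = scan_number_range(true, false, per_frag);
  Range idx = scan_number_range(false, true, per_frag);
  Range tup = scan_number_range(false, false, per_frag);

  printf("acc %u-%u  idx %u-%u  tup %u-%u\n",
         acc.start, acc.stop, idx.start, idx.stop, tup.start, tup.stop);
  // prints: acc 1-11  idx 12-133  tup 134-255
  assert(tup.stop < 256);                  // fits the 8-bit scan number
  return 0;
}
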
=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-02-23 19:28:26 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-03-29 06:50:49 +0000
@@ -1546,7 +1546,7 @@ Dbspj::abort(Signal* signal, Ptr<Request
   if ((requestPtr.p->m_state & Request::RS_ABORTING) != 0)
   {
     jam();
-    return;
+    goto checkcomplete;
   }
 
   requestPtr.p->m_state |= Request::RS_ABORTING;
@@ -1573,6 +1573,7 @@ Dbspj::abort(Signal* signal, Ptr<Request
     }
   }
 
+checkcomplete:
   checkBatchComplete(signal, requestPtr, 0);
 }
 
@@ -5475,6 +5476,33 @@ Dbspj::scanIndex_cleanup(Ptr<Request> re
 {
   ScanIndexData& data = treeNodePtr.p->m_scanindex_data;
   Local_ScanFragHandle_list list(m_scanfraghandle_pool, data.m_fragments);
+  if (requestPtr.p->m_state & Request::RS_ABORTING)
+  {
+    /**
+     * If we're aborting...there can be keys attached that have not been
+     *   (and will not be) sent...release them to avoid a memory leak
+     */
+    jam();
+    Ptr<ScanFragHandle> fragPtr;
+    for (list.first(fragPtr); !fragPtr.isNull(); list.next(fragPtr))
+    {
+      if (fragPtr.p->m_rangePtrI != RNIL)
+      {
+        releaseSection(fragPtr.p->m_rangePtrI);
+        fragPtr.p->m_rangePtrI = RNIL;
+      }
+    }
+  }
+  else
+  {
+#ifdef VM_TRACE
+    Ptr<ScanFragHandle> fragPtr;
+    for (list.first(fragPtr); !fragPtr.isNull(); list.next(fragPtr))
+    {
+      ndbrequire(fragPtr.p->m_rangePtrI == RNIL);
+    }
+#endif
+  }
   list.remove();
 
   if (treeNodePtr.p->m_bits & TreeNode::T_PRUNE_PATTERN)

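The new branch in scanIndex_cleanup() releases key sections that were attached to fragment handles but never sent because the request is aborting, while the non-aborting path only asserts that nothing is left attached. Below is a hedged sketch of that release-on-abort pattern using stand-in types; SectionPool and FragHandle are illustrations, not NDB API.

#include <cassert>
#include <vector>

// NDB's null index constant; the exact value does not matter here.
static const unsigned RNIL = 0xffffff00;

// Stand-in for a per-fragment handle carrying an optional key section.
struct FragHandle { unsigned rangePtrI = RNIL; };

// Stand-in for the section pool; release() just balances a counter here.
struct SectionPool {
  unsigned live = 0;
  unsigned seize()             { ++live; return live; }   // non-RNIL id
  void release(unsigned /*i*/) { --live; }
};

// Mirrors the cleanup logic: on abort, release anything still attached;
// otherwise every handle must already be clean.
void cleanup(SectionPool& pool, std::vector<FragHandle>& frags, bool aborting)
{
  for (FragHandle& f : frags) {
    if (aborting) {
      if (f.rangePtrI != RNIL) {
        pool.release(f.rangePtrI);
        f.rangePtrI = RNIL;
      }
    } else {
      assert(f.rangePtrI == RNIL);   // leak detector on the normal path
    }
  }
}

int main()
{
  SectionPool pool;
  std::vector<FragHandle> frags(3);
  frags[1].rangePtrI = pool.seize();       // one key built but never sent
  cleanup(pool, frags, /*aborting=*/true);
  assert(pool.live == 0);                  // nothing leaked
  return 0;
}
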
=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2011-02-08 08:36:36 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2011-03-29 17:26:47 +0000
@@ -925,7 +925,7 @@ int Dbtup::handleReadReq(Signal* signal,
 /* ------------------------------------------------------------------------- */
       jam();
       Uint32 TnoOfDataRead= (Uint32) ret;
-      req_struct->read_length= TnoOfDataRead;
+      req_struct->read_length += TnoOfDataRead;
       sendReadAttrinfo(signal, req_struct, TnoOfDataRead, regOperPtr);
       return 0;
     }
@@ -2202,7 +2202,7 @@ int Dbtup::interpreterStartLab(Signal* s
      *    This is used for ANYVALUE and interpreted delete.
      */
     req_struct->log_size+= RlogSize;
-    req_struct->read_length= RattroutCounter;
+    req_struct->read_length += RattroutCounter;
     sendReadAttrinfo(signal, req_struct, RattroutCounter, regOperPtr);
     if (RlogSize > 0) {
       return sendLogAttrinfo(signal, req_struct, RlogSize, regOperPtr);

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp	2011-02-08 14:45:27 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp	2011-03-29 17:26:47 +0000
@@ -2778,6 +2778,7 @@ Dbtup::flush_read_buffer(KeyReqStruct *r
 
   req_struct->out_buf_index = 0; // Reset buffer
   req_struct->out_buf_bits = 0;
+  req_struct->read_length += len;
 }
 
 Uint32

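Both read_length hunks turn an assignment into an accumulation because flush_read_buffer() now adds the flushed word count itself; assigning afterwards would drop whatever had already been flushed out of the buffer. A tiny sketch of the difference, with hypothetical word counts and plain C++ types rather than NDB ones:

#include <cassert>

struct ReqStruct { unsigned read_length = 0; };

// Pretend a large read flushed part of the output buffer first
// (the flush adds its own length, as DbtupRoutines.cpp now does) ...
void flush_read_buffer(ReqStruct& req, unsigned flushed_words)
{
  req.read_length += flushed_words;
}

// ... and the caller then accounts for the remainder of the read.
void finish_read(ReqStruct& req, unsigned remaining_words)
{
  req.read_length += remaining_words;   // '=' here would lose the flushed part
}

int main()
{
  ReqStruct req;
  flush_read_buffer(req, 100);   // hypothetical flushed portion
  finish_read(req, 20);          // hypothetical tail of the read
  assert(req.read_length == 120);
  return 0;
}
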
=== modified file 'storage/ndb/src/kernel/vm/Configuration.cpp'
--- a/storage/ndb/src/kernel/vm/Configuration.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/kernel/vm/Configuration.cpp	2011-03-29 12:32:02 +0000
@@ -733,7 +733,11 @@ Configuration::calcSizeAlt(ConfigValues 
 
 
   if (noOfLocalScanRecords == 0) {
+#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
     noOfLocalScanRecords = (noOfDBNodes * noOfScanRecords) + 
+#else
+    noOfLocalScanRecords = 4 * (noOfDBNodes * noOfScanRecords) +
+#endif
       1 /* NR */ + 
       1 /* LCP */; 
   }

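For 7.2 the derived LocalScanRecords default grows by a factor of four, in line with the larger per-fragment scan limit above. A worked comparison with hypothetical inputs (4 data nodes, 256 scan records; neither number comes from the patch):

#include <cstdio>

int main()
{
  // Hypothetical inputs; real values come from the cluster configuration.
  const unsigned noOfDBNodes = 4;
  const unsigned noOfScanRecords = 256;

  // Pre-7.2 derivation (plus one record each for node recovery and LCP).
  unsigned old_default = (noOfDBNodes * noOfScanRecords) + 1 /*NR*/ + 1 /*LCP*/;

  // 7.2 derivation from the hunk above: 4x the node*scan product.
  unsigned new_default = 4 * (noOfDBNodes * noOfScanRecords) + 1 /*NR*/ + 1 /*LCP*/;

  printf("old=%u new=%u\n", old_default, new_default);  // old=1026 new=4098
  return 0;
}
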
=== modified file 'storage/ndb/src/mgmsrv/ConfigInfo.cpp'
--- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp	2011-02-15 11:41:27 +0000
+++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp	2011-03-29 12:32:02 +0000
@@ -31,6 +31,7 @@
 #else
 #include "ConfigInfo.hpp"
 #include <mgmapi_config_parameters.h>
+#include <ndb_version.h>
 #endif /* NDB_MGMAPI */
 
 #define KEY_INTERNAL 0
@@ -933,7 +934,11 @@ const ConfigInfo::ParamInfo ConfigInfo::
     ConfigInfo::CI_USED,
     false,
     ConfigInfo::CI_INT64,
+#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
     "20M",
+#else
+    "128M",
+#endif
     "0",
     "65536G" }, // 32k pages * 32-bit i value
   
@@ -981,7 +986,11 @@ const ConfigInfo::ParamInfo ConfigInfo::
     ConfigInfo::CI_USED,
     0,
     ConfigInfo::CI_INT,
+#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
     "1500",
+#else
+    "5000",
+#endif
     "10",
     STR_VALUE(MAX_INT_RNIL) },
 
@@ -1041,7 +1050,11 @@ const ConfigInfo::ParamInfo ConfigInfo::
     ConfigInfo::CI_USED,
     0,
     ConfigInfo::CI_INT,
+#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
     "4000",
+#else
+    "0",
+#endif
     "0",
     "256000" },
 
@@ -1254,7 +1267,11 @@ const ConfigInfo::ParamInfo ConfigInfo::
     ConfigInfo::CI_USED,
     false,
     ConfigInfo::CI_INT,
+#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
     "3000",
+#else
+    "7500",
+#endif
     "10",
     STR_VALUE(MAX_INT_RNIL) },
 
@@ -1926,6 +1943,24 @@ const ConfigInfo::ParamInfo ConfigInfo::
     "true"                       /* Max */
   },
 
+  {
+    CFG_DB_PARALLEL_SCANS_PER_FRAG,
+    "MaxParallelScansPerFragment",
+    DB_TOKEN,
+    "Max parallel scans per fragment (tup or tux). If this limit is reached "
+    " scans will be serialized using a queue.",
+    ConfigInfo::CI_USED,
+    false,
+    ConfigInfo::CI_INT,
+#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
+    "32",
+#else
+    "256",
+#endif
+    "1",                         /* Min */
+    STR_VALUE(MAX_INT_RNIL)      /* Max */
+  },
+
   /***************************************************************************
    * API
    ***************************************************************************/

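The new defaults (128M, 5000, 0, 7500 and the 256 scans per fragment) are chosen at compile time by comparing NDB_VERSION_D against NDB_MAKE_VERSION(7,2,0). The sketch below assumes the usual one-byte-per-component packing of that macro (major << 16 | minor << 8 | build); verify against ndb_version.h before relying on the exact layout.

#include <cassert>

// Assumed packing of NDB_MAKE_VERSION; check ndb_version.h.
static unsigned make_version(unsigned major, unsigned minor, unsigned build)
{
  return (major << 16) | (minor << 8) | build;
}

int main()
{
  // A hypothetical 7.1.x binary keeps the old defaults ...
  assert(make_version(7, 1, 15) < make_version(7, 2, 0));
  // ... while any 7.2.x binary compiles in the new ones,
  // e.g. "256" for MaxParallelScansPerFragment.
  assert(make_version(7, 2, 1) >= make_version(7, 2, 0));
  return 0;
}
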
=== modified file 'storage/ndb/src/ndbapi/ObjectMap.cpp'
--- a/storage/ndb/src/ndbapi/ObjectMap.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/ndbapi/ObjectMap.cpp	2011-03-29 07:38:11 +0000
@@ -47,9 +47,9 @@ int NdbObjectIdMap::expand(Uint32 incSiz
     m_map = tmp;
     
     for(Uint32 i = m_size; i < newSize; i++){
-      m_map[i].m_next = i + 1;
+      m_map[i].m_next = 2 * (i + 1) + 1;
     }
-    m_firstFree = m_size;
+    m_firstFree = (2 * m_size) + 1;
     m_map[newSize-1].m_next = InvalidId;
     m_size = newSize;
   }

=== modified file 'storage/ndb/src/ndbapi/ObjectMap.hpp'
--- a/storage/ndb/src/ndbapi/ObjectMap.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/ndbapi/ObjectMap.hpp	2011-03-29 10:13:42 +0000
@@ -47,7 +47,7 @@ private:
   Uint32 m_expandSize;
   Uint32 m_firstFree;
   union MapEntry {
-     Uint32 m_next;
+     UintPtr m_next;
      void * m_obj;
   } * m_map;
 
@@ -60,12 +60,14 @@ Uint32
 NdbObjectIdMap::map(void * object){
   
   //  lock();
+  assert((UintPtr(object) & 3) == 0);
   
   if(m_firstFree == InvalidId && expand(m_expandSize))
     return InvalidId;
   
-  Uint32 ff = m_firstFree;
-  m_firstFree = m_map[ff].m_next;
+  Uint32 ff = m_firstFree >> 1;
+  assert(UintPtr(m_map[ff].m_next) == Uint32(m_map[ff].m_next));
+  m_firstFree = Uint32(m_map[ff].m_next);
   m_map[ff].m_obj = object;
   
   //  unlock();
@@ -86,7 +88,7 @@ NdbObjectIdMap::unmap(Uint32 id, void *o
     void * obj = m_map[i].m_obj;
     if (object == obj) {
       m_map[i].m_next = m_firstFree;
-      m_firstFree = i;
+      m_firstFree = (2 * i) + 1;
     } else {
       g_eventLogger->error("NdbObjectIdMap::unmap(%u, 0x%lx) obj=0x%lx",
                            id, (long) object, (long) obj);
@@ -109,7 +111,8 @@ NdbObjectIdMap::getObject(Uint32 id){
   // DBUG_PRINT("info",("NdbObjectIdMap::getObject(%u) obj=0x%x", id,  m_map[id>>2].m_obj));
   id >>= 2;
   if(id < m_size){
-    return m_map[id].m_obj;
+    if ((m_map[id].m_next & 3) == 0)
+      return m_map[id].m_obj;
   }
   return 0;
 }

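The ObjectMap change widens m_next to pointer size and tags free-list links as 2 * index + 1: a slot on the free list always has its low bit set, while a slot holding a mapped object (asserted to be 4-byte aligned) has its low two bits clear, so getObject() can reject an id whose slot has already been released. A compact sketch of the tagging idea follows; it is loosely modelled on the patch (fixed capacity, no locking, no expand) and is not the real NdbObjectIdMap.

#include <cassert>
#include <cstdint>
#include <vector>

class ObjectIdMap {
  static const uintptr_t InvalidId = ~uintptr_t(0);
  union Entry { uintptr_t m_next; void* m_obj; };
  std::vector<Entry> m_map;
  uintptr_t m_firstFree;               // tagged: 2 * index + 1, or InvalidId
public:
  explicit ObjectIdMap(size_t size) : m_map(size), m_firstFree(1) {
    for (size_t i = 0; i < size; i++)          // free slot i links to i + 1
      m_map[i].m_next = 2 * (i + 1) + 1;       // odd => "this is a link"
    m_map[size - 1].m_next = InvalidId;
  }
  // Returns an id of the form index << 2, mirroring the original map.
  uintptr_t map(void* object) {
    assert((uintptr_t(object) & 3) == 0);      // objects must be aligned
    if (m_firstFree == InvalidId) return InvalidId;
    uintptr_t ff = m_firstFree >> 1;           // untag the index
    m_firstFree = m_map[ff].m_next;
    m_map[ff].m_obj = object;
    return ff << 2;
  }
  void unmap(uintptr_t id, void* object) {
    uintptr_t i = id >> 2;
    assert(m_map[i].m_obj == object);
    m_map[i].m_next = m_firstFree;
    m_firstFree = 2 * i + 1;                   // re-tag this slot as free
  }
  void* getObject(uintptr_t id) const {
    uintptr_t i = id >> 2;
    if (i >= m_map.size()) return 0;
    // Low bits set => the slot is on the free list, not a live object
    // (type-punning through the union, just as the patch does).
    if ((m_map[i].m_next & 3) != 0) return 0;
    return m_map[i].m_obj;
  }
};

int main() {
  int a = 0, b = 0;
  ObjectIdMap m(4);
  uintptr_t ia = m.map(&a), ib = m.map(&b);
  assert(m.getObject(ia) == &a && m.getObject(ib) == &b);
  m.unmap(ia, &a);
  assert(m.getObject(ia) == 0);                // stale id is now rejected
  return 0;
}
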
=== modified file 'storage/ndb/test/ndbapi/testScan.cpp'
--- a/storage/ndb/test/ndbapi/testScan.cpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/test/ndbapi/testScan.cpp	2011-03-28 11:59:09 +0000
@@ -296,12 +296,25 @@ int runScanRead(NDBT_Context* ctx, NDBT_
   int records = ctx->getProperty("Rows", ctx->getNumRecords());
   int parallelism = ctx->getProperty("Parallelism", 240);
   int abort = ctx->getProperty("AbortProb", 5);
+  int tupscan = ctx->getProperty("TupScan", (Uint32)0);
 
   int i = 0;
   HugoTransactions hugoTrans(*ctx->getTab());
   while (i<loops && !ctx->isTestStopped()) {
     g_info << i << ": ";
-    if (hugoTrans.scanReadRecords(GETNDB(step), records, abort, parallelism) != 0){
+
+    int scan_flags = 0;
+    if (tupscan == 1)
+    {
+      scan_flags |= NdbScanOperation::SF_TupScan;
+      if (hugoTrans.scanReadRecords(GETNDB(step), records, abort, parallelism,
+                                    NdbOperation::LM_CommittedRead,
+                                    scan_flags) != 0)
+        return NDBT_FAILED;
+    }
+    else if (hugoTrans.scanReadRecords(GETNDB(step), records, abort, parallelism)
+             != 0)
+    {
       return NDBT_FAILED;
     }
     i++;
@@ -1568,6 +1581,13 @@ TESTCASE("ScanRead100", 
   STEPS(runScanRead, 100);
   FINALIZER(runClearTable);
 }
+TESTCASE("TupScanRead100",
+	 "Verify scan requirement: Scan with 100 simultaneous threads"){
+  TC_PROPERTY("TupScan", 1);
+  INITIALIZER(runLoadTable);
+  STEPS(runScanRead, 100);
+  FINALIZER(runClearTable);
+}
 TESTCASE("Scan-bug8262", 
 	 ""){
   TC_PROPERTY("Rows", 1);

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2011-02-01 08:36:25 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2011-03-28 11:59:09 +0000
@@ -395,6 +395,10 @@ max-time: 1800
 cmd: testScan
 args: -n ScanRead100 -l 100 T1 D1 D2 
 
+max-time: 1800
+cmd: testScan
+args: -n TupScanRead100 -l 100 T1 D1 D2 
+
 max-time: 600
 cmd: testScan
 args: -n ScanRead40 -l 100 T1 D1 D2 

=== modified file 'storage/ndb/tools/restore/consumer_restore.cpp'
--- a/storage/ndb/tools/restore/consumer_restore.cpp	2011-02-19 03:14:45 +0000
+++ b/storage/ndb/tools/restore/consumer_restore.cpp	2011-03-25 08:01:16 +0000
@@ -1233,12 +1233,24 @@ BackupRestore::update_apply_status(const
   Uint32 server_id= 0;
   Uint64 epoch= Uint64(metaData.getStopGCP());
   Uint32 version= metaData.getNdbVersion();
-  if (version >= NDBD_MICRO_GCP_63)
-    epoch<<= 32; // Only gci_hi is saved...
-  else if (version >= NDBD_MICRO_GCP_62 &&
-           getMinor(version) == 2)
+
+  /**
+   * Bug#XXX, stopGCP is not really stop GCP, but stopGCP - 1
+   */
+  epoch += 1;
+
+  if (version >= NDBD_MICRO_GCP_63 ||
+      (version >= NDBD_MICRO_GCP_62 && getMinor(version) == 2))
+  {
     epoch<<= 32; // Only gci_hi is saved...
 
+    /**
+     * Backup contains all epochs with those top bits,
+     * so we indicate that with max setting
+     */
+    epoch += (Uint64(1) << 32) - 1;
+  }
+
   Uint64 zero= 0;
   char empty_string[1];
   empty_string[0]= 0;

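For backups written by micro-GCP versions, the restore change stores stopGCP + 1 as gci_hi in the upper 32 bits of the apply-status epoch and saturates gci_lo, recording that every epoch up to and including that GCI is contained in the backup. A worked sketch of the packing, with a hypothetical stopGCP value:

#include <cassert>
#include <cstdint>

// Pack gci_hi/gci_lo into the 64-bit epoch the same way the patch does.
static uint64_t apply_status_epoch(uint64_t stopGCP)
{
  uint64_t epoch = stopGCP;
  epoch += 1;                          // stopGCP is really stopGCP - 1
  epoch <<= 32;                        // only gci_hi is saved in the backup
  epoch += (uint64_t(1) << 32) - 1;    // gci_lo = 0xffffffff: all epochs
                                       // with this gci_hi are included
  return epoch;
}

int main()
{
  const uint64_t stopGCP = 12345;      // hypothetical value from metadata
  uint64_t epoch = apply_status_epoch(stopGCP);
  assert((epoch >> 32) == stopGCP + 1);              // gci_hi
  assert((epoch & 0xffffffffULL) == 0xffffffffULL);  // gci_lo saturated
  return 0;
}
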
No bundle (reason: revision is a merge).