List: Commits — « Previous Message | Next Message »
From: Pekka Nousiainen    Date: November 15 2010 9:23am
Subject:bzr commit into mysql-5.1-telco-7.0 branch (pekka:4001)
View as plain text  
#At file:///export/space/pekka/ms/ms-bug57032-70/ based on revid:magnus.blaudd@stripped

 4001 Pekka Nousiainen	2010-11-15 [merge]
      merge 6.3 to 7.0

    modified:
      mysql-test/suite/ndb/r/ndb_index_unique.result
      mysql-test/suite/ndb/t/ndb_index_unique.test
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster.h
      sql/ha_ndbcluster_cond.cc
      sql/ha_ndbcluster_cond.h
=== modified file 'mysql-test/suite/ndb/r/ndb_index_unique.result'
--- a/mysql-test/suite/ndb/r/ndb_index_unique.result	2010-11-01 13:15:22 +0000
+++ b/mysql-test/suite/ndb/r/ndb_index_unique.result	2010-11-14 14:16:10 +0000
@@ -737,3 +737,155 @@ drop table t1;
 alter tablespace ts1 drop datafile 'datafile.dat' engine=ndb;
 drop tablespace ts1 engine=ndb;
 drop logfile group lg1 engine=ndb;
+# bug#57032
+create table t1 (
+a int not null,
+b int,
+primary key using hash (a),
+unique key using hash (b)
+)
+engine ndb;
+Warnings:
+Warning	1121	Ndb does not support unique index on NULL valued attributes, index access with NULL value will become full table scan
+insert into t1 values
+(0,0),(1,1),(2,2),(3,3),(4,4),
+(5,null),(6,null),(7,null),(8,null),(9,null);
+set @old_ecpd = @@session.engine_condition_pushdown;
+set engine_condition_pushdown = 0;
+select a from t1 where b is not null order by a;
+a
+0
+1
+2
+3
+4
+select a from t1 where b is null order by a;
+a
+5
+6
+7
+8
+9
+set engine_condition_pushdown = 1;
+select a from t1 where b is not null order by a;
+a
+0
+1
+2
+3
+4
+select a from t1 where b is null order by a;
+a
+5
+6
+7
+8
+9
+set engine_condition_pushdown = @old_ecpd;
+drop table t1;
+create table t1 (
+a int not null,
+b int,
+c int,
+primary key using hash (a),
+unique key using hash (b,c)
+)
+engine ndb;
+Warnings:
+Warning	1121	Ndb does not support unique index on NULL valued attributes, index access with NULL value will become full table scan
+insert into t1 values
+(0,0,0),(1,1,1),(2,2,1),(3,3,1),(4,4,2),
+(5,null,0),(6,null,1),(7,null,1),(8,null,1),(9,null,2),
+(10,0,null),(11,1,null),(12,1,null),(13,1,null),(14,2,null),
+(15,null,null),(16,null,null),(17,null,null),(18,null,null),(19,null,null);
+set @old_ecpd = @@session.engine_condition_pushdown;
+set engine_condition_pushdown = 0;
+select a from t1 where b is not null and c = 1 order by a;
+a
+1
+2
+3
+select a from t1 where b is null and c = 1 order by a;
+a
+6
+7
+8
+select a from t1 where b = 1 and c is null order by a;
+a
+11
+12
+13
+select a from t1 where b is null and c is null order by a;
+a
+15
+16
+17
+18
+19
+select a from t1 where b is not null and c is null order by a;
+a
+10
+11
+12
+13
+14
+select a from t1 where b is null and c is not null order by a;
+a
+5
+6
+7
+8
+9
+select a from t1 where b is not null and c is not null order by a;
+a
+0
+1
+2
+3
+4
+set engine_condition_pushdown = 1;
+select a from t1 where b is not null and c = 1 order by a;
+a
+1
+2
+3
+select a from t1 where b is null and c = 1 order by a;
+a
+6
+7
+8
+select a from t1 where b = 1 and c is null order by a;
+a
+11
+12
+13
+select a from t1 where b is null and c is null order by a;
+a
+15
+16
+17
+18
+19
+select a from t1 where b is not null and c is null order by a;
+a
+10
+11
+12
+13
+14
+select a from t1 where b is null and c is not null order by a;
+a
+5
+6
+7
+8
+9
+select a from t1 where b is not null and c is not null order by a;
+a
+0
+1
+2
+3
+4
+set engine_condition_pushdown = @old_ecpd;
+drop table t1;

=== modified file 'mysql-test/suite/ndb/t/ndb_index_unique.test'
--- a/mysql-test/suite/ndb/t/ndb_index_unique.test	2010-11-01 13:15:22 +0000
+++ b/mysql-test/suite/ndb/t/ndb_index_unique.test	2010-11-14 14:16:10 +0000
@@ -433,4 +433,85 @@ alter tablespace ts1 drop datafile 'data
 drop tablespace ts1 engine=ndb;
 drop logfile group lg1 engine=ndb;
 
-# end of tests
\ No newline at end of file
+# bug#57032 'NOT NULL' evaluation is incorrect when using an 'unique index
+
+--echo # bug#57032
+
+create table t1 (
+  a int not null,
+  b int,
+  primary key using hash (a),
+  unique key using hash (b)
+)
+engine ndb;
+
+insert into t1 values
+  (0,0),(1,1),(2,2),(3,3),(4,4),
+  (5,null),(6,null),(7,null),(8,null),(9,null);
+
+set @old_ecpd = @@session.engine_condition_pushdown;
+
+set engine_condition_pushdown = 0;
+# failed, empty result
+select a from t1 where b is not null order by a;
+# worked
+select a from t1 where b is null order by a;
+
+set engine_condition_pushdown = 1;
+# failed, empty result
+select a from t1 where b is not null order by a;
+# worked
+select a from t1 where b is null order by a;
+
+set engine_condition_pushdown = @old_ecpd;
+
+drop table t1;
+
+create table t1 (
+  a int not null,
+  b int,
+  c int,
+  primary key using hash (a),
+  unique key using hash (b,c)
+)
+engine ndb;
+
+insert into t1 values
+  (0,0,0),(1,1,1),(2,2,1),(3,3,1),(4,4,2),
+  (5,null,0),(6,null,1),(7,null,1),(8,null,1),(9,null,2),
+  (10,0,null),(11,1,null),(12,1,null),(13,1,null),(14,2,null),
+  (15,null,null),(16,null,null),(17,null,null),(18,null,null),(19,null,null);
+
+set @old_ecpd = @@session.engine_condition_pushdown;
+
+set engine_condition_pushdown = 0;
+# worked
+select a from t1 where b is not null and c = 1 order by a;
+# failed, empty result
+select a from t1 where b is null and c = 1 order by a;
+# failed, empty result
+select a from t1 where b = 1 and c is null order by a;
+# worked
+select a from t1 where b is null and c is null order by a;
+select a from t1 where b is not null and c is null order by a;
+select a from t1 where b is null and c is not null order by a;
+select a from t1 where b is not null and c is not null order by a;
+
+set engine_condition_pushdown = 1;
+# worked
+select a from t1 where b is not null and c = 1 order by a;
+# failed, empty result
+select a from t1 where b is null and c = 1 order by a;
+# failed, empty result
+select a from t1 where b = 1 and c is null order by a;
+# worked
+select a from t1 where b is null and c is null order by a;
+select a from t1 where b is not null and c is null order by a;
+select a from t1 where b is null and c is not null order by a;
+select a from t1 where b is not null and c is not null order by a;
+
+set engine_condition_pushdown = @old_ecpd;
+
+drop table t1;
+
+# end of tests

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2010-11-10 15:37:21 +0000
+++ b/sql/ha_ndbcluster.cc	2010-11-15 09:23:10 +0000
@@ -3416,8 +3416,8 @@ guess_scan_flags(NdbOperation::LockMode
  */
 
 int ha_ndbcluster::full_table_scan(const KEY* key_info, 
-                                   const uchar *key, 
-                                   uint key_len,
+                                   const key_range *start_key,
+                                   const key_range *end_key,
                                    uchar *buf)
 {
   int error;
@@ -3506,7 +3506,7 @@ int ha_ndbcluster::full_table_scan(const
         my_errno= HA_ERR_OUT_OF_MEM;
         DBUG_RETURN(my_errno);
       }       
-      if (m_cond->generate_scan_filter_from_key(&code, &options, key_info, key, key_len, buf))
+      if (m_cond->generate_scan_filter_from_key(&code, &options, key_info, start_key, end_key, buf))
         ERR_RETURN(code.getNdbError());
     }
 
@@ -5413,8 +5413,8 @@ int ha_ndbcluster::read_range_first_to_b
     }
     else if (type == UNIQUE_INDEX)
       DBUG_RETURN(full_table_scan(key_info, 
-                                  start_key->key, 
-                                  start_key->length, 
+                                  start_key,
+                                  end_key,
                                   buf));
     break;
   default:
@@ -5521,7 +5521,7 @@ int ha_ndbcluster::rnd_next(uchar *buf)
   ha_statistic_increment(&SSV::ha_read_rnd_next_count);
 
   if (!m_active_cursor)
-    DBUG_RETURN(full_table_scan(NULL, NULL, 0, buf));
+    DBUG_RETURN(full_table_scan(NULL, NULL, NULL, buf));
   DBUG_RETURN(next_result(buf));
 }
 

=== modified file 'sql/ha_ndbcluster.h'
--- a/sql/ha_ndbcluster.h	2010-11-08 13:59:33 +0000
+++ b/sql/ha_ndbcluster.h	2010-11-15 09:23:10 +0000
@@ -659,8 +659,8 @@ private:
   int unique_index_read(const uchar *key, uint key_len, 
                         uchar *buf);
   int full_table_scan(const KEY* key_info, 
-                      const uchar *key, 
-                      uint key_len,
+                      const key_range *start_key,
+                      const key_range *end_key,
                       uchar *buf);
   int flush_bulk_insert(bool allow_batch= FALSE);
   int ndb_write_row(uchar *record, bool primary_key_update,

=== modified file 'sql/ha_ndbcluster_cond.cc'
--- a/sql/ha_ndbcluster_cond.cc	2010-11-11 08:21:34 +0000
+++ b/sql/ha_ndbcluster_cond.cc	2010-11-15 09:23:10 +0000
@@ -1452,44 +1452,163 @@ ha_ndbcluster_cond::generate_scan_filter
 }
 
 
+/*
+  Optimizer sometimes does hash index lookup of a key where some
+  key parts are null.  The set of cases where this happens makes
+  no sense but cannot be ignored since optimizer may expect the result
+  to be filtered accordingly.  The scan is actually on the table and
+  the index bounds are pushed down.
+*/
 int ha_ndbcluster_cond::generate_scan_filter_from_key(NdbInterpretedCode* code,
                                                       NdbScanOperation::ScanOptions* options,
                                                       const KEY* key_info, 
-                                                      const uchar *key, 
-                                                      uint key_len,
+                                                      const key_range *start_key,
+                                                      const key_range *end_key,
                                                       uchar *buf)
 {
-  KEY_PART_INFO* key_part= key_info->key_part;
-  KEY_PART_INFO* end= key_part+key_info->key_parts;
-  NdbScanFilter filter(code);
-  int res;
   DBUG_ENTER("generate_scan_filter_from_key");
 
+#ifndef DBUG_OFF
+  {
+    DBUG_PRINT("info", ("key parts:%u length:%u",
+                        key_info->key_parts, key_info->key_length));
+    const key_range* keylist[2]={ start_key, end_key };
+    for (uint j=0; j <= 1; j++)
+    {
+      char buf[8192];
+      const key_range* key=keylist[j];
+      if (key == 0)
+      {
+        sprintf(buf, "key range %u: none", j);
+      }
+      else
+      {
+        sprintf(buf, "key range %u: flag:%u part", j, key->flag);
+        const KEY_PART_INFO* key_part=key_info->key_part;
+        const uchar* ptr=key->key;
+        for (uint i=0; i < key_info->key_parts; i++)
+        {
+          sprintf(buf+strlen(buf), " %u:", i);
+          for (uint k=0; k < key_part->store_length; k++)
+          {
+            sprintf(buf+strlen(buf), " %02x", ptr[k]);
+          }
+          ptr+=key_part->store_length;
+          if (ptr - key->key >= key->length)
+          {
+            /*
+              key_range has no count of parts so must test byte length.
+              But this is not the place for following assert.
+            */
+            // DBUG_ASSERT(ptr - key->key == key->length);
+            break;
+          }
+          key_part++;
+        }
+      }
+      DBUG_PRINT("info", ("%s", buf));
+    }
+  }
+#endif
+
+  NdbScanFilter filter(code);
+  int res;
   filter.begin(NdbScanFilter::AND);
-  for (; key_part != end; key_part++) 
+  do
   {
-    Field* field= key_part->field;
-    uint32 pack_len= field->pack_length();
-    const uchar* ptr= key;
-    DBUG_PRINT("info", ("Filtering value for %s", field->field_name));
-    DBUG_DUMP("key", ptr, pack_len);
-    if (key_part->null_bit)
+    /*
+      Case "x is not null".
+      Seen with index(x) where it becomes range "null < x".
+      Not seen with index(x,y) for any combination of bounds
+      which include "is not null".
+    */
+    if (start_key != 0 &&
+        start_key->flag == HA_READ_AFTER_KEY &&
+        end_key == 0 &&
+        key_info->key_parts == 1)
     {
-      DBUG_PRINT("info", ("Generating ISNULL filter"));
-      if (filter.isnull(key_part->fieldnr-1) == -1)
-	DBUG_RETURN(1);
+      const KEY_PART_INFO* key_part=key_info->key_part;
+      if (key_part->null_bit != 0) // nullable (must be)
+      {
+        const Field* field=key_part->field;
+        const uchar* ptr= start_key->key;
+        if (ptr[0] != 0) // null (in "null < x")
+        {
+          DBUG_PRINT("info", ("Generating ISNOTNULL filter for nullable %s",
+                              field->field_name));
+          if (filter.isnotnull(key_part->fieldnr-1) == -1)
+            DBUG_RETURN(1);
+          break;
+        }
+      }
     }
-    else
+
+    /*
+      Case "x is null" in an EQ range.
+      Seen with index(x) for "x is null".
+      Seen with index(x,y) for "x is null and y = 1".
+      Not seen with index(x,y) for "x is null and y is null".
+      Seen only when all key parts are present (but there is
+      no reason to limit the code to this case).
+    */
+    if (start_key != 0 &&
+        start_key->flag == HA_READ_KEY_EXACT &&
+        end_key != 0 &&
+        end_key->flag == HA_READ_AFTER_KEY &&
+        start_key->length == end_key->length &&
+        memcmp(start_key->key, end_key->key, start_key->length) == 0)
     {
-      DBUG_PRINT("info", ("Generating EQ filter"));
-      if (filter.cmp(NdbScanFilter::COND_EQ, 
-		     key_part->fieldnr-1,
-		     ptr,
-		     pack_len) == -1)
-	DBUG_RETURN(1);
+      const KEY_PART_INFO* key_part=key_info->key_part;
+      const uchar* ptr=start_key->key;
+      for (uint i=0; i < key_info->key_parts; i++)
+      {
+        const Field* field=key_part->field;
+        if (key_part->null_bit) // nullable
+        {
+          if (ptr[0] != 0) // null
+          {
+            DBUG_PRINT("info", ("Generating ISNULL filter for nullable %s",
+                                field->field_name));
+            if (filter.isnull(key_part->fieldnr-1) == -1)
+              DBUG_RETURN(1);
+          }
+          else
+          {
+            DBUG_PRINT("info", ("Generating EQ filter for nullable %s",
+                                field->field_name));
+            if (filter.cmp(NdbScanFilter::COND_EQ, 
+                           key_part->fieldnr-1,
+                           ptr + 1, // skip null-indicator byte
+                           field->pack_length()) == -1)
+              DBUG_RETURN(1);
+          }
+        }
+        else
+        {
+          DBUG_PRINT("info", ("Generating EQ filter for non-nullable %s",
+                              field->field_name));
+          if (filter.cmp(NdbScanFilter::COND_EQ, 
+                         key_part->fieldnr-1,
+                         ptr,
+                         field->pack_length()) == -1)
+            DBUG_RETURN(1);
+        }
+        ptr+=key_part->store_length;
+        if (ptr - start_key->key >= start_key->length)
+        {
+          break;
+        }
+        key_part++;
+      }
+      break;
     }
-    key += key_part->store_length;
-  }      
+
+    DBUG_PRINT("info", ("Unknown hash index scan"));
+    // enable to catch new cases when optimizer changes
+    // DBUG_ASSERT(false);
+  }
+  while (0);
+
   // Add any pushed condition
   if (m_cond_stack &&
       (res= generate_scan_filter_from_cond(filter)))

=== modified file 'sql/ha_ndbcluster_cond.h'
--- a/sql/ha_ndbcluster_cond.h	2010-11-11 08:21:34 +0000
+++ b/sql/ha_ndbcluster_cond.h	2010-11-15 09:23:10 +0000
@@ -661,8 +661,8 @@ public:
   int generate_scan_filter_from_key(NdbInterpretedCode* code,
                                     NdbScanOperation::ScanOptions* options,
                                     const KEY* key_info, 
-                                    const uchar *key, 
-                                    uint key_len,
+                                    const key_range *start_key,
+                                    const key_range *end_key,
                                     uchar *buf);
 private:
   bool serialize_cond(const COND *cond, Ndb_cond_stack *ndb_cond,


Attachment: [text/bzr-bundle] bzr/pekka@mysql.com-20101115092310-yr9m4d3s4gvg7zdf.bundle
Thread
bzr commit into mysql-5.1-telco-7.0 branch (pekka:4001) — Pekka Nousiainen, 15 Nov