List:Commits« Previous MessageNext Message »
From:Sergey Petrunia Date:December 22 2005 11:11am
Subject:bk commit into 5.1 tree (sergefp:1.1982)
View as plain text  
Below is the list of changes that have just been committed into a local
5.1 repository of psergey. When psergey does a push these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository
see http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet
  1.1982 05/12/22 14:10:56 sergefp@stripped +9 -0
  Merge

  sql/handler.h
    1.174 05/12/22 14:10:51 sergefp@stripped +1 -1
    Merge

  sql/sql_yacc.yy
    1.433 05/12/22 14:09:35 sergefp@stripped +0 -0
    Auto merged

  sql/sql_select.cc
    1.378 05/12/22 14:09:34 sergefp@stripped +0 -0
    Auto merged

  sql/sql_partition.cc
    1.18 05/12/22 14:09:34 sergefp@stripped +0 -0
    Auto merged

  sql/sql_lex.h
    1.213 05/12/22 14:09:34 sergefp@stripped +0 -0
    Auto merged

  sql/sql_class.cc
    1.228 05/12/22 14:09:34 sergefp@stripped +0 -0
    Auto merged

  sql/item.h
    1.186 05/12/22 14:09:34 sergefp@stripped +0 -0
    Auto merged

  sql/ha_partition.cc
    1.16 05/12/22 14:09:34 sergefp@stripped +0 -0
    Auto merged

  sql/ha_ndbcluster.cc
    1.227 05/12/22 14:09:33 sergefp@stripped +0 -0
    Auto merged

# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	sergefp
# Host:	newbox.mylan
# Root:	/home/psergey/mysql-5.1-ppruning-merge-r2-pull/RESYNC

--- 1.173/sql/handler.h	2005-12-22 07:10:57 +03:00
+++ 1.174/sql/handler.h	2005-12-22 14:10:51 +03:00
@@ -547,19 +547,52 @@
 
   List<char> part_field_list;
   List<char> subpart_field_list;
-
+  
+  /* 
+    If there is no subpartitioning, use only this func to get partition ids.
+    If there is subpartitioning, use this func to get partition id when
+    you have both partition and subpartition fields.
+  */
   get_part_id_func get_partition_id;
+
+  /* Get partition id when we don't have subpartition fields */
   get_part_id_func get_part_partition_id;
-  get_subpart_id_func get_subpartition_id;
 
+  /* 
+    Get subpartition id when we don't have partition fields but we do
+    have subpartition fields.
+    Mikael said that for given constant tuple 
+    {subpart_field1, ..., subpart_fieldN} the subpartition id will be the
+    same in all subpartitions
+  */
+  get_subpart_id_func get_subpartition_id;
+  
+  /* NULL-terminated list of fields used in partitioned expression */
   Field **part_field_array;
+  /* NULL-terminated list of fields used in subpartitioned expression */
   Field **subpart_field_array;
+
+  /* 
+    Array of all fields used in partition and subpartition expression,
+    without duplicates, NULL-terminated.
+  */
   Field **full_part_field_array;
 
   Item *part_expr;
   Item *subpart_expr;
 
   Item *item_free_list;
+  
+  /* 
+    A bitmap of partitions used by the current query. 
+    Usage pattern:
+    * It is guaranteed that all partitions are set to be unused on query start.
+    * Before index/rnd_init(), partition pruning code sets the bits for used
+      partitions.
+    * The handler->extra(HA_EXTRA_RESET) call at query end sets all partitions
+      to be unused.
+  */
+  MY_BITMAP used_partitions;
 
   union {
     longlong *range_int_array;
@@ -760,6 +793,13 @@
 bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
                             uint part_info_len, TABLE *table,
                             handlerton *default_db_type);
+void make_used_partitions_str(partition_info *part_info, String *parts_str);
+uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
+                                       bool left_endpoint,
+                                       bool include_endpoint);
+uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
+                                           bool left_endpoint,
+                                           bool include_endpoint);
 #endif
 
 

--- 1.185/sql/item.h	2005-12-21 16:07:47 +03:00
+++ 1.186/sql/item.h	2005-12-22 14:09:34 +03:00
@@ -368,6 +368,28 @@
   }
 };
 
+
+/*
+  This enum is used to report information about monotonicity of function
+  represented by Item* tree.
+  Monotonicity is defined only for Item* trees that represent table
+  partitioning expressions (i.e. have no subselects/user vars/PS parameters
+  etc etc). An Item* tree is assumed to have the same monotonicity properties
+  as its corresponding function F:
+
+  [signed] longlong F(field1, field2, ...) {
+    put values of field_i into table record buffer;
+    return item->val_int(); 
+  }
+*/
+
+typedef enum monotonicity_info 
+{
+   NON_MONOTONIC,              /* none of the below holds */
+   MONOTONIC_INCREASING,       /* F() is unary and "x < y" => "F(x) <= F(y)" */
+   MONOTONIC_STRICT_INCREASING /* F() is unary and "x < y" => "F(x) <  F(y)" */
+} enum_monotonicity_info;
+
 /*************************************************************************/
 
 typedef bool (Item::*Item_processor)(byte *arg);
@@ -466,6 +488,15 @@
   virtual Item_result cast_to_int_type() const { return result_type(); }
   virtual enum_field_types field_type() const;
   virtual enum Type type() const =0;
+  
+  /*
+    Return information about function monotonicity. See comment for
+    enum_monotonicity_info for details. This function can only be called
+    after fix_fields() call.
+  */
+  virtual enum_monotonicity_info get_monotonicity_info() const
+  { return NON_MONOTONIC; }
+
   /* valXXX methods must return NULL or 0 or 0.0 if null_value is set. */
   /*
     Return double precision floating point representation of item.
@@ -1139,6 +1170,10 @@
   enum_field_types field_type() const
   {
     return field->type();
+  }
+  enum_monotonicity_info get_monotonicity_info() const
+  {
+    return MONOTONIC_STRICT_INCREASING;
   }
   Field *get_tmp_table_field() { return result_field; }
   Field *tmp_table_field(TABLE *t_arg) { return result_field; }

--- 1.227/sql/sql_class.cc	2005-12-22 07:10:53 +03:00
+++ 1.228/sql/sql_class.cc	2005-12-22 14:09:34 +03:00
@@ -755,6 +755,13 @@
   field_list.push_back(new Item_empty_string("select_type", 19, cs));
   field_list.push_back(item= new Item_empty_string("table", NAME_LEN, cs));
   item->maybe_null= 1;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+  if (lex->describe & DESCRIBE_PARTITIONS)
+  {
+    field_list.push_back(item= new Item_empty_string("partitions", 10, cs));
+    item->maybe_null= 1;
+  }
+#endif
   field_list.push_back(item= new Item_empty_string("type", 10, cs));
   item->maybe_null= 1;
   field_list.push_back(item=new Item_empty_string("possible_keys",

--- 1.212/sql/sql_lex.h	2005-12-22 07:10:51 +03:00
+++ 1.213/sql/sql_lex.h	2005-12-22 14:09:34 +03:00
@@ -103,6 +103,11 @@
 // describe/explain types
 #define DESCRIBE_NORMAL		1
 #define DESCRIBE_EXTENDED	2
+/*
+  This is not #ifdef'ed because we want "EXPLAIN PARTITIONS ..." to produce
+  additional "partitions" column even if partitioning is not compiled in.
+*/
+#define DESCRIBE_PARTITIONS	4
 
 enum enum_sp_suid_behaviour
 {

--- 1.377/sql/sql_select.cc	2005-12-22 07:10:53 +03:00
+++ 1.378/sql/sql_select.cc	2005-12-22 14:09:34 +03:00
@@ -633,6 +633,21 @@
     DBUG_RETURN(0);
   }
 
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+  {
+    TABLE_LIST *tbl;
+    for (tbl= select_lex->leaf_tables; tbl; tbl= tbl->next_leaf)
+    {
+      if (!tbl->embedding)
+      {
+        Item *prune_cond= tbl->on_expr? tbl->on_expr : conds;
+        tbl->table->no_partitions_used= prune_partitions(thd, tbl->table,
+	                                                 prune_cond);
+      }
+    }
+  }
+#endif
+
   /* Optimize count(*), min() and max() */
   if (tables_list && tmp_table_param.sum_func_count && ! group_list)
   {
@@ -2018,7 +2033,11 @@
     if (*s->on_expr_ref)
     {
       /* s is the only inner table of an outer join */
-      if (!table->file->records && !embedding)
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+      if ((!table->file->records || table->no_partitions_used) && !embedding)
+#else
+      if (!table->file->records && !embedding)
+#endif
       {						// Empty table
         s->dependent= 0;                        // Ignore LEFT JOIN depend.
 	set_position(join,const_count++,s,(KEYUSE*) 0);
@@ -2045,8 +2064,14 @@
       while (embedding);
       continue;
     }
-
-    if ((table->s->system || table->file->records <= 1) && ! s->dependent &&
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+    bool no_partitions_used= table->no_partitions_used;
+#else
+    const bool no_partitions_used= FALSE;
+#endif
+    if ((table->s->system || table->file->records <= 1 ||
+         no_partitions_used) &&
+	!s->dependent &&
 	!(table->file->table_flags() & HA_NOT_EXACT_COUNT) &&
         !table->fulltext_searched)
     {
@@ -13768,6 +13793,9 @@
 					strlen(join->select_lex->type), cs));
     for (uint i=0 ; i < 7; i++)
       item_list.push_back(item_null);
+    if (join->thd->lex->describe & DESCRIBE_PARTITIONS)
+      item_list.push_back(item_null);
+  
     item_list.push_back(new Item_string(message,strlen(message),cs));
     if (result->send_data(item_list))
       join->error= 1;
@@ -13888,7 +13916,28 @@
 	item_list.push_back(new Item_string(table->alias,
 					    strlen(table->alias),
 					    cs));
-      /* type */
+      /* "partitions" column */
+      if (join->thd->lex->describe & DESCRIBE_PARTITIONS)
+      {
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+        partition_info *part_info;
+        if (!table->derived_select_number && 
+            (part_info= table->part_info))
+        {          
+          char parts_buff[128]; 
+          String parts_str(parts_buff,sizeof(parts_buff),cs);
+          make_used_partitions_str(part_info, &parts_str);
+          item_list.push_back(new Item_string(parts_str.ptr(),
+                                              parts_str.length(), cs));
+        }
+        else
+          item_list.push_back(item_null);
+#else
+        /* just produce empty column if partitioning is not compiled in */
+        item_list.push_back(item_null); 
+#endif
+      }
+      /* "type" column */
       item_list.push_back(new Item_string(join_type_str[tab->type],
 					  strlen(join_type_str[tab->type]),
 					  cs));

--- 1.432/sql/sql_yacc.yy	2005-12-22 07:10:54 +03:00
+++ 1.433/sql/sql_yacc.yy	2005-12-22 14:09:35 +03:00
@@ -7416,7 +7416,9 @@
 opt_extended_describe:
 	/* empty */ {}
 	| EXTENDED_SYM { Lex->describe|= DESCRIBE_EXTENDED; }
+	| PARTITIONS_SYM { Lex->describe|= DESCRIBE_PARTITIONS; }
 	;
+
 
 opt_describe_column:
 	/* empty */	{}

--- 1.226/sql/ha_ndbcluster.cc	2005-12-21 21:24:56 +03:00
+++ 1.227/sql/ha_ndbcluster.cc	2005-12-22 14:09:33 +03:00
@@ -3121,7 +3121,6 @@
   DBUG_VOID_RETURN;
 }
 
-
 int ha_ndbcluster::extra(enum ha_extra_function operation)
 {
   DBUG_ENTER("extra");
@@ -3130,6 +3129,8 @@
     DBUG_PRINT("info", ("HA_EXTRA_RESET"));
     DBUG_PRINT("info", ("Clearing condition stack"));
     cond_clear();
+    if (m_part_info)
+      bitmap_clear_all(&m_part_info->used_partitions);
     break;
   case HA_EXTRA_IGNORE_DUP_KEY:       /* Dup keys don't rollback everything*/
     DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY"));

--- 1.15/sql/ha_partition.cc	2005-12-22 07:10:56 +03:00
+++ 1.16/sql/ha_partition.cc	2005-12-22 14:09:34 +03:00
@@ -2799,6 +2799,8 @@
   handler **file;
   DBUG_ENTER("ha_partition::reset");
   file= m_file;
+  if (m_part_info)
+    bitmap_clear_all(&m_part_info->used_partitions);
   do
   {
     if ((tmp= (*file)->reset()))

--- 1.17/sql/sql_partition.cc	2005-12-21 21:24:58 +03:00
+++ 1.18/sql/sql_partition.cc	2005-12-22 14:09:34 +03:00
@@ -2478,16 +2478,94 @@
     if (list_value < part_func_value)
       min_list_index= list_index + 1;
     else if (list_value > part_func_value)
+    {
+      if (!list_index)
+        goto notfound;
       max_list_index= list_index - 1;
-    else {
+    }
+    else
+    {
       *part_id= (uint32)list_array[list_index].partition_id;
       DBUG_RETURN(FALSE);
     }
   }
+notfound:
   *part_id= 0;
   DBUG_RETURN(TRUE);
 }
 
+/*
+  Find the part of part_info->list_array that corresponds to given interval
+  
+  SYNOPSIS 
+    get_list_array_idx_for_endpoint()
+      part_info         Partitioning info (partitioning type must be LIST)
+      left_endpoint     TRUE  - the interval is [a; +inf) or (a; +inf)
+                        FALSE - the interval is (-inf; a] or (-inf; a)
+      include_endpoint  TRUE iff the interval includes the endpoint
+
+  DESCRIPTION
+    This function finds the part of part_info->list_array where values of
+    list_array[idx].list_value are contained within the specified interval.
+    list_array is ordered by list_value, so
+    1. For [a; +inf) or (a; +inf)-type intervals (left_endpoint==TRUE), the 
+       sought array part starts at some index idx and continues till array 
+       end.
+       The function returns first number idx, such that 
+       list_array[idx].list_value is contained within the passed interval.
+       
+    2. For (-inf; a] or (-inf; a)-type intervals (left_endpoint==FALSE), the
+       sought array part starts at array start and continues till some last 
+       index idx.
+       The function returns first number idx, such that 
+       list_array[idx].list_value is NOT contained within the passed interval.
+       If all array elements are contained, part_info->no_list_values is
+       returned.
+
+  NOTE
+    The caller will call this function and then will run along the part of 
+    list_array to collect partition ids. If the number of list values is 
+    significantly higher than the number of partitions, this could be slow and
+    we could invent some other approach. The "run over list array" part is
+    already wrapped in a get_next()-like function.
+
+  RETURN
+    The edge of corresponding part_info->list_array part.
+*/
+
+uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
+                                       bool left_endpoint,
+                                       bool include_endpoint)
+{
+  DBUG_ENTER("get_list_array_idx_for_endpoint");
+  LIST_PART_ENTRY *list_array= part_info->list_array;
+  uint list_index;
+  longlong list_value;
+  uint min_list_index= 0, max_list_index= part_info->no_list_values - 1;
+  longlong part_func_value= part_info->part_expr->val_int();
+  while (max_list_index >= min_list_index)
+  {
+    list_index= (max_list_index + min_list_index) >> 1;
+    list_value= list_array[list_index].list_value;
+    if (list_value < part_func_value)
+      min_list_index= list_index + 1;
+    else if (list_value > part_func_value)
+    {
+      if (!list_index)
+        goto notfound;
+      max_list_index= list_index - 1;
+    }
+    else 
+    {
+      DBUG_RETURN(list_index + test(left_endpoint ^ include_endpoint));
+    }
+  }
+notfound:
+  if (list_value < part_func_value)
+    list_index++;
+  DBUG_RETURN(list_index);
+}
+
 
 bool get_partition_id_range(partition_info *part_info,
                             uint32 *part_id)
@@ -2517,6 +2595,89 @@
   DBUG_RETURN(FALSE);
 }
 
+
+/*
+  Find the part of part_info->range_int_array that covers the given interval
+  
+  SYNOPSIS 
+    get_partition_id_range_for_endpoint()
+      part_info         Partitioning info (partitioning type must be RANGE)
+      left_endpoint     TRUE  - the interval is [a; +inf) or (a; +inf)
+                        FALSE - the interval is (-inf; a] or (-inf; a).
+      include_endpoint  TRUE <=> the endpoint itself is included in the
+                        interval
+
+  DESCRIPTION
+    This function finds the part of part_info->range_int_array where the
+    elements have non-empty intersections with the given interval.
+    
+    A range_int_array element at index idx represents the interval
+      
+      [range_int_array[idx-1], range_int_array[idx]),
+
+    intervals are disjoint and ordered by their right bound, so
+    
+    1. For [a; +inf) or (a; +inf)-type intervals (left_endpoint==TRUE), the
+       sought array part starts at some index idx and continues till array 
+       end.
+       The function returns first number idx, such that the interval
+       represented by range_int_array[idx] has non empty intersection with 
+       the passed interval.
+       
+    2. For (-inf; a] or (-inf; a)-type intervals (left_endpoint==FALSE), the
+       sought array part starts at array start and continues till some last 
+       index idx.
+       The function returns first number idx, such that the interval
+       represented by range_int_array[idx] has EMPTY intersection with the
+       passed interval.
+       If the interval represented by the last array element has non-empty 
+       intersection with the passed interval, part_info->no_parts is
+       returned.
+       
+  RETURN
+    The edge of corresponding part_info->range_int_array part.
+*/
+
+uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
+                                           bool left_endpoint,
+                                           bool include_endpoint)
+{
+  DBUG_ENTER("get_partition_id_range_for_endpoint");
+  longlong *range_array= part_info->range_int_array;
+  uint max_partition= part_info->no_parts - 1;
+  uint min_part_id= 0, max_part_id= max_partition, loc_part_id;
+  longlong part_func_value= part_info->part_expr->val_int();
+  while (max_part_id > min_part_id)
+  {
+    loc_part_id= (max_part_id + min_part_id + 1) >> 1;
+    if (range_array[loc_part_id] < part_func_value)
+      min_part_id= loc_part_id + 1;
+    else
+      max_part_id= loc_part_id - 1;
+  }
+  loc_part_id= max_part_id;
+  if (loc_part_id < max_partition && 
+      part_func_value >= range_array[loc_part_id+1])
+  {
+     loc_part_id++;
+  }
+  if (left_endpoint)
+  {
+    if (part_func_value >= range_array[loc_part_id])
+      loc_part_id++;
+  }
+  else 
+  {
+    if (part_func_value == range_array[loc_part_id])
+      loc_part_id += test(include_endpoint);
+    else if (part_func_value > range_array[loc_part_id])
+      loc_part_id++;
+    loc_part_id++;
+  }
+  DBUG_RETURN(loc_part_id);
+}
+
+
 bool get_partition_id_hash_nosub(partition_info *part_info,
                                  uint32 *part_id)
 {
@@ -3205,10 +3366,16 @@
   */
     uint part_func_len= part_info->part_func_len;
     uint subpart_func_len= part_info->subpart_func_len; 
+    uint bitmap_bits= part_info->no_subparts? 
+                       (part_info->no_subparts* part_info->no_parts):
+                        part_info->no_parts;
+    uint bitmap_bytes= bitmap_buffer_size(bitmap_bits);
+    uint32 *bitmap_buf;
     char *part_func_string, *subpart_func_string= NULL;
     if (!((part_func_string= thd->alloc(part_func_len))) ||
         (subpart_func_len &&
-        !((subpart_func_string= thd->alloc(subpart_func_len)))))
+        !((subpart_func_string= thd->alloc(subpart_func_len)))) ||
+        !((bitmap_buf= (uint32*)thd->alloc(bitmap_bytes))))
     {
       my_error(ER_OUTOFMEMORY, MYF(0), part_func_len);
       free_items(thd->free_list);
@@ -3221,6 +3388,8 @@
              subpart_func_len);
     part_info->part_func_string= part_func_string;
     part_info->subpart_func_string= subpart_func_string;
+
+    bitmap_init(&part_info->used_partitions, bitmap_buf, bitmap_bytes*8, FALSE);
   }
 
   result= FALSE;
@@ -3294,3 +3463,60 @@
   } while (++i < key_parts);
   DBUG_VOID_RETURN;
 }
+
+
+/*
+  Fill the string comma-separated line of used partitions names
+  SYNOPSIS
+    make_used_partitions_str()
+      part_info  IN  Partitioning info
+      parts_str  OUT The string to fill
+*/
+
+void make_used_partitions_str(partition_info *part_info, String *parts_str)
+{
+  parts_str->length(0);
+  partition_element *pe;
+  uint partition_id= 0;
+  List_iterator<partition_element> it(part_info->partitions);
+  
+  if (part_info->subpart_type != NOT_A_PARTITION)
+  {
+    partition_element *head_pe;
+    while ((head_pe= it++))
+    {
+      List_iterator<partition_element> it2(head_pe->subpartitions);
+      while ((pe= it2++))
+      {
+        if (bitmap_is_set(&part_info->used_partitions, partition_id))
+        {
+          if (parts_str->length())
+            parts_str->append(',');
+          parts_str->append(head_pe->partition_name,
+                           strlen(head_pe->partition_name),
+                           system_charset_info);
+          parts_str->append('_');
+          parts_str->append(pe->partition_name,
+                           strlen(pe->partition_name),
+                           system_charset_info);
+        }
+        partition_id++;
+      }
+    }
+  }
+  else
+  {
+    while ((pe= it++))
+    {
+      if (bitmap_is_set(&part_info->used_partitions, partition_id))
+      {
+        if (parts_str->length())
+          parts_str->append(',');
+        parts_str->append(pe->partition_name, strlen(pe->partition_name),
+                         system_charset_info);
+      }
+      partition_id++;
+    }
+  }
+}
+
Thread
bk commit into 5.1 tree (sergefp:1.1982)Sergey Petrunia22 Dec