From: timour
Date: December 22 2005 9:46am
Subject: bk commit into 5.1 tree (timour:1.1956)
Below is the list of changes that have just been committed into a local
5.1 repository of timka. When timka does a push, these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository
see http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet
  1.1956 05/12/22 11:46:05 timour@stripped[timka] +13 -0
  ms02.01.diff

  sql/sql_yacc.yy
    1.430 05/12/22 11:28:50 timour@stripped[timka] +2 -0
    Import patch ms02.01.diff

  sql/sql_select.cc
    1.375 05/12/22 11:28:50 timour@stripped[timka] +53 -4
    Import patch ms02.01.diff

  sql/sql_partition.cc
    1.16 05/12/22 11:28:50 timour@stripped[timka] +228 -2
    Import patch ms02.01.diff

  sql/sql_lex.h
    1.210 05/12/22 11:28:50 timour@stripped[timka] +5 -0
    Import patch ms02.01.diff

  sql/sql_class.cc
    1.227 05/12/22 11:28:50 timour@stripped[timka] +7 -0
    Import patch ms02.01.diff

  sql/opt_range.h
    1.58 05/12/22 11:28:50 timour@stripped[timka] +7 -2
    Import patch ms02.01.diff

  sql/opt_range.cc
    1.188 05/12/22 11:28:50 timour@stripped[timka] +926 -36
    Import patch ms02.01.diff

  sql/item_timefunc.h
    1.65 05/12/22 11:28:50 timour@stripped[timka] +2 -0
    Import patch ms02.01.diff

  sql/item_timefunc.cc
    1.102 05/12/22 11:28:50 timour@stripped[timka] +21 -0
    Import patch ms02.01.diff

  sql/item.h
    1.188 05/12/22 11:28:50 timour@stripped[timka] +35 -0
    Import patch ms02.01.diff

  sql/handler.h
    1.170 05/12/22 11:28:50 timour@stripped[timka] +42 -2
    Import patch ms02.01.diff

  sql/ha_partition.cc
    1.13 05/12/22 11:28:50 timour@stripped[timka] +2 -0
    Import patch ms02.01.diff

  sql/ha_ndbcluster.cc
    1.221 05/12/22 11:28:50 timour@stripped[timka] +2 -1
    Import patch ms02.01.diff

# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	timour
# Host:	lamia.home
# Root:	/home/timka/mysql/src/5.1-2985

--- 1.169/sql/handler.h	2005-11-28 21:07:10 +02:00
+++ 1.170/sql/handler.h	2005-12-22 11:28:50 +02:00
@@ -534,19 +534,52 @@
 
   List<char> part_field_list;
   List<char> subpart_field_list;
-
+  
+  /* 
+    If there is no subpartitioning, use only this func to get partition ids.
+    If there is subpartitioning, use this func to get the partition id when
+    you have both partition and subpartition fields.
+  */
   get_part_id_func get_partition_id;
+
+  /* Get partition id when we don't have subpartition fields */
   get_part_id_func get_part_partition_id;
-  get_subpart_id_func get_subpartition_id;
 
+  /* 
+    Get the subpartition id when we don't have partition fields but we do
+    have subpartition fields.
+    Mikael said that for a given constant tuple
+    {subpart_field1, ..., subpart_fieldN} the subpartition id will be the
+    same in all partitions.
+  */
+  get_subpart_id_func get_subpartition_id;
+  
+  /* NULL-terminated array of fields used in the partitioning expression */
   Field **part_field_array;
+  /* NULL-terminated array of fields used in the subpartitioning expression */
   Field **subpart_field_array;
+
+  /* 
+    Array of all fields used in partition and subpartition expression,
+    without duplicates, NULL-terminated.
+  */
   Field **full_part_field_array;
 
   Item *part_expr;
   Item *subpart_expr;
 
   Item *item_free_list;
+  
+  /* 
+    A bitmap of partitions used by the current query. 
+    Usage pattern:
+    * It is guaranteed that all partitions are set to be unused on query start.
+    * Before index/rnd_init(), partition pruning code sets the bits for used
+      partitions.
+    * The handler->extra(HA_EXTRA_RESET) call at query end sets all partitions
+      to be unused.
+  */
+  MY_BITMAP used_partitions;
 
   union {
     longlong *range_int_array;
@@ -744,6 +777,13 @@
 bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
                             uint part_info_len, TABLE *table,
                             enum db_type default_db_type);
+void make_used_partitions_str(partition_info *part_info, String *parts_str);
+uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
+                                       bool left_endpoint,
+                                       bool include_endpoint);
+uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
+                                           bool left_endpoint,
+                                           bool include_endpoint);
 #endif
 
 

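To illustrate the intended lifecycle of the new used_partitions bitmap, here
is a minimal sketch (not part of the patch; n_total_parts and the particular
bits set below are purely illustrative) built from the same MY_BITMAP calls
the patch itself uses:

  #include "my_global.h"
  #include "my_bitmap.h"

  static void used_partitions_lifecycle_sketch(MY_BITMAP *used_partitions,
                                               uint n_total_parts)
  {
    /* Query start: the contract is that every partition is unused */
    bitmap_clear_all(used_partitions);

    /* Partition pruning marks the partitions the query may touch */
    bitmap_set_bit(used_partitions, 0);
    bitmap_set_bit(used_partitions, 2);

    /* index/rnd_init() code can then skip partitions whose bit is unset */
    for (uint i= 0; i < n_total_parts; i++)
    {
      if (!bitmap_is_set(used_partitions, i))
        continue;                        /* partition i was pruned away */
      /* ... scan partition i ... */
    }

    /* handler->extra(HA_EXTRA_RESET) at query end makes all unused again */
    bitmap_clear_all(used_partitions);
  }
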
--- 1.187/sql/item.h	2005-12-09 13:39:39 +02:00
+++ 1.188/sql/item.h	2005-12-22 11:28:50 +02:00
@@ -368,6 +368,28 @@
   }
 };
 
+
+/*
+  This enum is used to report information about the monotonicity of the
+  function represented by an Item* tree.
+  Monotonicity is defined only for Item* trees that represent table
+  partitioning expressions (i.e. have no subselects/user vars/PS parameters
+  etc.). An Item* tree is assumed to have the same monotonicity properties
+  as its corresponding function F:
+
+  [signed] longlong F(field1, field2, ...) {
+    put values of field_i into table record buffer;
+    return item->val_int(); 
+  }
+*/
+
+typedef enum monotonicity_info 
+{
+   NON_MONOTONIC,              /* none of the below holds */
+   MONOTONIC_INCREASING,       /* F() is unary and "x < y" => "F(x) <= F(y)" */
+   MONOTONIC_STRICT_INCREASING /* F() is unary and "x < y" => "F(x) <  F(y)" */
+} enum_monotonicity_info;
+
 /*************************************************************************/
 
 typedef bool (Item::*Item_processor)(byte *arg);
@@ -465,6 +487,15 @@
   virtual Item_result cast_to_int_type() const { return result_type(); }
   virtual enum_field_types field_type() const;
   virtual enum Type type() const =0;
+  
+  /*
+    Return information about function monotonicity. See comment for
+    enum_monotonicity_info for details. This function can only be called
+    after fix_fields() call.
+  */
+  virtual enum_monotonicity_info get_monotonicity_info() const
+  { return NON_MONOTONIC; }
+
   /* valXXX methods must return NULL or 0 or 0.0 if null_value is set. */
   /*
     Return double precision floating point representation of item.
@@ -1137,6 +1168,10 @@
   enum_field_types field_type() const
   {
     return field->type();
+  }
+  enum_monotonicity_info get_monotonicity_info() const
+  {
+    return MONOTONIC_STRICT_INCREASING;
   }
   Field *get_tmp_table_field() { return result_field; }
   Field *tmp_table_field(TABLE *t_arg) { return result_field; }

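To make the monotonicity contract concrete, here is a hedged sketch (not part
of the patch; the helper name is illustrative) of how a caller can use
get_monotonicity_info() to decide whether an interval endpoint must be
widened when it is mapped through the partitioning expression. It mirrors the
force_include_bounds logic added to opt_range.cc further down:

  /*
    For "field < C" and partitioning expression F, decide whether pruning may
    use "F(field) < F(C)" or only the wider "F(field) <= F(C)".
    bound_is_open is TRUE for '<'/'>' and FALSE for '<='/'>='.
  */
  static bool endpoint_must_be_included(Item *part_expr, bool bound_is_open)
  {
    switch (part_expr->get_monotonicity_info()) {
    case MONOTONIC_STRICT_INCREASING:
      return !bound_is_open;        /* strictness survives the mapping */
    case MONOTONIC_INCREASING:
      return TRUE;                  /* only "<=" can be guaranteed */
    default:
      return TRUE;                  /* NON_MONOTONIC: caller must not prune */
    }
  }
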
--- 1.101/sql/item_timefunc.cc	2005-11-23 22:47:27 +02:00
+++ 1.102/sql/item_timefunc.cc	2005-12-22 11:28:50 +02:00
@@ -885,6 +885,19 @@
   return (longlong) calc_daynr(ltime.year,ltime.month,ltime.day);
 }
 
+enum_monotonicity_info Item_func_to_days::get_monotonicity_info() const
+{
+  if (args[0]->type() == Item::FIELD_ITEM)
+  {
+    if (args[0]->field_type() == MYSQL_TYPE_DATE)
+      return MONOTONIC_STRICT_INCREASING;
+    if (args[0]->field_type() == MYSQL_TYPE_DATETIME)
+      return MONOTONIC_INCREASING;
+  }
+  return NON_MONOTONIC;
+}
+
+
 longlong Item_func_dayofyear::val_int()
 {
   DBUG_ASSERT(fixed == 1);
@@ -1067,6 +1080,14 @@
   return (longlong) ltime.year;
 }
 
+enum_monotonicity_info Item_func_year::get_monotonicity_info() const
+{
+  if (args[0]->type() == Item::FIELD_ITEM &&
+      (args[0]->field_type() == MYSQL_TYPE_DATE ||
+       args[0]->field_type() == MYSQL_TYPE_DATETIME))
+    return MONOTONIC_INCREASING;
+  return NON_MONOTONIC;
+}
 
 longlong Item_func_unix_timestamp::val_int()
 {

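The asymmetry between the two argument types handled above can be spelled out
with a short illustration (not part of the patch):

  /*
    TO_DAYS() over a DATETIME column is not injective, so only plain
    MONOTONIC_INCREASING can be claimed:

      '2005-12-22 10:00:00' < '2005-12-22 11:00:00'
      yet TO_DAYS() of both values is the same day number.

    Over a DATE column distinct values always yield distinct day numbers,
    hence MONOTONIC_STRICT_INCREASING. Item_func_year is weaker still:
    distinct dates within one year map to the same YEAR() value, so it only
    ever reports MONOTONIC_INCREASING.
  */
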
--- 1.64/sql/item_timefunc.h	2005-11-23 22:44:56 +02:00
+++ 1.65/sql/item_timefunc.h	2005-12-22 11:28:50 +02:00
@@ -65,6 +65,7 @@
     max_length=6*MY_CHARSET_BIN_MB_MAXLEN;
     maybe_null=1; 
   }
+  enum_monotonicity_info get_monotonicity_info() const;
 };
 
 
@@ -234,6 +235,7 @@
   Item_func_year(Item *a) :Item_int_func(a) {}
   longlong val_int();
   const char *func_name() const { return "year"; }
+  enum_monotonicity_info get_monotonicity_info() const;
   void fix_length_and_dec()
   { 
     decimals=0;

--- 1.187/sql/opt_range.cc	2005-12-06 09:29:02 +02:00
+++ 1.188/sql/opt_range.cc	2005-12-22 11:28:50 +02:00
@@ -286,6 +286,13 @@
     return parent->left == this ? &parent->left : &parent->right;
   }
   SEL_ARG *clone_tree();
+
+  /* Return TRUE if this represents "keypartK = const" or "keypartK IS NULL" */
+  bool is_singlepoint()
+  {
+    return !min_flag && !max_flag && 
+           !field->key_cmp((byte*) min_value, (byte*)max_value);
+  }
 };
 
 class SEL_IMERGE;
@@ -319,25 +326,51 @@
   /* Note that #records for each key scan is stored in table->quick_rows */
 };
 
+class RANGE_OPT_PARAM
+{
+public:
+  THD	*thd;   /* Current thread handle */
+  TABLE *table; /* Table being analyzed */
+  COND *cond;   /* Used inside get_mm_tree(). */
+  table_map prev_tables;
+  table_map read_tables;
+  table_map current_table; /* Bit of the table being analyzed */
+
+  /* Array of parts of all keys for which range analysis is performed */
+  KEY_PART *key_parts;
+  KEY_PART *key_parts_end;
+  MEM_ROOT *mem_root; /* Memory that will be freed when range analysis completes */
+  MEM_ROOT *old_root; /* Memory that will last until the query end */
+  /*
+    Number of indexes used in range analysis (In SEL_TREE::keys only first
+    #keys elements are not empty)
+  */
+  uint keys;
+  
+  /* 
+    If true, the index descriptions describe real indexes (and it is ok to
+    call field->optimize_range(real_keynr[...], ...)).
+    Otherwise the index descriptions describe fake indexes.
+  */
+  bool using_real_indexes;
+  
+  /*
+    used_key_no -> table_key_no translation table. Only makes sense if
+    using_real_indexes==TRUE
+  */
+  uint real_keynr[MAX_KEY];
+};
 
-typedef struct st_qsel_param {
-  THD	*thd;
-  TABLE *table;
-  KEY_PART *key_parts,*key_parts_end;
+class PARAM : public RANGE_OPT_PARAM
+{
+public:
   KEY_PART *key[MAX_KEY]; /* First key parts of keys used in the query */
-  MEM_ROOT *mem_root, *old_root;
-  table_map prev_tables,read_tables,current_table;
   uint baseflag, max_key_part, range_count;
 
-  uint keys; /* number of keys used in the query */
-
-  /* used_key_no -> table_key_no translation table */
-  uint real_keynr[MAX_KEY];
 
   char min_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH],
     max_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH];
   bool quick;				// Don't calulate possible keys
-  COND *cond;
 
   uint fields_bitmap_size;
   MY_BITMAP needed_fields;    /* bitmask of fields needed by the query */
@@ -349,7 +382,7 @@
 
  /* TRUE if last checked tree->key can be used for ROR-scan */
   bool is_ror_scan;
-} PARAM;
+};
 
 class TABLE_READ_PLAN;
   class TRP_RANGE;
@@ -360,13 +393,13 @@
 
 struct st_ror_scan_info;
 
-static SEL_TREE * get_mm_parts(PARAM *param,COND *cond_func,Field *field,
+static SEL_TREE * get_mm_parts(RANGE_OPT_PARAM *param,COND *cond_func,Field *field,
 			       Item_func::Functype type,Item *value,
 			       Item_result cmp_type);
-static SEL_ARG *get_mm_leaf(PARAM *param,COND *cond_func,Field *field,
+static SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param,COND *cond_func,Field *field,
 			    KEY_PART *key_part,
 			    Item_func::Functype type,Item *value);
-static SEL_TREE *get_mm_tree(PARAM *param,COND *cond);
+static SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param,COND *cond);
 
 static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts);
 static ha_rows check_quick_select(PARAM *param,uint index,SEL_ARG *key_tree);
@@ -409,8 +442,8 @@
 static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg);
 #endif
 
-static SEL_TREE *tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2);
-static SEL_TREE *tree_or(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2);
+static SEL_TREE *tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2);
+static SEL_TREE *tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2);
 static SEL_ARG *sel_add(SEL_ARG *key1,SEL_ARG *key2);
 static SEL_ARG *key_or(SEL_ARG *key1,SEL_ARG *key2);
 static SEL_ARG *key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag);
@@ -423,7 +456,7 @@
 static SEL_ARG null_element(SEL_ARG::IMPOSSIBLE);
 static bool null_part_in_key(KEY_PART *key_part, const char *key,
                              uint length);
-bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, PARAM* param);
+bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, RANGE_OPT_PARAM* param);
 
 
 /*
@@ -455,9 +488,9 @@
     trees_next(trees),
     trees_end(trees + PREALLOCED_TREES)
   {}
-  int or_sel_tree(PARAM *param, SEL_TREE *tree);
-  int or_sel_tree_with_checks(PARAM *param, SEL_TREE *new_tree);
-  int or_sel_imerge_with_checks(PARAM *param, SEL_IMERGE* imerge);
+  int or_sel_tree(RANGE_OPT_PARAM *param, SEL_TREE *tree);
+  int or_sel_tree_with_checks(RANGE_OPT_PARAM *param, SEL_TREE *new_tree);
+  int or_sel_imerge_with_checks(RANGE_OPT_PARAM *param, SEL_IMERGE* imerge);
 };
 
 
@@ -473,7 +506,7 @@
     -1 - Out of memory.
 */
 
-int SEL_IMERGE::or_sel_tree(PARAM *param, SEL_TREE *tree)
+int SEL_IMERGE::or_sel_tree(RANGE_OPT_PARAM *param, SEL_TREE *tree)
 {
   if (trees_next == trees_end)
   {
@@ -524,7 +557,7 @@
    -1  An error occurred.
 */
 
-int SEL_IMERGE::or_sel_tree_with_checks(PARAM *param, SEL_TREE *new_tree)
+int SEL_IMERGE::or_sel_tree_with_checks(RANGE_OPT_PARAM *param, SEL_TREE *new_tree)
 {
   for (SEL_TREE** tree = trees;
        tree != trees_next;
@@ -558,7 +591,7 @@
    -1 - An error occurred
 */
 
-int SEL_IMERGE::or_sel_imerge_with_checks(PARAM *param, SEL_IMERGE* imerge)
+int SEL_IMERGE::or_sel_imerge_with_checks(RANGE_OPT_PARAM *param, SEL_IMERGE* imerge)
 {
   for (SEL_TREE** tree= imerge->trees;
        tree != imerge->trees_next;
@@ -604,7 +637,7 @@
     other Error, both passed lists are unusable
 */
 
-int imerge_list_or_list(PARAM *param,
+int imerge_list_or_list(RANGE_OPT_PARAM *param,
                         List<SEL_IMERGE> *im1,
                         List<SEL_IMERGE> *im2)
 {
@@ -624,7 +657,7 @@
     other Error
 */
 
-int imerge_list_or_tree(PARAM *param,
+int imerge_list_or_tree(RANGE_OPT_PARAM *param,
                         List<SEL_IMERGE> *im1,
                         SEL_TREE *tree)
 {
@@ -1776,6 +1809,7 @@
     param.old_root= thd->mem_root;
     param.needed_reg= &needed_reg;
     param.imerge_cost_buff_size= 0;
+    param.using_real_indexes= TRUE;
 
     thd->no_errors=1;				// Don't warn about NULL
     init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
@@ -1966,6 +2000,859 @@
   DBUG_RETURN(records ? test(quick) : -1);
 }
 
+/****************************************************************************
+ * Partition pruning starts
+ ****************************************************************************/
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+
+struct st_part_prune_param;
+struct st_part_opt_info;
+
+typedef void (*mark_full_part_func)(partition_info*, uint32);
+typedef uint32 (*part_num_to_partition_id_func)(struct st_part_prune_param*,
+                                                uint32);
+typedef uint32 (*get_endpoint_func)(partition_info*, bool left_endpoint,
+                                    bool include_endpoint);
+
+/*
+  Partition pruning operation context
+*/
+typedef struct st_part_prune_param
+{
+  RANGE_OPT_PARAM range_param; /* Range optimizer parameters */
+  
+  /***************************************************************
+   Following fields are filled in based solely on partitioning 
+   definition and not modified after that:
+   **************************************************************/
+  partition_info *part_info; /* Copy of table->part_info */
+  /* Function to get partition id from partitioning fields only */
+  get_part_id_func get_top_partition_id_func;
+  /* Function to mark a partition as used (w/all subpartitions if they exist)*/
+  mark_full_part_func mark_full_partition_used;
+ 
+  /* Partitioning 'index' description, array of key parts */
+  KEY_PART *key;
+  
+  /*
+    Number of fields in partitioning 'index' definition created for
+    partitioning (0 if partitioning 'index' doesn't include partitioning
+    fields)
+  */
+  uint part_fields;
+  uint subpart_fields; /* Same as above for subpartitioning */
+  
+  /* 
+    Number of the last partitioning field keypart in the index, or -1 if
+    partitioning index definition doesn't include partitioning fields.
+  */
+  int last_part_partno;
+  int last_subpart_partno; /* Same as above for subpartitioning */
+
+  /*
+    Function to be used to analyze non-singlepoint intervals (Can be pointer
+    to one of two functions - for RANGE and for LIST types).  NULL means 
+    partitioning type and/or expression doesn't allow non-singlepoint interval
+    analysis.
+    See get_list_array_idx_for_endpoint (or get_range_...) for description of
+    what the function does.
+  */
+  get_endpoint_func   get_endpoint;
+
+  /* Maximum possible value that can be returned by get_endpoint function */
+  uint32  max_endpoint_val;
+
+  /* 
+    For RANGE partitioning, part_num_to_part_id_range, for LIST partitioning,
+    part_num_to_part_id_list. Just to avoid the if-else clutter.
+  */
+  part_num_to_partition_id_func endpoints_walk_func;
+
+  /*
+    If true, process "key < const" as "part_func(key) < part_func(const)",
+    otherwise as "part_func(key) <= part_func(const)". Same for '>' and '>='.
+    This is defined iff get_endpoint != NULL.
+  */
+  bool force_include_bounds;
+ 
+  /*
+    is_part_keypart[i] == test(keypart #i of the partitioning 'index' is a
+                               partitioning field)
+    Used to maintain current values of cur_part_fields and cur_subpart_fields
+  */
+  my_bool *is_part_keypart;
+  /* Same as above for subpartitioning */
+  my_bool *is_subpart_keypart;
+
+  /***************************************************************
+   Following fields form find_used_partitions() recursion context:
+   **************************************************************/
+  SEL_ARG **arg_stack;     /* "Stack" of SEL_ARGs */
+  SEL_ARG **arg_stack_end; /* Top of the stack    */
+  /* Number of partitioning fields for which we have a SEL_ARG* in arg_stack */
+  uint cur_part_fields;
+  /* Same as cur_part_fields, but for subpartitioning */
+  uint cur_subpart_fields;
+ 
+  /***************************************************************
+   Following fields are used to store an 'iterator' that can be 
+   used to obtain the set of used partitions.
+   **************************************************************/
+  /* 
+    Start number, end number, and the partition-number-to-partition-id map.
+  */
+  part_num_to_partition_id_func part_num_to_part_id;
+  uint32 start_part_num;
+  uint32 end_part_num;
+} PART_PRUNE_PARAM;
+
+static bool create_partition_index_descrition(PART_PRUNE_PARAM *prune_par);
+static int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree);
+static void mark_all_partitions_as_used(partition_info *part_info);
+static uint32 part_num_to_part_id_range(PART_PRUNE_PARAM* prune_par, 
+                                        uint32 num);
+
+#ifndef DBUG_OFF
+static void print_partitioning_index(KEY_PART *parts, KEY_PART *parts_end);
+static void dbug_print_field(Field *field);
+static void dbug_print_segment_range(SEL_ARG *arg, KEY_PART *part);
+static void dbug_print_onepoint_range(SEL_ARG **start, uint num);
+#endif
+
+
+/*
+  Perform partition pruning for a given table and condition.
+
+  SYNOPSIS
+    prune_partitions()
+      thd           Thread handle
+      table         Table to perform partition pruning for
+      pprune_cond   Condition to use for partition pruning
+  
+  DESCRIPTION
+    This function assumes that all partitions are marked as unused when it
+    is invoked. The function analyzes the condition, finds partitions that
+    need to be used to retrieve the records that match the condition, and 
+    marks them as used by setting the appropriate bits in part_info->used_partitions.
+    In the worst case all partitions are marked as used.
+
+  NOTE
+    This function returns promptly if called for a non-partitioned table.
+
+  RETURN
+    TRUE   We've inferred that no partitions need to be used (i.e. no table
+           records will satisfy pprune_cond)
+    FALSE  Otherwise
+*/
+
+bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
+{
+  bool retval= FALSE;
+  partition_info *part_info = table->part_info;
+  DBUG_ENTER("prune_partitions");
+
+  if (!part_info)
+    DBUG_RETURN(FALSE); /* not a partitioned table */
+  
+  if (!pprune_cond)
+  {
+    mark_all_partitions_as_used(part_info);
+    DBUG_RETURN(FALSE);
+  }
+  
+  PART_PRUNE_PARAM prune_param;
+  MEM_ROOT alloc;
+  RANGE_OPT_PARAM  *range_par= &prune_param.range_param;
+
+  prune_param.part_info= part_info;
+
+  init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
+  range_par->mem_root= &alloc;
+  range_par->old_root= thd->mem_root;
+
+  if (create_partition_index_descrition(&prune_param))
+  {
+    mark_all_partitions_as_used(part_info);
+    free_root(&alloc,MYF(0));		// Return memory & allocator
+    DBUG_RETURN(FALSE);
+  }
+  
+  range_par->thd= thd;
+  range_par->table= table;
+  /* range_par->cond doesn't need initialization */
+  range_par->prev_tables= range_par->read_tables= 0;
+  range_par->current_table= table->map;
+
+  range_par->keys= 1; // one index
+  range_par->using_real_indexes= FALSE;
+  range_par->real_keynr[0]= 0;
+
+  thd->no_errors=1;				// Don't warn about NULL
+  thd->mem_root=&alloc;
+  
+  prune_param.key= prune_param.range_param.key_parts;
+  SEL_TREE *tree;
+  SEL_ARG *arg;
+  int res;
+
+  tree= get_mm_tree(range_par, pprune_cond);
+  if (!tree || (tree->type != SEL_TREE::KEY &&
+                tree->type != SEL_TREE::KEY_SMALLER))
+    goto all_used;
+
+  if (tree->type == SEL_TREE::IMPOSSIBLE)
+  {
+    retval= TRUE;
+    goto end;
+  }
+   
+  if (tree->merges.is_empty())
+  {
+    prune_param.arg_stack_end= prune_param.arg_stack;
+    prune_param.cur_part_fields= 0;
+    prune_param.cur_subpart_fields= 0;
+    prune_param.part_num_to_part_id= part_num_to_part_id_range;
+    prune_param.start_part_num= 0;
+    prune_param.end_part_num= prune_param.part_info->no_parts;
+    if (!tree->keys[0] || (-1 == (res= find_used_partitions(&prune_param,
+                                                            tree->keys[0]))))
+      goto all_used;
+  }
+  else
+  {
+    res= 0;
+    DBUG_ASSERT(tree->merges.elements == 1);
+    SEL_IMERGE *imerge= tree->merges.head();
+    for (SEL_TREE **ptree= imerge->trees; ptree < imerge->trees_next; ptree++)
+    {
+      prune_param.arg_stack_end= prune_param.arg_stack;
+      prune_param.cur_part_fields= 0;
+      prune_param.cur_subpart_fields= 0;
+      prune_param.part_num_to_part_id= part_num_to_part_id_range;
+      prune_param.start_part_num= 0;
+      prune_param.end_part_num= prune_param.part_info->no_parts;
+      if (-1 == (res |= find_used_partitions(&prune_param, (*ptree)->keys[0])))
+        goto all_used;
+    }
+  }
+  
+  if (!res)
+    retval= TRUE;
+  goto end;
+
+all_used:
+  mark_all_partitions_as_used(prune_param.part_info);
+end:
+  thd->no_errors=0;
+  thd->mem_root= range_par->old_root;
+  free_root(&alloc,MYF(0));			// Return memory & allocator
+  DBUG_RETURN(retval);
+}
+
+
+/*
+  Store key image to table record
+
+  SYNOPSIS
+    field  Field which key image should be stored.
+    ptr    Field value in key format.
+    len    Length of the value, in bytes.
+*/
+
+static void store_key_image_to_rec(Field *field, char *ptr, uint len)
+{
+  /* Do the same as print_key() does */ 
+  if (field->real_maybe_null())
+  {
+    if (*ptr)
+    {
+      field->set_null();
+      return;
+    }
+    ptr++;
+  }    
+  field->set_key_image(ptr, len); 
+}
+
+
+/*
+  For SEL_ARG* array, store sel_arg->min values into table record buffer
+
+  SYNOPSIS
+    store_selargs_to_rec()
+      ppar   Partition pruning context
+      start  Array SEL_ARG* for which the minimum values should be stored
+      num    Number of elements in the array
+*/
+
+static void store_selargs_to_rec(PART_PRUNE_PARAM *ppar, SEL_ARG **start,
+                                 int num)
+{
+  KEY_PART *parts= ppar->range_param.key_parts;
+  for (SEL_ARG **end= start + num; start != end; start++)
+  {
+    SEL_ARG *sel_arg= (*start);
+    store_key_image_to_rec(sel_arg->field, sel_arg->min_value,
+                           parts[sel_arg->part].length);
+  }
+}
+
+
+/* Mark a partition as used in the case when there are no subpartitions */
+static void mark_full_partition_used_no_parts(partition_info* part_info,
+                                              uint32 part_id)
+{
+  bitmap_set_bit(&part_info->used_partitions, part_id);
+}
+
+
+/* Mark a partition as used in the case when there are subpartitions */
+static void mark_full_partition_used_with_parts(partition_info *part_info,
+                                                uint32 part_id)
+{
+  uint32 start= part_id * part_info->no_subparts;
+  uint32 end=   start + part_info->no_subparts; 
+  for (; start != end; start++)
+    bitmap_set_bit(&part_info->used_partitions, start);
+}
+
+static
+uint32 part_num_to_part_id_range(PART_PRUNE_PARAM* prune_par, uint32 num)
+{
+  return num; 
+}
+
+static
+uint32 part_num_to_part_id_list(PART_PRUNE_PARAM* prune_par, uint32 num)
+{
+  return prune_par->part_info->list_array[num].partition_id;
+}
+
+
+/*
+  Recursively walk the SEL_ARG tree, find/mark partitions that need to be used
+
+  SYNOPSIS
+    find_used_partitions()
+      ppar      Partition pruning context.
+      key_tree  Intervals tree to perform pruning for.
+
+  DESCRIPTION
+    This function 
+      * recursively walks the SEL_ARG* tree, collecting partitioning 
+        "intervals";
+      * finds the partitions one needs to use to get rows in these intervals;
+      * marks these partitions as used.
+      
+    WHAT IS CONSIDERED TO BE "INTERVALS"
+    A partition pruning "interval" is equivalent to a condition in one of the
+    forms:
+    
+    "partition_field1=const1 AND ... partition_fieldN=constN"          (1)
+    "subpartition_field1=const1 AND ... subpartition_fieldN=constN"    (2)
+    "(1) AND (2)"                                                      (3)
+    
+    In (1) and (2) all [sub]partitioning fields must be used, and "x=const"
+    includes "x IS NULL". 
+    
+    If partitioning is performed using 
+       
+       PARTITION BY RANGE(unary_monotonic_func(single_partition_field)),
+       
+    then the following is also an interval:
+
+           "   const1 OP1 single_partition_field OP2 const2"           (4)
+
+    where OP1 and OP2 are '<' or '<=', and const_i can be +/- inf.
+    Everything else is not a partition pruning "interval".
+
+  RETURN
+    1   OK, one or more [sub]partitions are marked as used.
+    0   The passed condition doesn't match any partitions
+   -1   Couldn't infer any partition pruning "intervals" from the passed 
+        SEL_ARG* tree. Marking partitions as used is the responsibility of
+        the caller.
+*/
+
+static 
+int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
+{
+  int res, left_res=0, right_res=0;
+  int partno= (int)key_tree->part;
+  bool pushed= FALSE;
+  bool set_full_part_if_bad_ret= FALSE;
+
+  if (key_tree->left != &null_element)
+  {
+    if (-1 == (left_res= find_used_partitions(ppar,key_tree->left)))
+      return -1;
+  }
+
+  if (key_tree->type == SEL_ARG::KEY_RANGE)
+  {
+    if (partno == 0 && (NULL != ppar->get_endpoint))
+    {
+      /* 
+        Partitioning is done by RANGE|INTERVAL(monotonic_expr(fieldX)), and
+        we got "const1 < fieldX < const2" interval.
+      */
+      DBUG_EXECUTE("info", dbug_print_segment_range(key_tree,
+                                                    ppar->range_param.
+                                                    key_parts););
+      /* Find minimum */
+      if (key_tree->min_flag & NO_MIN_RANGE)
+      {
+        ppar->start_part_num= 0;
+      }
+      else
+      {
+        store_key_image_to_rec(key_tree->field, key_tree->min_value, 
+                               ppar->range_param.key_parts[0].length);
+        bool include_endp= ppar->force_include_bounds ||
+                           !test(key_tree->min_flag & NEAR_MIN);
+        ppar->start_part_num= ppar->get_endpoint(ppar->part_info, 1,
+                                                 include_endp);
+        if (ppar->start_part_num == ppar->max_endpoint_val)
+        {
+          res= 0; /* No satisfying partitions */
+          goto pop_and_go_right;
+        }
+      }
+
+      /* Find maximum */
+      if (key_tree->max_flag & NO_MAX_RANGE)
+      {
+        ppar->end_part_num= ppar->max_endpoint_val;
+      }
+      else
+      {
+        store_key_image_to_rec(key_tree->field, key_tree->max_value,
+                               ppar->range_param.key_parts[0].length);
+        bool include_endp= ppar->force_include_bounds ||
+                           !test(key_tree->max_flag & NEAR_MAX);
+        ppar->end_part_num= ppar->get_endpoint(ppar->part_info, 0,
+                                               include_endp);
+        if (ppar->start_part_num == ppar->end_part_num)
+        {
+          res= 0; /* No satisfying partitions */
+          goto pop_and_go_right;
+        }
+      }
+      ppar->part_num_to_part_id= ppar->endpoints_walk_func;
+
+      /* 
+        Save our intent to mark full partition as used if we will not be able 
+        to obtain further limits on subpartitions
+      */
+      set_full_part_if_bad_ret= TRUE;
+      goto process_next_key_part;
+    }
+
+    if (key_tree->is_singlepoint())
+    {
+      pushed= TRUE;
+      ppar->cur_part_fields+=    ppar->is_part_keypart[partno];
+      ppar->cur_subpart_fields+= ppar->is_subpart_keypart[partno];
+      *(ppar->arg_stack_end++) = key_tree;
+
+      if (partno == ppar->last_part_partno &&
+          ppar->cur_part_fields == ppar->part_fields)
+      {
+        /* 
+          Ok, we've got "fieldN<=>constN"-type SEL_ARGs for all partitioning
+          fields. Save all constN constants into table record buffer.
+        */
+        store_selargs_to_rec(ppar, ppar->arg_stack, ppar->part_fields);
+        DBUG_EXECUTE("info", dbug_print_onepoint_range(ppar->arg_stack,
+                                                       ppar->part_fields););
+        uint32 part_id;
+        /* then find in which partition the {const1, ...,constN} tuple goes */
+        if (ppar->get_top_partition_id_func(ppar->part_info, &part_id))
+        {
+          res= 0; /* No satisfying partitions */
+          goto pop_and_go_right;
+        }
+        /* Remember the limit we got - single partition #part_id */
+        ppar->part_num_to_part_id= part_num_to_part_id_range;
+        ppar->start_part_num= part_id;
+        ppar->end_part_num=   part_id + 1;
+        
+        /*
+          If there are no subpartitions/we fail to get any limit for them, 
+          then we'll mark full partition as used. 
+        */
+        set_full_part_if_bad_ret= TRUE;
+        goto process_next_key_part;
+      }
+
+      if (partno == ppar->last_subpart_partno)
+      {
+        /* 
+          Ok, we've got "fieldN<=>constN"-type SEL_ARGs for all subpartitioning
+          fields. Save all constN constants into table record buffer.
+        */
+        store_selargs_to_rec(ppar, ppar->arg_stack_end - ppar->subpart_fields,
+                             ppar->subpart_fields);
+        DBUG_EXECUTE("info", dbug_print_onepoint_range(ppar->arg_stack_end - 
+                                                       ppar->subpart_fields,
+                                                       ppar->subpart_fields););
+        /* Find the subpartition (it's HASH/KEY so we always have one) */
+        partition_info *part_info= ppar->part_info;
+        uint32 subpart_id= part_info->get_subpartition_id(part_info);
+        
+        /* Mark this subpartition as used in each of the used partitions. */
+        for (uint32 num= ppar->start_part_num; num != ppar->end_part_num; 
+             num++)
+        {
+          bitmap_set_bit(&part_info->used_partitions,
+                         ppar->part_num_to_part_id(ppar, num) * 
+                         part_info->no_subparts + subpart_id);
+          ppar->start_part_num++;
+        }
+        res= 1; /* Some partitions were marked as used */
+        goto pop_and_go_right;
+      }
+    }
+    else
+    {
+      /* 
+        Can't handle a condition on the current key part. If we're so deep that
+        we're processing the subpartitioning key parts, this means we'll not be
+        able to infer any suitable condition, so bail out.
+      */
+      if (partno >= ppar->last_part_partno)
+        return -1;
+    }
+  }
+
+process_next_key_part:
+  if (key_tree->next_key_part)
+    res= find_used_partitions(ppar, key_tree->next_key_part);
+  else 
+    res= -1;
+  
+  if (res == -1) /* Got "full range" for key_tree->next_key_part call */
+  {
+    if (set_full_part_if_bad_ret)
+    {
+      for (uint32 num= ppar->start_part_num; num != ppar->end_part_num; 
+           num++)
+      {
+        ppar->mark_full_partition_used(ppar->part_info,
+                                       ppar->part_num_to_part_id(ppar, num));
+      }
+      res= 1;
+    }
+    else
+      return -1;
+  }
+
+  if (set_full_part_if_bad_ret)
+  {
+    /* Restore the "used partition iterator" to its default */
+    ppar->part_num_to_part_id= part_num_to_part_id_range;
+    ppar->start_part_num= 0;
+    ppar->end_part_num= ppar->part_info->no_parts;
+  }
+
+  if (pushed)
+  {
+pop_and_go_right:
+    /* Pop this key part info off the "stack" */
+    ppar->arg_stack_end--;
+    ppar->cur_part_fields-=    ppar->is_part_keypart[partno];
+    ppar->cur_subpart_fields-= ppar->is_subpart_keypart[partno];
+  }
+  
+  if (key_tree->right != &null_element)
+  {
+    if (-1 == (right_res= find_used_partitions(ppar,key_tree->right)))
+      return -1;
+  }
+  return (left_res || right_res || res);
+}
+ 
+
+static void mark_all_partitions_as_used(partition_info *part_info)
+{
+  bitmap_set_all(&part_info->used_partitions);
+}
+
+
+/*
+  Check if field types allow to construct partitioning index description
+ 
+  SYNOPSIS
+    fields_ok_for_partition_index()
+      pfield  NULL-terminated array of pointers to fields.
+
+  DESCRIPTION
+    For an array of fields, check if we can use all of the fields to create
+    partitioning index description.
+    
+    We can't process GEOMETRY fields - for these fields singlepoint intervals
+    can't be generated, and non-singlepoint ones are "special" kinds of intervals
+    to which our processing logic can't be applied.
+
+    It is not known if we could process ENUM fields, so they are disabled to be
+    on the safe side.
+
+  RETURN 
+    TRUE   Yes, fields can be used in partitioning index
+    FALSE  Otherwise
+*/
+
+static bool fields_ok_for_partition_index(Field **pfield)
+{
+  if (!pfield)
+    return FALSE;
+  for (; (*pfield); pfield++)
+  {
+    enum_field_types ftype= (*pfield)->real_type();
+    if (ftype == FIELD_TYPE_ENUM || ftype == FIELD_TYPE_GEOMETRY)
+      return FALSE;
+  }
+  return TRUE;
+}
+
+
+/*
+  Create partition index description and fill related info in the context
+  struct
+
+  SYNOPSIS
+    create_partition_index_descrition()
+      prune_par  INOUT Partition pruning context
+
+  DESCRIPTION
+    Create partition index description. Partition index description is:
+
+      part_index(used_fields_list(part_expr), used_fields_list(subpart_expr))
+
+    If partitioning/sub-partitioning uses ENUM or GEOMETRY fields, then the
+    corresponding fields_list(...) is not included in the index description
+    and we don't perform partition pruning for partitions/subpartitions.
+
+  RETURN
+    TRUE   Out of memory or can't do partition pruning at all
+    FALSE  OK
+*/
+
+static bool create_partition_index_descrition(PART_PRUNE_PARAM *ppar)
+{
+  RANGE_OPT_PARAM *range_par= &(ppar->range_param);
+  partition_info *part_info= ppar->part_info;
+  uint used_part_fields, used_subpart_fields;
+
+  used_part_fields= fields_ok_for_partition_index(part_info->part_field_array) ?
+                      part_info->no_part_fields : 0;
+  used_subpart_fields= 
+    fields_ok_for_partition_index(part_info->subpart_field_array)? 
+      part_info->no_subpart_fields : 0;
+  
+  uint total_parts= used_part_fields + used_subpart_fields;
+
+  ppar->part_fields=      used_part_fields;
+  ppar->last_part_partno= (int)used_part_fields - 1;
+
+  ppar->subpart_fields= used_subpart_fields;
+  ppar->last_subpart_partno= 
+    used_subpart_fields?(int)(used_part_fields + used_subpart_fields - 1): -1;
+
+  if (is_sub_partitioned(part_info))
+  {
+    ppar->mark_full_partition_used=  mark_full_partition_used_with_parts;
+    ppar->get_top_partition_id_func= part_info->get_part_partition_id;
+  }
+  else
+  {
+    ppar->mark_full_partition_used=  mark_full_partition_used_no_parts;
+    ppar->get_top_partition_id_func= part_info->get_partition_id;
+  }
+
+  enum_monotonicity_info minfo;
+  ppar->get_endpoint= NULL;
+  if (part_info->part_expr && 
+      (minfo= part_info->part_expr->get_monotonicity_info()) != NON_MONOTONIC)
+  {
+    /*
+      ppar->force_include_bounds controls how we'll process "field < C" and 
+      "field > C" intervals.
+      If the partitioning function F is strictly increasing, then for any x, y
+      "x < y" => "F(x) < F(y)" (*), i.e. when we get interval "field < C" 
+      we can perform partition pruning on the equivalent "F(field) < F(C)".
+
+      If the partitioning function not strictly increasing (it is simply
+      increasing), then instead of (*) we get "x < y" => "F(x) <= F(y)"
+      i.e. for interval "field < C" we can perform partition pruning for
+      "F(field) <= F(C)".
+    */
+    ppar->force_include_bounds= test(minfo == MONOTONIC_INCREASING);
+    if (part_info->part_type == RANGE_PARTITION)
+    {
+      ppar->get_endpoint=        get_partition_id_range_for_endpoint;
+      ppar->endpoints_walk_func= part_num_to_part_id_range;
+      ppar->max_endpoint_val=    part_info->no_parts;
+    }
+    else if (part_info->part_type == LIST_PARTITION)
+    {
+      ppar->get_endpoint=        get_list_array_idx_for_endpoint;
+      ppar->endpoints_walk_func= part_num_to_part_id_list;
+      ppar->max_endpoint_val=    part_info->no_list_values;
+    }
+  }
+
+  KEY_PART *key_part;
+  MEM_ROOT *alloc= range_par->mem_root;
+  if (!total_parts || 
+      !(key_part= (KEY_PART*)alloc_root(alloc, sizeof(KEY_PART)*
+                                               total_parts)) ||
+      !(ppar->arg_stack= (SEL_ARG**)alloc_root(alloc, sizeof(SEL_ARG*)* 
+                                                      total_parts)) ||
+      !(ppar->is_part_keypart= (my_bool*)alloc_root(alloc, sizeof(my_bool)*
+                                                           total_parts)) ||
+      !(ppar->is_subpart_keypart= (my_bool*)alloc_root(alloc, sizeof(my_bool)*
+                                                           total_parts)))
+    return TRUE;
+
+  range_par->key_parts= key_part;
+  Field **field= (ppar->part_fields)? part_info->part_field_array :
+                                           part_info->subpart_field_array;
+  bool subpart_fields= FALSE;
+  for (uint part= 0; part < total_parts; part++, key_part++)
+  {
+    key_part->key=          0;
+    key_part->part=	    part;
+    key_part->length=       (*field)->pack_length_in_rec();
+    /* 
+      psergey-todo: check yet again if this is correct for tricky field types,
+        e.g. see "Fix a fatal error in decimal key handling" in 
+        open_binary_frm().
+    */
+    key_part->store_length= (*field)->pack_length();
+    if ((*field)->real_maybe_null())
+      key_part->store_length+= HA_KEY_NULL_LENGTH;
+    if ((*field)->type() == FIELD_TYPE_BLOB || 
+        (*field)->real_type() == MYSQL_TYPE_VARCHAR)
+      key_part->store_length+= HA_KEY_BLOB_LENGTH;
+
+    key_part->field=        (*field);
+    key_part->image_type =  Field::itRAW;
+    /* We don't set key_part->null_bit as it will not be used */
+
+    ppar->is_part_keypart[part]= !subpart_fields;
+    ppar->is_subpart_keypart[part]= subpart_fields;
+ 
+    if (!*(++field))
+    {
+      field= part_info->subpart_field_array;
+      subpart_fields= TRUE;
+    }
+  }
+  range_par->key_parts_end= key_part;
+
+  DBUG_EXECUTE("info", print_partitioning_index(range_par->key_parts,
+                                                range_par->key_parts_end););
+  return FALSE;
+}
+
+
+#ifndef DBUG_OFF
+
+static void print_partitioning_index(KEY_PART *parts, KEY_PART *parts_end)
+{
+  DBUG_ENTER("print_partitioning_index");
+  DBUG_LOCK_FILE;
+  fprintf(DBUG_FILE, "partitioning INDEX(");
+  for (KEY_PART *p=parts; p != parts_end; p++)
+  {
+    fprintf(DBUG_FILE, "%s%s", p==parts?"":" ,", p->field->field_name);
+  }
+  fprintf(DBUG_FILE, ");\n");
+  DBUG_UNLOCK_FILE;
+  DBUG_VOID_RETURN;
+}
+
+/* Print field value into debug trace, in NULL-aware way. */
+static void dbug_print_field(Field *field)
+{
+  if (field->is_real_null())
+    fprintf(DBUG_FILE, "NULL");
+  else
+  {
+    String str;
+    String *pstr;
+    pstr= field->val_str(&str);
+    fprintf(DBUG_FILE, "'%s'", pstr->c_ptr_safe());
+  }
+}
+
+
+/* Print a "c1 < keypartX < c2" - type interval into debug trace. */
+static void dbug_print_segment_range(SEL_ARG *arg, KEY_PART *part)
+{
+  DBUG_ENTER("dbug_print_segment_range");
+  DBUG_LOCK_FILE;
+  if (!(arg->min_flag & NO_MIN_RANGE))
+  {
+    arg->field->set_key_image((char*)(arg->min_value), part->length); 
+    dbug_print_field(part->field);
+    if (arg->min_flag & NEAR_MIN)
+      fputs(" < ", DBUG_FILE);
+    else
+      fputs(" <= ", DBUG_FILE);
+  }
+
+  fprintf(DBUG_FILE, "%s", part->field->field_name);
+
+  if (!(arg->max_flag & NO_MAX_RANGE))
+  {
+    if (arg->max_flag & NEAR_MAX)
+      fputs(" < ", DBUG_FILE);
+    else
+      fputs(" <= ", DBUG_FILE);
+    arg->field->set_key_image((char*)(arg->max_value), part->length); 
+    dbug_print_field(part->field);
+  }
+  DBUG_UNLOCK_FILE;
+  DBUG_VOID_RETURN;
+}
+
+
+/*
+  Print a singlepoint multi-keypart range interval to debug trace
+ 
+  SYNOPSIS
+    dbug_print_onepoint_range()
+      start  Array of SEL_ARG* ptrs representing conditions on key parts
+      num    Number of elements in the array.
+
+  DESCRIPTION
+    This function prints a "keypartN=constN AND ... AND keypartK=constK"-type 
+    interval to debug trace.
+*/
+
+static void dbug_print_onepoint_range(SEL_ARG **start, uint num)
+{
+  DBUG_ENTER("dbug_print_onepoint_range");
+  DBUG_LOCK_FILE;
+  SEL_ARG **end= start + num;
+  for (SEL_ARG **arg= start; arg != end; arg++)
+  {
+    Field *field= (*arg)->field;
+    fprintf(DBUG_FILE, "%s%s=", (arg==start)?"":", ", field->field_name);
+    dbug_print_field(field);
+  }
+  DBUG_UNLOCK_FILE;
+  DBUG_VOID_RETURN;
+}
+#endif
+
+/****************************************************************************
+ * Partition pruning code ends
+ ****************************************************************************/
+#endif
+
 
 /*
   Get cost of 'sweep' full records retrieval.
@@ -3424,7 +4311,7 @@
     0  on error
 */
 
-static SEL_TREE *get_ne_mm_tree(PARAM *param, Item_func *cond_func, 
+static SEL_TREE *get_ne_mm_tree(RANGE_OPT_PARAM *param, Item_func *cond_func, 
                                 Field *field,
                                 Item *lt_value, Item *gt_value,
                                 Item_result cmp_type)
@@ -3459,7 +4346,7 @@
     Pointer to the tree built tree
 */
 
-static SEL_TREE *get_func_mm_tree(PARAM *param, Item_func *cond_func, 
+static SEL_TREE *get_func_mm_tree(RANGE_OPT_PARAM *param, Item_func *cond_func, 
                                   Field *field, Item *value,
                                   Item_result cmp_type, bool inv)
 {
@@ -3552,7 +4439,7 @@
 
 	/* make a select tree of all keys in condition */
 
-static SEL_TREE *get_mm_tree(PARAM *param,COND *cond)
+static SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param,COND *cond)
 {
   SEL_TREE *tree=0;
   SEL_TREE *ftree= 0;
@@ -3725,7 +4612,7 @@
 
 
 static SEL_TREE *
-get_mm_parts(PARAM *param, COND *cond_func, Field *field,
+get_mm_parts(RANGE_OPT_PARAM *param, COND *cond_func, Field *field,
 	     Item_func::Functype type,
 	     Item *value, Item_result cmp_type)
 {
@@ -3775,7 +4662,7 @@
 
 
 static SEL_ARG *
-get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part,
+get_mm_leaf(RANGE_OPT_PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part,
 	    Item_func::Functype type,Item *value)
 {
   uint maybe_null=(uint) field->real_maybe_null();
@@ -3834,8 +4721,11 @@
       !(conf_func->compare_collation()->state & MY_CS_BINSORT))
     goto end;
 
-  optimize_range= field->optimize_range(param->real_keynr[key_part->key],
-                                        key_part->part);
+  if (param->using_real_indexes)
+    optimize_range= field->optimize_range(param->real_keynr[key_part->key],
+                                          key_part->part);
+  else
+    optimize_range= TRUE;
 
   if (type == Item_func::LIKE_FUNC)
   {
@@ -4102,7 +4992,7 @@
 
 
 static SEL_TREE *
-tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
+tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
 {
   DBUG_ENTER("tree_and");
   if (!tree1)
@@ -4172,7 +5062,7 @@
   using index_merge.
 */
 
-bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, PARAM* param)
+bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, RANGE_OPT_PARAM* param)
 {
   key_map common_keys= tree1->keys_map;
   DBUG_ENTER("sel_trees_can_be_ored");
@@ -4199,7 +5089,7 @@
 }
 
 static SEL_TREE *
-tree_or(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
+tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
 {
   DBUG_ENTER("tree_or");
   if (!tree1 || !tree2)

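For readers skimming the large hunk above, here is a condensed, hedged sketch
(not part of the patch; the function name is illustrative) of the endpoint
handling that find_used_partitions() performs for a single
"c1 < fieldX < c2" SEL_ARG when the table is RANGE- or LIST-partitioned over
a monotonic expression. It assumes ppar was filled in by
create_partition_index_descrition() and that the partitioning field is
keypart 0 of the pruning 'index':

  static void sketch_prune_one_interval(PART_PRUNE_PARAM *ppar, SEL_ARG *arg)
  {
    uint32 start= 0, end= ppar->max_endpoint_val;

    if (!(arg->min_flag & NO_MIN_RANGE))
    {
      store_key_image_to_rec(arg->field, arg->min_value,
                             ppar->range_param.key_parts[0].length);
      start= ppar->get_endpoint(ppar->part_info, 1 /* left endpoint */,
                                ppar->force_include_bounds ||
                                !test(arg->min_flag & NEAR_MIN));
    }
    if (!(arg->max_flag & NO_MAX_RANGE))
    {
      store_key_image_to_rec(arg->field, arg->max_value,
                             ppar->range_param.key_parts[0].length);
      end= ppar->get_endpoint(ppar->part_info, 0 /* right endpoint */,
                              ppar->force_include_bounds ||
                              !test(arg->max_flag & NEAR_MAX));
    }
    /* Walk [start, end) and mark the corresponding partitions as used */
    for (uint32 num= start; num != end; num++)
      ppar->mark_full_partition_used(ppar->part_info,
                                     ppar->endpoints_walk_func(ppar, num));
  }
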
--- 1.57/sql/opt_range.h	2005-10-19 00:52:02 +03:00
+++ 1.58/sql/opt_range.h	2005-12-22 11:28:50 +02:00
@@ -249,6 +249,7 @@
 
 
 struct st_qsel_param;
+class PARAM;
 class SEL_ARG;
 
 /*
@@ -283,12 +284,12 @@
   QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
                                                struct st_table_ref *ref,
                                                ha_rows records);
-  friend bool get_quick_keys(struct st_qsel_param *param,
+  friend bool get_quick_keys(PARAM *param,
                              QUICK_RANGE_SELECT *quick,KEY_PART *key,
                              SEL_ARG *key_tree,
                              char *min_key, uint min_key_flag,
                              char *max_key, uint max_key_flag);
-  friend QUICK_RANGE_SELECT *get_quick_select(struct st_qsel_param*,uint idx,
+  friend QUICK_RANGE_SELECT *get_quick_select(PARAM*,uint idx,
                                               SEL_ARG *key_tree,
                                               MEM_ROOT *alloc);
   friend class QUICK_SELECT_DESC;
@@ -717,5 +718,9 @@
                                              struct st_table_ref *ref,
                                              ha_rows records);
 uint get_index_for_order(TABLE *table, ORDER *order, ha_rows limit);
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond);
+#endif
 
 #endif

--- 1.226/sql/sql_class.cc	2005-12-09 13:39:39 +02:00
+++ 1.227/sql/sql_class.cc	2005-12-22 11:28:50 +02:00
@@ -743,6 +743,13 @@
   field_list.push_back(new Item_empty_string("select_type", 19, cs));
   field_list.push_back(item= new Item_empty_string("table", NAME_LEN, cs));
   item->maybe_null= 1;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+  if (lex->describe & DESCRIBE_PARTITIONS)
+  {
+    field_list.push_back(item= new Item_empty_string("partitions", 10, cs));
+    item->maybe_null= 1;
+  }
+#endif
   field_list.push_back(item= new Item_empty_string("type", 10, cs));
   item->maybe_null= 1;
   field_list.push_back(item=new Item_empty_string("possible_keys",

--- 1.209/sql/sql_lex.h	2005-12-07 08:28:12 +02:00
+++ 1.210/sql/sql_lex.h	2005-12-22 11:28:50 +02:00
@@ -102,6 +102,11 @@
 // describe/explain types
 #define DESCRIBE_NORMAL		1
 #define DESCRIBE_EXTENDED	2
+/*
+  This is not #ifdef'ed because we want "EXPLAIN PARTITIONS ..." to produce
+  additional "partitions" column even if partitioning is not compiled in.
+*/
+#define DESCRIBE_PARTITIONS	4
 
 enum enum_sp_suid_behaviour
 {

--- 1.374/sql/sql_select.cc	2005-12-12 12:29:42 +02:00
+++ 1.375/sql/sql_select.cc	2005-12-22 11:28:50 +02:00
@@ -621,6 +621,21 @@
     DBUG_RETURN(0);
   }
 
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+  {
+    TABLE_LIST *tbl;
+    for (tbl= select_lex->leaf_tables; tbl; tbl= tbl->next_leaf)
+    {
+      if (!tbl->embedding)
+      {
+        Item *prune_cond= tbl->on_expr? tbl->on_expr : conds;
+        tbl->table->no_partitions_used= prune_partitions(thd, tbl->table,
+	                                                 prune_cond);
+      }
+    }
+  }
+#endif
+
   /* Optimize count(*), min() and max() */
   if (tables_list && tmp_table_param.sum_func_count && ! group_list)
   {
@@ -2006,7 +2021,11 @@
     if (*s->on_expr_ref)
     {
       /* s is the only inner table of an outer join */
-      if (!table->file->records && !embedding)
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+      if ((!table->file->records || table->no_partitions_used) && !embedding)
+#else
+      if (!table->file->records && !embedding)
+#endif
       {						// Empty table
         s->dependent= 0;                        // Ignore LEFT JOIN depend.
 	set_position(join,const_count++,s,(KEYUSE*) 0);
@@ -2033,8 +2052,14 @@
       while (embedding);
       continue;
     }
-
-    if ((table->s->system || table->file->records <= 1) && ! s->dependent &&
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+    bool no_partitions_used= table->no_partitions_used;
+#else
+    const bool no_partitions_used= FALSE;
+#endif
+    if ((table->s->system || table->file->records <= 1 ||
+         no_partitions_used) &&
+	!s->dependent &&
 	!(table->file->table_flags() & HA_NOT_EXACT_COUNT) &&
         !table->fulltext_searched)
     {
@@ -13753,6 +13778,9 @@
 					strlen(join->select_lex->type), cs));
     for (uint i=0 ; i < 7; i++)
       item_list.push_back(item_null);
+    if (join->thd->lex->describe & DESCRIBE_PARTITIONS)
+      item_list.push_back(item_null);
+  
     item_list.push_back(new Item_string(message,strlen(message),cs));
     if (result->send_data(item_list))
       join->error= 1;
@@ -13873,7 +13901,28 @@
 	item_list.push_back(new Item_string(table->alias,
 					    strlen(table->alias),
 					    cs));
-      /* type */
+      /* "partitions" column */
+      if (join->thd->lex->describe & DESCRIBE_PARTITIONS)
+      {
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+        partition_info *part_info;
+        if (!table->derived_select_number && 
+            (part_info= table->part_info))
+        {          
+          char parts_buff[128]; 
+          String parts_str(parts_buff,sizeof(parts_buff),cs);
+          make_used_partitions_str(part_info, &parts_str);
+          item_list.push_back(new Item_string(parts_str.ptr(),
+                                              parts_str.length(), cs));
+        }
+        else
+          item_list.push_back(item_null);
+#else
+        /* just produce empty column if partitioning is not compiled in */
+        item_list.push_back(item_null); 
+#endif
+      }
+      /* "type" column */
       item_list.push_back(new Item_string(join_type_str[tab->type],
 					  strlen(join_type_str[tab->type]),
 					  cs));

--- 1.429/sql/sql_yacc.yy	2005-12-09 13:39:41 +02:00
+++ 1.430/sql/sql_yacc.yy	2005-12-22 11:28:50 +02:00
@@ -7380,7 +7380,9 @@
 opt_extended_describe:
 	/* empty */ {}
 	| EXTENDED_SYM { Lex->describe|= DESCRIBE_EXTENDED; }
+	| PARTITIONS_SYM { Lex->describe|= DESCRIBE_PARTITIONS; }
 	;
+
 
 opt_describe_column:
 	/* empty */	{}

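Usage note: with this grammar rule a statement such as
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < 10 (table and condition
illustrative) sets DESCRIBE_PARTITIONS in Lex->describe, and the EXPLAIN code
in sql_class.cc and sql_select.cc shown above then emits the extra
"partitions" column.
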
--- 1.220/sql/ha_ndbcluster.cc	2005-11-28 21:14:03 +02:00
+++ 1.221/sql/ha_ndbcluster.cc	2005-12-22 11:28:50 +02:00
@@ -3105,7 +3105,6 @@
   DBUG_VOID_RETURN;
 }
 
-
 int ha_ndbcluster::extra(enum ha_extra_function operation)
 {
   DBUG_ENTER("extra");
@@ -3114,6 +3113,8 @@
     DBUG_PRINT("info", ("HA_EXTRA_RESET"));
     DBUG_PRINT("info", ("Clearing condition stack"));
     cond_clear();
+    if (m_part_info)
+      bitmap_clear_all(&m_part_info->used_partitions);
     break;
   case HA_EXTRA_IGNORE_DUP_KEY:       /* Dup keys don't rollback everything*/
     DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY"));

--- 1.12/sql/ha_partition.cc	2005-11-26 06:17:12 +02:00
+++ 1.13/sql/ha_partition.cc	2005-12-22 11:28:50 +02:00
@@ -2795,6 +2795,8 @@
   handler **file;
   DBUG_ENTER("ha_partition::reset");
   file= m_file;
+  if (m_part_info)
+    bitmap_clear_all(&m_part_info->used_partitions);
   do
   {
     if ((tmp= (*file)->reset()))

--- 1.15/sql/sql_partition.cc	2005-11-26 06:16:18 +02:00
+++ 1.16/sql/sql_partition.cc	2005-12-22 11:28:50 +02:00
@@ -2422,16 +2422,94 @@
     if (list_value < part_func_value)
       min_list_index= list_index + 1;
     else if (list_value > part_func_value)
+    {
+      if (!list_index)
+        goto notfound;
       max_list_index= list_index - 1;
-    else {
+    }
+    else
+    {
       *part_id= (uint32)list_array[list_index].partition_id;
       DBUG_RETURN(FALSE);
     }
   }
+notfound:
   *part_id= 0;
   DBUG_RETURN(TRUE);
 }
 
+/*
+  Find the part of part_info->list_array that corresponds to given interval
+  
+  SYNOPSIS 
+    get_list_array_idx_for_endpoint()
+      part_info         Partitioning info (partitioning type must be LIST)
+      left_endpoint     TRUE  - the interval is [a; +inf) or (a; +inf)
+                        FALSE - the interval is (-inf; a] or (-inf; a)
+      include_endpoint  TRUE iff the interval includes the endpoint
+
+  DESCRIPTION
+    This function finds the part of part_info->list_array where values of
+    list_array[idx].list_value are contained within the specified interval.
+    list_array is ordered by list_value, so
+    1. For [a; +inf) or (a; +inf)-type intervals (left_endpoint==TRUE), the 
+       sought array part starts at some index idx and continues till array 
+       end.
+       The function returns the first index idx such that
+       list_array[idx].list_value is contained within the passed interval.
+       
+    2. For (-inf; a] or (-inf; a)-type intervals (left_endpoint==FALSE), the
+       sought array part starts at array start and continues till some last 
+       index idx.
+       The function returns the first index idx such that
+       list_array[idx].list_value is NOT contained within the passed interval.
+       If all array elements are contained, part_info->no_list_values is
+       returned.
+
+  NOTE
+    The caller will call this function and then will run along the part of 
+    list_array to collect partition ids. If the number of list values is 
+    significantly higher than the number of partitions, this could be slow and
+    we could invent some other approach. The "run over list array" part is
+    already wrapped in a get_next()-like function.
+
+  RETURN
+    The edge of the corresponding part_info->list_array part.
+*/
+
+uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
+                                       bool left_endpoint,
+                                       bool include_endpoint)
+{
+  DBUG_ENTER("get_list_array_idx_for_endpoint");
+  LIST_PART_ENTRY *list_array= part_info->list_array;
+  uint list_index;
+  longlong list_value;
+  uint min_list_index= 0, max_list_index= part_info->no_list_values - 1;
+  longlong part_func_value= part_info->part_expr->val_int();
+  while (max_list_index >= min_list_index)
+  {
+    list_index= (max_list_index + min_list_index) >> 1;
+    list_value= list_array[list_index].list_value;
+    if (list_value < part_func_value)
+      min_list_index= list_index + 1;
+    else if (list_value > part_func_value)
+    {
+      if (!list_index)
+        goto notfound;
+      max_list_index= list_index - 1;
+    }
+    else 
+    {
+      DBUG_RETURN(list_index + test(left_endpoint ^ include_endpoint));
+    }
+  }
+notfound:
+  if (list_value < part_func_value)
+    list_index++;
+  DBUG_RETURN(list_index);
+}
+
 
 bool get_partition_id_range(partition_info *part_info,
                             uint32 *part_id)
@@ -2461,6 +2539,89 @@
   DBUG_RETURN(FALSE);
 }
 
+
+/*
+  Find the part of part_info->range_int_array that covers the given interval
+  
+  SYNOPSIS 
+    get_partition_id_range_for_endpoint()
+      part_info         Partitioning info (partitioning type must be RANGE)
+      left_endpoint     TRUE  - the interval is [a; +inf) or (a; +inf)
+                        FALSE - the interval is (-inf; a] or (-inf; a).
+      include_endpoint  TRUE <=> the endpoint itself is included in the
+                        interval
+
+  DESCRIPTION
+    This function finds the part of part_info->range_int_array where the
+    elements have non-empty intersections with the given interval.
+    
+    A range_int_array element at index idx represents the interval
+      
+      [range_int_array[idx-1], range_int_array[idx]),
+
+    intervals are disjoint and ordered by their right bound, so
+    
+    1. For [a; +inf) or (a; +inf)-type intervals (left_endpoint==TRUE), the
+       sought array part starts at some index idx and continues till array 
+       end.
+       The function returns the first index idx such that the interval
+       represented by range_int_array[idx] has a non-empty intersection with
+       the passed interval.
+       
+    2. For (-inf; a] or (-inf; a)-type intervals (left_endpoint==FALSE), the
+       sought array part starts at array start and continues till some last 
+       index idx.
+       The function returns the first index idx such that the interval
+       represented by range_int_array[idx] has an EMPTY intersection with the
+       passed interval.
+       If the interval represented by the last array element has non-empty 
+       intersection with the passed interval, part_info->no_parts is
+       returned.
+       
+  RETURN
+    The edge of the corresponding part_info->range_int_array part.
+*/
+
+uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
+                                           bool left_endpoint,
+                                           bool include_endpoint)
+{
+  DBUG_ENTER("get_partition_id_range_for_endpoint");
+  longlong *range_array= part_info->range_int_array;
+  uint max_partition= part_info->no_parts - 1;
+  uint min_part_id= 0, max_part_id= max_partition, loc_part_id;
+  longlong part_func_value= part_info->part_expr->val_int();
+  while (max_part_id > min_part_id)
+  {
+    loc_part_id= (max_part_id + min_part_id + 1) >> 1;
+    if (range_array[loc_part_id] < part_func_value)
+      min_part_id= loc_part_id + 1;
+    else
+      max_part_id= loc_part_id - 1;
+  }
+  loc_part_id= max_part_id;
+  if (loc_part_id < max_partition && 
+      part_func_value >= range_array[loc_part_id+1])
+  {
+     loc_part_id++;
+  }
+  if (left_endpoint)
+  {
+    if (part_func_value >= range_array[loc_part_id])
+      loc_part_id++;
+  }
+  else 
+  {
+    if (part_func_value == range_array[loc_part_id])
+      loc_part_id += test(include_endpoint);
+    else if (part_func_value > range_array[loc_part_id])
+      loc_part_id++;
+    loc_part_id++;
+  }
+  DBUG_RETURN(loc_part_id);
+}
+
+
 bool get_partition_id_hash_nosub(partition_info *part_info,
                                  uint32 *part_id)
 {
@@ -3149,10 +3310,16 @@
   */
     uint part_func_len= part_info->part_func_len;
     uint subpart_func_len= part_info->subpart_func_len; 
+    uint bitmap_bits= part_info->no_subparts? 
+                       (part_info->no_subparts* part_info->no_parts):
+                        part_info->no_parts;
+    uint bitmap_bytes= bitmap_buffer_size(bitmap_bits);
+    uint32 *bitmap_buf;
     char *part_func_string, *subpart_func_string= NULL;
     if (!((part_func_string= thd->alloc(part_func_len))) ||
         (subpart_func_len &&
-        !((subpart_func_string= thd->alloc(subpart_func_len)))))
+        !((subpart_func_string= thd->alloc(subpart_func_len)))) ||
+        !((bitmap_buf= (uint32*)thd->alloc(bitmap_bytes))))
     {
       my_error(ER_OUTOFMEMORY, MYF(0), part_func_len);
       free_items(thd->free_list);
@@ -3165,6 +3332,8 @@
              subpart_func_len);
     part_info->part_func_string= part_func_string;
     part_info->subpart_func_string= subpart_func_string;
+
+    bitmap_init(&part_info->used_partitions, bitmap_buf, bitmap_bytes*8, FALSE);
   }
 
   result= FALSE;
@@ -3238,3 +3407,60 @@
   } while (++i < key_parts);
   DBUG_VOID_RETURN;
 }
+
+
+/*
+  Fill a string with a comma-separated list of used partition names
+  SYNOPSIS
+    make_used_partitions_str()
+      part_info  IN  Partitioning info
+      parts_str  OUT The string to fill
+*/
+
+void make_used_partitions_str(partition_info *part_info, String *parts_str)
+{
+  parts_str->length(0);
+  partition_element *pe;
+  uint partition_id= 0;
+  List_iterator<partition_element> it(part_info->partitions);
+  
+  if (part_info->subpart_type != NOT_A_PARTITION)
+  {
+    partition_element *head_pe;
+    while ((head_pe= it++))
+    {
+      List_iterator<partition_element> it2(head_pe->subpartitions);
+      while ((pe= it2++))
+      {
+        if (bitmap_is_set(&part_info->used_partitions, partition_id))
+        {
+          if (parts_str->length())
+            parts_str->append(',');
+          parts_str->append(head_pe->partition_name,
+                           strlen(head_pe->partition_name),
+                           system_charset_info);
+          parts_str->append('_');
+          parts_str->append(pe->partition_name,
+                           strlen(pe->partition_name),
+                           system_charset_info);
+        }
+        partition_id++;
+      }
+    }
+  }
+  else
+  {
+    while ((pe= it++))
+    {
+      if (bitmap_is_set(&part_info->used_partitions, partition_id))
+      {
+        if (parts_str->length())
+          parts_str->append(',');
+        parts_str->append(pe->partition_name, strlen(pe->partition_name),
+                         system_charset_info);
+      }
+      partition_id++;
+    }
+  }
+}
+
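
A small worked illustration (not part of the patch) of
get_partition_id_range_for_endpoint(), assuming a table with three RANGE
partitions whose range_int_array is {10, 20, 30}, i.e. p0 holds values below
10, p1 values below 20 and p2 values below 30:

  * Interval [15; +inf): left_endpoint=TRUE, include_endpoint=TRUE, and
    part_expr evaluates to 15. The binary search stops at partition 0, and
    since 15 >= range_int_array[0] the function returns 1, i.e. only p1 and
    p2 can contain matching rows.

  * Interval (-inf; 20): left_endpoint=FALSE, include_endpoint=FALSE, and
    part_expr evaluates to 20. The function returns 2, i.e. p0 and p1
    intersect the interval while p2 = [20; 30) does not.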