From: Guilhem Bichot
Date: November 10 2010 10:05pm
Subject: Re: bzr commit into mysql-next-mr-bugfixing branch (jorgen.loland:3229) WL#5594
Hello Jorgen,

Jorgen Loland wrote, on 22.10.2010 10:31:
 > #At file:///export/home/jl208045/mysql/wl4800/mysql-next-mr-opt-backporting-wl4800-patchcleanup/
 > based on revid:guilhem@stripped
 >
 >  3229 Jorgen Loland	2010-10-22
 >       WL#5594 - Add optimizer traces to the range optimizer

 > --- a/mysql-test/r/optimizer_trace_charset.result	2010-10-09 15:04:30 +0000
 > +++ b/mysql-test/r/optimizer_trace_charset.result	2010-10-22 08:31:43 +0000

 > @@ -181,4 +209,135 @@ select * from v1 where v1.col = '���	
 >  }
 >  drop table t1;
 >  drop view v1;
 > +create table t1(c char(4) primary key);
 > +insert into t1 values ('aaa'), ('��');
 > +select * from t1 where c < '���;
 > +c
 > +aaa
 > +��
 > +select query, trace from information_schema.OPTIMIZER_TRACE;
 > +query	trace
 > +select * from t1 where c < '���	{
 > +  "steps": [
 > +    {
 > +      "expanded_query": "/* select#1 */ select `*` AS `*` from `test`.`t1` where (`c` < '?????')"
 > +    },
...
 > +                  "analyzing_range_alternatives": {
 > +                    "range_scan_alternatives": [
 > +                      {
 > +                        "index": "PRIMARY",
 > +                        "ranges": [
 > +                          "c <= ��"

GB the condition is printed as
"c <= ��"
in "ranges", but as
"(`test`.`t1`.`c` < '?????')"
in "condition_processing"... which one is correct and which is wrong?
Also, the "ranges" one shows 2 symbols, though the condition
had 3 symbols...
I propose that I address this myself later (see further below).

 > === modified file 'mysql-test/r/optimizer_trace_no_prot.result'

GB optimizer_trace_ps_prot.result also needs an update.

 > --- a/mysql-test/r/optimizer_trace_no_prot.result	2010-10-21 15:22:32 +0000
 > +++ b/mysql-test/r/optimizer_trace_no_prot.result	2010-10-22 08:31:43 +0000
 > @@ -1,5 +1,6 @@
 >  select * from information_schema.OPTIMIZER_TRACE;
 >  QUERY	TRACE	MISSING_BYTES_BEYOND_MAX_MEM_SIZE	OS_MALLOC_ERROR
 > +set optimizer_trace_max_mem_size=1048576;
 >  set @@session.optimizer_trace="enabled=on";
 >  select * from information_schema.OPTIMIZER_TRACE;
 >  QUERY	TRACE	MISSING_BYTES_BEYOND_MAX_MEM_SIZE	OS_MALLOC_ERROR
 > @@ -40,6 +41,13 @@ SELECT (SELECT 1 FROM D WHERE d = c) AS
 >                "C"
 >              ],
 >              "records_estimation": [
 > +              {
 > +                "database": "test",
 > +                "table": "C",
 > +                "records": 1,
 > +                "cost": 1,
 > +                "cause": "system_table"

GB this is the "cause"... of what?

 > @@ -836,21 +1059,23 @@ explain SELECT c FROM C where c+1 in (se
 >                  {
 >                    "considered_execution_plans": [
 >                      {
 > +                      "database": "test",
 >                        "table": "D",
 >                        "best_access_path": {
 >                          "considered_access_paths": [
 >                            {
 >                              "access_type": "index",
 >                              "index": "d",
 > -                            "cost": 1,
 >                              "records": 1,
 > +                            "cost": 1,
 >                              "chosen": true
 >                            },
 >                            {
 > -                            "access_type": "table scan",
 > +                            "access_type": "scan",
 >                              "cost": 2,
 >                              "records": 2,
 > -                            "chosen": false
 > +                            "chosen": false,
 > +                            "cause": "index_cheaper"

GB or should we standardize on
"cause": "cost"
to mean: "this has a cost higher than the previously examined methods"?

 > @@ -3720,6 +4185,59 @@ update D set d=5 where D is NULL	{
 >    "steps": [
 >      {
 >       "expanded_query": "/* select#1 */ select `d` AS `d` from `test`.`D` where isnull(`D`)"
 > +    },
 > +    {
 > +      "range_analysis": {
 > +        "table_scan": {
 > +          "records": 2,
 > +          "cost": 4.5034
 > +        } /* table_scan */,
 > +        "potential_range_indices": [
 > +          {
 > +            "index": "d",
 > +            "usable": true,
 > +            "key_parts": [
 > +              "d"
 > +            ] /* key_parts */
 > +          }
 > +        ] /* potential_range_indices */,
 > +        "group_index_range": {
 > +          "chosen": false,
 > +          "cause": "no_join"
 > +        } /* group_index_range */,
 > +        "analyzing_range_alternatives": {
 > +          "range_scan_alternatives": [
 > +            {
 > +              "index": "d",
 > +              "ranges": [
 > +                "NULL <= d <= NULL"
 > +              ] /* ranges */,
 > +              "index_only": false,
 > +              "records": 1,
 > +              "cost": 2.21,
 > +              "rowid_ordered": true,
 > +              "chosen": true
 > +            }
 > +          ] /* range_scan_alternatives */,
 > +          "analyzing_roworder_intersect": {
 > +            "usable": false,
 > +            "cause": "too_few_roworder_scans"
 > +          } /* analyzing_roworder_intersect */
 > +        } /* analyzing_range_alternatives */,
 > +        "chosen_range_access_summary": {
 > +          "range_access_plan": {
 > +            "type": "range_scan",
 > +            "index": "d",
 > +            "records": 1,
 > +            "ranges": [
 > +              "NULL <= d <= NULL"
 > +            ] /* ranges */
 > +          } /* range_access_plan */,
 > +          "records_in_plan": 1,
 > +          "cost_of_plan": 2.21,

GB best_extension_by_limited_search() uses "cost_for_plan" and
"records_for_plan". There is also "cumulated_cost/records"
elsewhere. We should standardize on one pair of words to represent
this concept of "cost after including previous costs".

 > +          "chosen": true
 > +        } /* chosen_range_access_summary */
 > +      } /* range_analysis */
 >      }
 >    ] /* steps */
 >  }	0	0
 > @@ -3730,6 +4248,55 @@ delete from D where d=5	{
 >    "steps": [
 >      {
 >        "expanded_query": "/* select#1 */ select  from dual where (`d` = 5)"
 > +    },
 > +    {
 > +      "range_analysis": {
 > +        "table_scan": {
 > +          "records": 2,

GB there is no indication of the table, but as this is a single-table
DELETE anyway, it's ok. Or should we give it a longer introduction,
like in SELECT, which has:
           {
             "records_estimation": [
               {
                 "database": "test",
                 "table": "D",
                 "range_analysis": {
?

 > @@ -5477,6 +6425,7 @@ explain select * from v1 where id="b"	{
 >              ] /* constant_tables */,
 >              "records_estimation": [
 >                {
 > +                "database": "test",

GB Question: should we omit printing the database if it's equal to
the default database (THD::db)?
Pros: in 99% of our debugging cases, there is a single database, so we
wouldn't see "database" in the traces at all
Cons: less uniform.
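A minimal sketch of the "omit" variant (hypothetical; it assumes the
default database is reachable as thd->db where the object is filled):

   // print "database" only when it differs from the default database
   if (thd->db == NULL || strcmp(tbl->db, thd->db) != 0)
     trace_object.add_str("database", tbl->db, strlen(tbl->db));
   trace_object.add_str("table", tbl->alias, strlen(tbl->alias));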

 >                  "table": "t1",
 >                  "table_scan": {
 >                    "records": 3,

 > @@ -5548,6 +6500,7 @@ explain select * from v1 where id="b"	{
 >              ] /* constant_tables */,
 >              "records_estimation": [
 >                {
 > +                "database": "",
 >                  "table": "v1",

GB The view "v1" is said to have database "". It's not very intuitive;
I assume it's because the optimizer is really using a temp table that
materializes the view, and such internal temp tables don't have a
database.
So, probably nothing to worry about.

 > === added file 'mysql-test/r/optimizer_trace_range.result'

 > +                    },
 > +                    {
 > +                      "index": "i8",
 > +                      "usable": false,
 > +                      "cause": "not_usable_by_conditions"
 > +                    }
 > +                  ] /* potential_range_indices */,
 > +                  "group_index_range": {
 > +                    "chosen": false,
 > +                    "cause": "not_group_by_or_distinct"
 > +                  } /* group_index_range */,
 > +                  "analyzing_range_alternatives": {
 > +                    "range_scan_alternatives": [
 > +                      {
 > +                        "index": "i2",
 > +                        "ranges": [
 > +                          "key2 < 5",
 > +                          "1020 < key2"

GB here we have a nice comma-delimited list of ranges...

 > +EXPLAIN SELECT * FROM t2 WHERE key1a = 5 and key1b < 10	{
 > +  "steps": [
 > +    {
 > +      "expanded_query": "/* select#1 */ select `*` AS `*` from `test`.`t2` where ((`key1a` = 5) and (`key1b` < 10))"
 > +    },
...
 > +                  "group_index_range": {
 > +                    "chosen": false,
 > +                    "cause": "not_group_by_or_distinct"
 > +                  } /* group_index_range */,
 > +                  "analyzing_range_alternatives": {
 > +                    "range_scan_alternatives": [
 > +                      {
 > +                        "index": "PRIMARY",
 > +                        "ranges": [
 > +                          "5 <= key1a <= 5 : key1b < 10"

GB ... but here we have it delimited with ":" ?

 > +                        ] /* ranges */,
 > +                        "index_only": false,
 > +                        "records": 1,
 > +                        "cost": 2.21,
 > +                        "rowid_ordered": false,
 > +                        "chosen": true

 > +static void append_range(String *out,
 > +                         const KEY_PART_INFO *key_parts,
 > +                         const uchar *min_key, const uint16 min_length,
 > +                         const uchar *max_key, const uint16 max_length,
 > +                         const uint flag);

GB no problem with the new "const" integers, though I was insisting
more on the pointers in particular.
So you can keep the const min/max_length ints, or go back to
non-const ints, whichever you prefer.

 >  #endif
 >
 >  static SEL_TREE *tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2);
 > @@ -1956,12 +1956,68 @@ public:
 >    static void operator delete(void *ptr, MEM_ROOT *mem_root) { /* Never called */ }
 >    virtual ~TABLE_READ_PLAN() {}               /* Remove gcc warning */
 >
 > +#ifdef OPTIMIZER_TRACE
 > +  /**
 > +     Add basic info for this TABLE_READ_PLAN to the optimizer trace.
 > +
 > +     @param param        Parameters for range analysis of this table
 > +     @param trace_object The optimizer trace object the info is appended to
 > +   */
 > +  virtual void trace_basic_info(const PARAM *param,
 > +                                Opt_trace_object *trace_object) const =0;

GB space after equal

 > +#endif
 >  };
 >
 > -class TRP_ROR_INTERSECT;
 > -class TRP_ROR_UNION;
 > -class TRP_INDEX_MERGE;
 > +/**
 > +  Print a key into a stream
 >
 > +  @param[out] out          String the key is appended to
 > +  @param[in]  key_part     Index components description
 > +  @param[in]  key          Key tuple
 > +  @param[in]  used_length  Key tuple length
 > +*/
 > +static void
 > +print_key2(String *out, const KEY_PART_INFO *key_part, const uchar *key,
 > +           uint used_length)
 > +{
 > +  const uchar *key_end= key + used_length;
 > +  String tmp;
 > +  uint store_length;
 > +  TABLE *table= key_part->field->table;
 > +  my_bitmap_map *old_sets[2];
 > +
 > +  dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
 > +
 > +  for (; key < key_end; key+= store_length, key_part++)
 > +  {
 > +    Field *field= key_part->field;
 > +    store_length= key_part->store_length;
 > +
 > +    if (field->real_maybe_null())
 > +    {
 > +      /* Byte 0 of key is the null-byte. If set, key is NULL.
 > +         Otherwise, print the key value starting immediately after the
 > +         null-byte
 > +       */
 > +      if (*key)
 > +      {
 > +        out->append(STRING_WITH_LEN("NULL"));
 > +        continue;
 > +      }
 > +      key++;                                    // Skip null byte
 > +      store_length--;
 > +    }
 > +    field->set_key_image(key, key_part->length);
 > +    if (field->type() == MYSQL_TYPE_BIT)
 > +      (void) field->val_int_as_str(&tmp, 1);
 > +    else
 > +      field->val_str(&tmp);

GB I would put a comment saying that the val_* above have changed
tmp's charset.
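Something like, just before the append() (no behavior change, only
documenting the side effect):

   // val_int_as_str()/val_str() above have set tmp's charset to the
   // field's charset; pass it on so append() can convert if needed.
   out->append(tmp.ptr(), tmp.length(), tmp.charset());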

 > +    out->append(tmp.ptr(), tmp.length(), tmp.charset());
 > +    if (key + store_length < key_end)
 > +      out->append('\'');
 > +  }
 > +  dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
 > +}
 >
 >  /*
 >    Plan for a QUICK_RANGE_SELECT scan.
 > @@ -1996,8 +2052,65 @@ public:
 >      }
 >      DBUG_RETURN(quick);
 >    }
 > +
 > +#ifdef OPTIMIZER_TRACE
 > +  void trace_basic_info(const PARAM *param, Opt_trace_object *trace) const
 > +  {
 > +    DBUG_ASSERT(param->using_real_indexes);
 > +    uint keynr_in_table= param->real_keynr[key_idx];

GB keynr_in_table could be const. I realize that it's inconsistent
of me to:
- not insist on integer function arguments being const (above in the
review)
- and at the same time suggest a const for this integer local
variable.
They are all local variables after all...
I think I follow the idea that it is more common to change an integer
local variable than an integer function argument, so "const" on
the first conveys more information for the reader than "const" on the
second.
But as I said earlier, no problem with having const integer
arguments where you like.

 > +
 > +    const KEY cur_key= param->table->key_info[keynr_in_table];
 > +    const KEY_PART_INFO *key_part= cur_key.key_part;
 > +
 > +    trace->add_str("type", "range_scan").
 > +      add_str("index", cur_key.name, strlen(cur_key.name)).
 > +      add("records", records);
 > +
 > +    Opt_trace_array trace_range(param->thd->opt_trace,"ranges");

GB space after ,
Other places like this can be found with:
bzr diff -rX| egrep "^\+.*,[^ ]"

 > @@ -2015,6 +2128,52 @@ public:
 >    struct st_ror_scan_info *cpk_scan;  /* Clustered PK scan, if there is one */
 >    bool is_covering; /* TRUE if no row retrieval phase is necessary */
 >    double index_scan_costs; /* SUM(cost(index_scan)) */
 > +
 > +#ifdef OPTIMIZER_TRACE
 > +  void trace_basic_info(const PARAM *param, Opt_trace_object *trace) const
 > +  {
 > +    trace->add_str("type", "index_roworder_intersect").
 > +      add("records", records).
 > +      add("cost", read_cost).
 > +      add("covering", is_covering).
 > +      add("cpk_scan", cpk_scan != NULL);
 > +
 > +    Opt_trace_context *trace_ctx= param->thd->opt_trace;
 > +    Opt_trace_array ota(trace_ctx,"intersect_of");

GB space after =

 > +    for (st_ror_scan_info **cur_scan= first_scan;
 > +         cur_scan != last_scan;
 > +         cur_scan++)
 > +    {
 > +      const KEY cur_key= param->table->key_info[(*cur_scan)->keynr];
 > +      const KEY_PART_INFO *key_part= cur_key.key_part;
 > +
 > +      Opt_trace_object trace_isect_idx(trace_ctx);
 > +      trace_isect_idx.add_str("type", "range_scan").
 > +        add_str("index", cur_key.name, strlen(cur_key.name)).

GB there are a few add_str() calls in the patch that may or may not
need escaping. The current API is that
- add_str() (with or without a "length" parameter) expects UTF8
without special characters (like, no \n), so does no escaping.
- add_str_escaped() expects UTF8 and does escaping (of
\n and other special characters)
So add_str_escaped() is for when data may contain "whatever" (a range
would fall in this category: it can contain a varchar with \n in the
middle...).
add_str_escaped() is slower (it tests each byte for escaping), which is
why add_str() exists.
add_str() is ok for hard-coded strings made of A-Za-z0-9_-
(non-special ASCII characters) like
   trace->add_str("type", "index_roworder_intersect")
or
   trace_summary.add_str("cause", cause);
When the string is variable and user-provided, like the cur_key.name
above, we should use add_str_escaped(). Because it's possible to
create an index named index\n1, as Bar showed to me:
create table... index `index
1`));
Don't bother with fixing this now. I can, after your push, make a pass
over all add_str() calls and fix what needs fixing (there are problems
in my code too).
There are also less performant solutions (but less of a burden on the
developer): I could remove add_str_escaped() and make add_str() always
do escaping. It feels wasteful to spend CPU time on escaping "cost",
which doesn't need escaping, though.
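To make the intended split concrete (a sketch; I'm assuming
add_str_escaped() takes the same arguments as add_str()):

   // hard-coded ASCII literal: plain add_str() is safe
   trace_isect_idx.add_str("type", "range_scan");
   // user-controlled identifier: may contain \n etc., so escape it
   trace_isect_idx.add_str_escaped("index", cur_key.name,
                                   strlen(cur_key.name));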
What do you think?

 > +        add("records", (*cur_scan)->records);
 > +
 > +      Opt_trace_array trace_range(trace_ctx, "ranges");
 > +      for (const SEL_ARG *current= (*cur_scan)->sel_arg;
 > +           current;

GB the two lines above have trailing whitespace, which the coding style
now bans.
If this patch was committed onto revision X, you can do
bzr diff -rX| egrep "^\+.* $"
to catch added lines ending with whitespace.
It's even possible to generate the diff, revert the changes, eliminate
all trailing whitespace in the diff with "sed", and apply the new diff.
If one trusts one's own scripts ;-)

 > @@ -2061,23 +2250,71 @@ public:
 >  class TRP_GROUP_MIN_MAX : public TABLE_READ_PLAN
 >  {
 >  private:
 > -  bool is_index_scan; /* Use index_next() instead of random read */
 > +  bool have_min;             ///< TRUE if there is a MIN function
 > +  bool have_max;             ///< TRUE if there is a MAX function
 > +  /** TRUE if there is an aggregate distinct function, e.g.

GB /** has to be alone on its line.

 > +      "COUNT(DISTINCT x)"
 > +   */
 > +  bool have_agg_distinct;
 > +  /** The key_part of the only field used by all MIN/MAX functions.

GB /** has to be alone on its line

 > +  void trace_basic_info(const PARAM *param, Opt_trace_object *trace) const
 > +  {
 > +    trace->add_str("type", "index_group").
 > +      add_str("index", index_info->name, strlen(index_info->name)).
 > +      add_str("group_attribute", min_max_arg_part->field->field_name,
 > +              strlen(min_max_arg_part->field->field_name)).
 > +      add("min_aggregate", have_min).
 > +      add("max_aggregate", have_max).
 > +      add("distinct_aggregate", have_agg_distinct).
 > +      add("key_parts_used_for_access", used_key_parts).

GB when printing "potential_range_indices", key_part is printed in a
human-readable form (names of columns); should we do the same here
instead of printing a number?
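A sketch of what I mean (I'm guessing at a value-only add_str()
overload for array members, so take the exact calls with a grain of
salt):

   Opt_trace_array kp(param->thd->opt_trace, "key_parts_used_for_access");
   for (uint i= 0; i < used_key_parts; i++)
     kp.add_str(index_info->key_part[i].field->field_name,
                strlen(index_info->key_part[i].field->field_name));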

 > +      add("records", records).
 > +      add("cost", read_cost);
 > +
 > +    const KEY_PART_INFO *key_part= index_info->key_part;
 > +
 > +    Opt_trace_array trace_range(param->thd->opt_trace,"ranges");

GB space after ,

 > @@ -2312,17 +2547,28 @@ int SQL_SELECT::test_quick_select(THD *t
 >        key_info= head->key_info;
 >        for (idx=0 ; idx < head->s->keys ; idx++, key_info++)
 >        {
 > -#ifndef NO_OPT_TRACE_FOR_RANGE_OPT
 > -        oto1.add(key_info->name, "index");
 > -#endif
 > +        Opt_trace_object trace_idx_details(thd->opt_trace);
 > +        trace_idx_details.add_str("index",
 > +                                  key_info->name, strlen(key_info->name));
 >          KEY_PART_INFO *key_part_info;
 >          if (!keys_to_use.is_set(idx))
 > +        {
 > +          trace_idx_details.add("usable", false).
 > +            add_str("cause", "not_usable_by_conditions");
 >            continue;
 > +        }
 >          if (key_info->flags & HA_FULLTEXT)
 > +        {
 > +          trace_idx_details.add("usable", false).
 > +            add_str("cause", "fulltext");
 >            continue;    // ToDo: ft-keys in non-ft ranges, if possible   SerG
 > +        }
 > +
 > +        trace_idx_details.add("usable", true);
 >
 >          param.key[param.keys]=key_parts;
 >          key_part_info= key_info->key_part;
 > +        Opt_trace_array trace_keypart(thd->opt_trace, "key_parts");

GB you might want to cache thd->opt_trace in some pointer like
   Opt_trace_context * const trace= thd->opt_trace.

 > @@ -2494,18 +2777,26 @@ int SQL_SELECT::test_quick_select(THD *t
 >        }
 >      }
 >
 > -#ifndef NO_OPT_TRACE_FOR_RANGE_OPT
 > -    oto.add(param.table->quick_condition_rows, "quick_condition_rows");
 > -#endif
 > -  free_mem:
 > +free_mem:
 > +    if (quick)
 > +    {
 > +      Opt_trace_object trace_range_summary(thd->opt_trace,
 > +                                           "chosen_range_access_summary");
 > +      {
 > +        Opt_trace_object trace_range_plan(thd->opt_trace,
 > +                                           "range_access_plan");
 > +        best_trp->trace_basic_info(&param, &trace_range_plan);
 > +
 > +      }
 > +      trace_range_summary.add("records_in_plan", quick->records).
 > +        add("cost_of_plan", quick->read_time).
 > +        add("chosen", true);
 > +    }
 > +
 >      free_root(&alloc,MYF(0));			// Return memory & allocator
 >      thd->mem_root= param.old_root;
 >      thd->no_errors=0;
 >    }
 > -#ifndef NO_OPT_TRACE_FOR_RANGE_OPT
 > -  oto.add(records, "records");
 > -  OPT_TRACE2(thd->opt_trace, print_quick(quick, &needed_reg));

GB after that change, print_quick() is not used anymore; could you
please delete it (I get a compiler warning)? Or if it's code which we
want to keep around, maybe disable it inside "#if 0"?
It may be a good time to diff opt_range.cc between opt-backporting and
your tree: maybe some old 2009 prototype stuff is not needed
anymore?

 > -#endif
 >
 >    /*
 >      Assume that if the user is using 'limit' we will only need to scan
 > @@ -3837,49 +4128,63 @@ TABLE_READ_PLAN *get_best_disjunct_quick
 >    DBUG_ENTER("get_best_disjunct_quick");
 >    DBUG_PRINT("info", ("Full table scan cost: %g", read_time));
 >
 > +  Opt_trace_context * const trace= param->thd->opt_trace;
 > +  Opt_trace_object trace_best_disjunct(trace);
 >    if (!(range_scans= (TRP_RANGE**)alloc_root(param->mem_root,
 >                                               sizeof(TRP_RANGE*)*
 >                                               n_child_scans)))
 >      DBUG_RETURN(NULL);
 > -  /*
 > -    Collect best 'range' scan for each of disjuncts, and, while doing so,
 > -    analyze possibility of ROR scans. Also calculate some values needed by
 > -    other parts of the code.
 > -  */
 > -  for (ptree= imerge->trees, cur_child= range_scans;
 > -       ptree != imerge->trees_next;
 > -       ptree++, cur_child++)
 > -  {
 > -    DBUG_EXECUTE("info", print_sel_tree(param, *ptree, &(*ptree)->keys_map,
 > -                                        "tree in SEL_IMERGE"););
 > -    if (!(*cur_child= get_key_scans_params(param, *ptree, TRUE, FALSE, read_time)))
 > -    {
 > -      /*
 > -        One of index scans in this index_merge is more expensive than entire
 > -        table read for another available option. The entire index_merge (and
 > -        any possible ROR-union) will be more expensive then, too. We continue
 > -        here only to update SQL_SELECT members.
 > -      */
 > -      imerge_too_expensive= TRUE;
 > -    }
 > -    if (imerge_too_expensive)
 > -      continue;
 > +  {

GB about adding {} blocks: for some cases where the implied
indentation touches too many lines, there is a new
Opt_trace_struct::end() function; it depends on the case...
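For example, instead of introducing a new {} block around the loop
(a sketch; I'm assuming end() makes the later implicit close a no-op):

   Opt_trace_array ota(trace, "indices_to_merge");
   for (ptree= imerge->trees, cur_child= range_scans; ...; ...)
   {
     ...
   }
   ota.end();   // closes the array here, without the extra indentation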

 > +    /*
 > +      Collect best 'range' scan for each of disjuncts, and, while doing so,
 > +      analyze possibility of ROR scans. Also calculate some values needed by
 > +      other parts of the code.
 > +    */
 > +    Opt_trace_array ota(trace,"indices_to_merge");
 > +    for (ptree= imerge->trees, cur_child= range_scans;
 > +         ptree != imerge->trees_next;
 > +         ptree++, cur_child++)
 > +    {
 >       DBUG_EXECUTE("info", print_sel_tree(param, *ptree, &(*ptree)->keys_map,
 > +                                          "tree in SEL_IMERGE"););
 > +      Opt_trace_object trace_idx(trace);
 > +      if (!(*cur_child=
 > +            get_key_scans_params(param, *ptree, TRUE, FALSE, read_time)))

GB The coding style also allows true/false in C++ code now; you can use
that if you like.

 > +      {
 > +        /*
 > +          One of index scans in this index_merge is more expensive than entire
 > +          table read for another available option. The entire index_merge (and
 > +          any possible ROR-union) will be more expensive then, too. We continue
 > +          here only to update SQL_SELECT members.
 > +        */
 > +        imerge_too_expensive= TRUE;
 > +      }
 > +      if (imerge_too_expensive)
 > +      {
 > +        trace_idx.add("chosen", false).add_str("cause", "cost");
 > +        continue;
 > +      }
 >
 > -    imerge_cost += (*cur_child)->read_cost;
 > -    all_scans_ror_able &= ((*ptree)->n_ror_scans > 0);
 > -    all_scans_rors &= (*cur_child)->is_ror;
 > -    if (pk_is_clustered &&
 > -        param->real_keynr[(*cur_child)->key_idx] ==
 > -        param->table->s->primary_key)
 > -    {
 > -      cpk_scan= cur_child;
 > -      cpk_scan_records= (*cur_child)->records;
 > +      uint keynr_in_table= param->real_keynr[(*cur_child)->key_idx];

GB could be const

 > @@ -3911,7 +4220,10 @@ TABLE_READ_PLAN *get_best_disjunct_quick
 >        Add one ROWID comparison for each row retrieved on non-CPK scan.  (it
 >        is done in QUICK_RANGE_SELECT::row_in_ranges)
 >       */
 > +
 >      imerge_cost += non_cpk_scan_records / TIME_FOR_COMPARE_ROWID;
 > +    trace_best_disjunct.add("cost_of_mapping_rowid_in_non_cpk_scan",
 > +                            non_cpk_scan_records / TIME_FOR_COMPARE_ROWID);

GB could cache the division's result in a const, to avoid computing it
twice, as is done with dup_removal_cost below.
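I.e. something like (rid_comp_cost is a name of my choosing):

   const double rid_comp_cost= non_cpk_scan_records / TIME_FOR_COMPARE_ROWID;
   imerge_cost+= rid_comp_cost;
   trace_best_disjunct.add("cost_of_mapping_rowid_in_non_cpk_scan",
                           rid_comp_cost);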

 > @@ -4683,28 +5003,42 @@ TRP_ROR_INTERSECT *get_best_ror_intersec
 >    ROR_SCAN_INFO **intersect_scans_best;
 >    cur_ror_scan= tree->ror_scans;
 >    intersect_scans_best= intersect_scans;
 > -  while (cur_ror_scan != tree->ror_scans_end && !intersect->is_covering)
 >    {
 > -    /* S= S + first(R);  R= R - first(R); */
 > -    if (!ror_intersect_add(intersect, *cur_ror_scan, FALSE))
 > +    Opt_trace_array ota(param->thd->opt_trace, "intersecting_indices");
 > +    while (cur_ror_scan != tree->ror_scans_end && !intersect->is_covering)
 >      {
 > -      cur_ror_scan++;
 > -      continue;
 > -    }
 > +      Opt_trace_object trace_idx(param->thd->opt_trace);
 > +      char *idx_name= param->table->key_info[(*cur_ror_scan)->keynr].name;
 > +      trace_idx.add_str("index", idx_name, strlen(idx_name));
 > +      /* S= S + first(R);  R= R - first(R); */
 > +      if (!ror_intersect_add(intersect, *cur_ror_scan, FALSE))
 > +      {
 > +        trace_idx.add("used_in_intersect", false).
 > +          add_str("cause", "does_not_reduce_cost_of_intersect");

GB would add_str("cause", "cost") be clear enough? Maybe not...

 >    if (intersect_scans_best == intersect_scans)
 >    {
 > +    trace_ror.add("increases_selectivity", false).add("chosen", false);

GB should it be
trace_ror.add("chosen", false).add("cause", "does_not_increase_selectivity")?

 >      DBUG_PRINT("info", ("None of scans increase selectivity"));
 >      DBUG_RETURN(NULL);
 >    }
 > @@ -4728,9 +5062,22 @@ TRP_ROR_INTERSECT *get_best_ror_intersec
 >      if (ror_intersect_add(intersect, cpk_scan, TRUE) &&
 >          (intersect->total_cost < min_cost))
 >      {
 > +      Opt_trace_object (param->thd->opt_trace, "clustered_pk").
 > +        add("cpk_scan_added_to_intersect", true).
 > +        add("cumulated_cost", intersect->total_cost);
 >        cpk_scan_used= TRUE;
 >        intersect_best= intersect; //just set pointer here
 >      }
 > +    else
 > +      Opt_trace_object (param->thd->opt_trace, "clustered_pk").
 > +        add("cpk_added_to_intersect", false).add_str("cause", "cost");
 > +  }
 > +  else
 > +  {
 > +    Opt_trace_object trace_cpk(param->thd->opt_trace, "clustered_pk");
 > +    trace_cpk.add("cpk_added_to_intersect", false);
 > +    cpk_scan ? trace_cpk.add_str("cause", "ror_is_covering")
 > +             : trace_cpk.add_str("cause", "no_clustered_pk_index");

GB The last two lines could be
trace_cpk.add_str("cause", cpk_scan ? "ror_is_covering" : "no_clustered_pk_index");
(I guess it generates fewer function calls).

 >    /* Ok, return ROR-intersect plan if we have found one */
 > @@ -4755,10 +5102,18 @@ TRP_ROR_INTERSECT *get_best_ror_intersec
 >      trp->records= best_rows;
 >      trp->index_scan_costs= intersect_best->index_scan_costs;
 >      trp->cpk_scan= cpk_scan_used? cpk_scan: NULL;
 > +
 > +    trace_ror.add("records", trp->records).
 > +      add("cost", trp->read_cost).
 > +      add("covering", trp->is_covering).
 > +      add("chosen", true);
 > +
 >      DBUG_PRINT("info", ("Returning non-covering ROR-intersect plan:"
 >                          "cost %g, records %lu",
 >                          trp->read_cost, (ulong) trp->records));
 >    }
 > +  else
 > +    trace_ror.add("chosen", false);

GB the two trace_ror.add("chosen", true/false) could be replaced with
one final
trace_ror.add("chosen", trp != NULL);

 > @@ -6010,6 +6364,12 @@ get_mm_leaf(RANGE_OPT_PARAM *param, Item
 >            value->result_type() == item_cmp_type(field->result_type(),
 >                                                  value->result_type()))
 >        {
 > +        Opt_trace_object (param->thd->opt_trace, "impossible_condition",
 > +                          Opt_trace_context::RANGE_OPTIMIZER).
 > +          add_str("table", *field->table_name, strlen(*field->table_name)).
 > +          add_str("field", field->field_name, strlen(field->field_name)).
 > +          add_str("cause", "incomparable_types");
 > +
 >          tree= new (alloc) SEL_ARG(field, 0, 0);
 >          tree->type= SEL_ARG::IMPOSSIBLE;
 >          field->table->in_use->variables.sql_mode= orig_sql_mode;
 > @@ -6072,6 +6432,12 @@ get_mm_leaf(RANGE_OPT_PARAM *param, Item
 >    }
 >    else if (err < 0)
 >    {
 > +    Opt_trace_object (param->thd->opt_trace, "impossible_condition",
 > +                      Opt_trace_context::RANGE_OPTIMIZER).
 > +      add_str("table", *field->table_name, strlen(*field->table_name)).
 > +      add_str("field", field->field_name, strlen(field->field_name)).
 > +      add_str("cause", "null_field_in_non_null_column");
 > +
 >      field->table->in_use->variables.sql_mode= orig_sql_mode;
 >      /* This happens when we try to insert a NULL field in a not null column */
 >      tree= &null_element;                        // cmp with NULL is never TRUE
 > @@ -6085,6 +6451,11 @@ get_mm_leaf(RANGE_OPT_PARAM *param, Item
 >    */
 >    if (type != Item_func::EQUAL_FUNC && field->is_real_null())
 >    {
 > +    Opt_trace_object (param->thd->opt_trace, "impossible_condition",
 > +                      Opt_trace_context::RANGE_OPTIMIZER).
 > +      add_str("table", *field->table_name, strlen(*field->table_name)).
 > +      add_str("field", field->field_name, strlen(field->field_name)).
 > +      add_str("cause", "comparison_with_null_always_false");
 >      tree= &null_element;
 >      goto end;
 >    }
 > @@ -6121,6 +6492,11 @@ get_mm_leaf(RANGE_OPT_PARAM *param, Item
 >      {
 >        if (type == Item_func::LT_FUNC || type == Item_func::LE_FUNC)
 >        {
 > +        Opt_trace_object (param->thd->opt_trace, "impossible_condition",
 > +                          Opt_trace_context::RANGE_OPTIMIZER).
 > +          add_str("table", *field->table_name, strlen(*field->table_name)).
 > +          add_str("field", field->field_name, strlen(field->field_name)).
 > +          add_str("cause", "unsigned_int_cannot_be_negative");

GB We have three such blocks which follow the same pattern: they
create "impossible_condition", fill in "table", "field" and then
"cause" (they differ only in the "cause"), and then go to label
"end". I propose reducing the code duplication by setting a "const
char *cause" variable to the cause, and doing the duplicated trace
code at label "end".
I worry a bit about growing the code's size too much: unlike DBUG, the
optimizer trace code will be put into release binaries where speed
matters. My nightmare is that the QA team runs a benchmark and finds a
notable difference between when opt trace is not compiled in and when
it is compiled in.

 >          tree->type= SEL_ARG::IMPOSSIBLE;
 >          goto end;
 >        }
 > @@ -7632,8 +8008,26 @@ range_seq_t sel_arg_range_seq_init(void
 >  }
 >
 >
 > -static void step_down_to(SEL_ARG_RANGE_SEQ *arg, SEL_ARG *key_tree)
 > +static void step_down_to(String *s, SEL_ARG_RANGE_SEQ *arg, SEL_ARG *key_tree)
 >  {
 > +
 > +  if (arg->param->thd->opt_trace && arg->param->thd->opt_trace->is_started())

GB this will not compile if OPTIMIZER_TRACE is not defined (cmake
-DOPTIMIZER_TRACE=0).
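The simplest fix is probably to guard the whole block, e.g.:

   #ifdef OPTIMIZER_TRACE
   if (arg->param->thd->opt_trace && arg->param->thd->opt_trace->is_started())
   {
     ...
   }
   #endif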

 > +  {
 > +    /*
 > +       Stepping down will append the range for the current keypart (in
 > +       key_tree) to seq. Trace range here since this is where it is
 > +       human readable.
 > +    */
 > +    KEY_PART_INFO *key_part=

GB could be const

 > +      arg->param->table->key_info[arg->real_keyno].key_part + key_tree->part;
 > +

 > @@ -9722,14 +10126,30 @@ get_best_group_min_max(PARAM *param, SEL
 >
 >    DBUG_ENTER("get_best_group_min_max");
 >
 > +  Opt_trace_object trace_group(thd->opt_trace, "group_index_range",
 > +                               Opt_trace_context::RANGE_OPTIMIZER);
 > +
 >    /* Perform few 'cheap' tests whether this access method is applicable. */
 >    if (!join)
 > +  {
 > +    trace_group.add("chosen", false).add_str("cause", "no_join");
 >      DBUG_RETURN(NULL);        /* This is not a select statement. */
 > -  if ((join->tables != 1) ||  /* The query must reference one table. */
 > -      (join->select_lex->olap == ROLLUP_TYPE)) /* Check (B3) for ROLLUP */
 > +  }
 > +  if (join->tables != 1)   /* The query must reference one table. */
 > +  {
 > +    trace_group.add("chosen", false).add_str("cause", "not_single_table");
 >      DBUG_RETURN(NULL);
 > +  }
 > +  if (join->select_lex->olap == ROLLUP_TYPE) /* Check (B3) for ROLLUP */
 > +  {
 > +    trace_group.add("chosen", false).add_str("cause", "is_rollup");
 > +    DBUG_RETURN(NULL);
 > +  }
 >    if (table->s->keys == 0)        /* There are no indexes to use. */
 > +  {
 > +    trace_group.add("chosen", false).add_str("cause", "no_index");
 >      DBUG_RETURN(NULL);
 > +  }
 >
 >    /* Check (SA1,SA4) and store the only MIN/MAX argument - the C attribute.*/
 >    if (join->make_sum_func_list(join->all_fields, join->fields_list, 1))
 > @@ -9741,7 +10161,10 @@ get_best_group_min_max(PARAM *param, SEL
 >    if ((!join->group_list) && /* Neither GROUP BY nor a DISTINCT query. */
 >        (!join->select_distinct) &&
 >        !is_agg_distinct)
 > +  {
 > +    trace_group.add("chosen", false).add_str("cause", "not_group_by_or_distinct");
 >      DBUG_RETURN(NULL);
 > +  }
 >    /* Analyze the query in more detail. */
 >
 >    if (join->sum_funcs[0])
 > @@ -9759,7 +10182,11 @@ get_best_group_min_max(PARAM *param, SEL
 >                 min_max_item->sum_func() == Item_sum::AVG_DISTINCT_FUNC)
 >          continue;
 >        else
 > +      {
 > +        trace_group.add("chosen", false).
 > +          add_str("cause", "not_applicable_aggregate_function");
 >          DBUG_RETURN(NULL);

GB we have a lot of
add("chosen",false).add_str("cause",some cause);DBUG_RETURN(NULL);
Maybe there could be a "const char *cause" variable, and a jump to a
"return_null" label, where we would execute the duplicated code.

 > +      }
 >
 >        /* The argument of MIN/MAX. */
 >        Item *expr= min_max_item->get_arg(0)->real_item();
 > @@ -10102,8 +10579,11 @@ get_best_group_min_max(PARAM *param, SEL
 >        !check_group_min_max_predicates(join->conds, min_max_arg_item,
 >                                        (index_info->flags & HA_SPATIAL) ?
 >                                        Field::itMBR : Field::itRAW))
 > +  {
 > +    trace_group.add("usable",false).
 > +      add_str("cause", "unsupported_predicate_on_agg_attribute");

GB same suggestion: a "cause" variable (but this time at the final
label we would print "usable" instead of "chosen").

 >      DBUG_RETURN(NULL);
 > -
 > +  }
 >    /* The query passes all tests, so construct a new TRP object. */

 >  void QUICK_RANGE_SELECT::dbug_dump(int indent, bool verbose)
 >  {
 >    Opt_trace_context *out= current_thd->opt_trace;
 > +  Opt_trace_object oto(out, "range_scan");
 > +  oto.add_str("index", head->key_info[index].name,

GB as agreed in a previous mail, we should revert those to their state
in opt-backporting.
I believe that, as print_quick() is unused now, dbug_dump() is unused
except in TEST_join() (which prints to DBUG); but maybe even that is
redundant and we don't need to call it from TEST_join() anymore,
because the range optimizer traces go to DBUG? In that case, we could
remove the dbug_dump() functions... Let me know if you want to do it,
or whether I should put it on my TODO.

 >  void QUICK_ROR_INTERSECT_SELECT::dbug_dump(int indent, bool verbose)
 >  {
 > -#ifndef NO_OPT_TRACE_FOR_RANGE_OPT
 >    Opt_trace_context *out= current_thd->opt_trace;
 > -#endif
 >    List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
 >    QUICK_RANGE_SELECT *quick;
 > -#ifndef NO_OPT_TRACE_FOR_RANGE_OPT
 > -  Opt_trace_object oto(out, "index_merge_intersect");
 > -#endif
 > +  Opt_trace_array ota(out, "index_roworder_intersect");
 >    while ((quick= it++))
 > +  {
 > +    Opt_trace_object wrapper(out);
 >      quick->dbug_dump(indent+2, verbose);
 > +  }
 >    if (cpk_quick)
 >    {
 > -#ifndef NO_OPT_TRACE_FOR_RANGE_OPT
 >      Opt_trace_object oto2(out, "clustered_pk_scan");

GB sometimes we use "cpk", sometimes "clustered_pk"; could we
standardize on one?

 > -#endif
 >      cpk_quick->dbug_dump(indent+2, verbose);
 >    }

 > === modified file 'sql/opt_range.h'
 > --- a/sql/opt_range.h	2010-08-05 13:51:44 +0000
 > +++ b/sql/opt_range.h	2010-10-22 08:31:43 +0000
 > @@ -33,6 +33,7 @@
 >  */
 >  #include "sql_class.h"                          // set_var.h: THD
 >  #include "set_var.h"                            /* Item */
 > +#include "opt_trace.h"
 >
 >  class JOIN;
 >  class Item_sum;
 > @@ -863,6 +864,12 @@ class SQL_SELECT :public Sql_alloc {
 >    bool check_quick(THD *thd, bool force_quick_range, ha_rows limit)
 >    {
 >      key_map tmp(key_map::ALL_BITS);
 > +
 > +    /* Entrypoint for insert/update/deletes with condition.

GB I suggest
/*
   Entry point etc


 > +       test_quick_select adds tracing with keys, and we are currently
 > +       in a trace array that does not accept keys.
 > +    */
 > +    Opt_trace_object wrapper(thd->opt_trace);
 >      return test_quick_select(thd, tmp, 0, limit, force_quick_range, FALSE) < 0;
 >    }

GB how about leaving check_quick() as it was, and instead creating the
wrapper object directly in the UPDATE and DELETE code? I think that
would be more logical. Otherwise it introduces a kind of "API
difference" between check_quick() and test_quick_select(): the two
functions end up differing in that one can be called in a given context
and the other cannot.

 >    inline bool skip_record(THD *thd, bool *skip_record)
 >
 > === modified file 'sql/opt_trace.cc'
 > --- a/sql/opt_trace.cc	2010-10-21 13:03:30 +0000
 > +++ b/sql/opt_trace.cc	2010-10-22 08:31:43 +0000
 > @@ -377,12 +377,13 @@ const char *Opt_trace_context::flag_name
 >
 >  const char *Opt_trace_context::feature_names[]=
 >  {
 > -  "misc", "greedy_search", "default", NullS
 > +  "misc", "greedy_search", "range_optimizer", "default", NullS

GB The option's description in sys_vars.cc and
mysql-test/r/mysqld--help-*.result need an update.

 > === modified file 'sql/sql_select.cc'
 > --- a/sql/sql_select.cc	2010-10-21 13:03:30 +0000
 > +++ b/sql/sql_select.cc	2010-10-22 08:31:43 +0000
 > @@ -259,6 +259,10 @@ void select_describe(JOIN *join, bool ne
 >  			    bool distinct, const char *message=NullS);
 >  static Item *remove_additional_cond(Item* conds);
 >  static void add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab);
 > +static void trace_indices_added_group_distinct(Opt_trace_context *trace,
 > +                                               const JOIN_TAB *join_tab,
 > +                                               const key_map new_keys,
 > +                                               const char* cause);
 >  static bool replace_subcondition(JOIN *join, Item **tree,
 >                                   Item *old_cond, Item *new_cond,
 >                                   bool do_fix_fields);
 > @@ -4362,7 +4366,9 @@ int pull_out_semijoin_tables(JOIN *join)
 >          if (tbl->table->map & join->const_table_map)
 >          {
 >            pulled_tables |= tbl->table->map;
 > -          Opt_trace_object(trace).add_str("table", tbl->table->alias).
 > +          Opt_trace_object(trace).
 > +            add_str("database", tbl->db, strlen(tbl->db)).
 > +            add_str("table", tbl->alias, strlen(tbl->alias)).
 >              add("constant", true);
 >          }
 >        }
 > @@ -4388,7 +4394,9 @@ int pull_out_semijoin_tables(JOIN *join)
 >            {
 >              pulled_a_table= TRUE;
 >              pulled_tables |= tbl->table->map;
 > -            Opt_trace_object(trace).add_str("table", tbl->table->alias).
 > +            Opt_trace_object(trace).
 > +              add_str("database", tbl->db, strlen(tbl->db)).
 > +              add_str("table", tbl->alias, strlen(tbl->alias)).
 >                add("functionally_dependent", true);
 >              /*
 >                Pulling a table out of uncorrelated subquery in general makes
 > @@ -4947,14 +4955,22 @@ make_join_statistics(JOIN *join, TABLE_L
 >
 >        for (s= stat ; s < stat_end ; s++)
 >        {
 > +        Opt_trace_object trace_table(join->thd->opt_trace);

GB there is a "trace" variable already.

 > +        trace_table.add_str("database",s->join->tables_list->db,
 > +                            strlen(s->join->tables_list->db)).
 > +          add_str("table", s->table->alias, strlen(s->table->alias));
 >          if (s->type == JT_SYSTEM || s->type == JT_CONST)
 >          {
 > +          trace_table.add("records", 1).
 > +            add("cost", 1);

GB maybe those two add() above can be on one line?

 > +
 > +          trace_table.add_str("cause", (s->type == JT_SYSTEM) ?
 > +                          "system_table": "const_table");

 > @@ -6498,11 +6517,14 @@ add_group_and_distinct_keys(JOIN *join,
 >    Item_field *cur_item;
 >    key_map possible_keys;
 >
 > +  const char* cause;
 > +
 >    if (join->group_list)
 >    { /* Collect all query fields referenced in the GROUP clause. */
 >      for (cur_group= join->group_list; cur_group; cur_group= cur_group->next)
 >        (*cur_group->item)->walk(&Item::collect_item_field_processor, 0,
 >                                 (uchar*) &indexed_fields);
 > +    cause= (char*)"group_by";

GB the (char*) casts above and below are not needed.

 >    }
 >    else if (join->select_distinct)
 >    { /* Collect all query fields referenced in the SELECT clause. */
 > @@ -6512,10 +6534,12 @@ add_group_and_distinct_keys(JOIN *join,
 >      while ((item= select_items_it++))
 >        item->walk(&Item::collect_item_field_processor, 0,
 >                   (uchar*) &indexed_fields);
 > +    cause= (char*)"distinct";
 >    }
 >    else if (is_indexed_agg_distinct(join, &indexed_fields))
 >    {
 >      join->sort_and_group= 1;
 > +    cause= (char*) "indexed_distinct_aggregate";
 >    }
 >    else
 >      return;
 > @@ -6531,8 +6555,49 @@ add_group_and_distinct_keys(JOIN *join,
 >      possible_keys.intersect(cur_item->field->part_of_key);
 >    }
 >
 > -  if (!possible_keys.is_clear_all())
 > +  if (!possible_keys.is_clear_all() &&
 > +      !(possible_keys == join_tab->const_keys))
 > +  {
 > +    Opt_trace_context *trace= join->thd->opt_trace;
 > +    if (trace && trace->is_started())
 > +    {

GB unneeded {}

 > +      trace_indices_added_group_distinct(trace, join_tab,
 > +                                         possible_keys, cause);
 > +    }

GB you may want to put in trace_indices_added_group_distinct() this:
if (trace == NULL || !trace->is_started()) return;
and then you can just call
trace_indices_added_group_distinct(join->thd->opt_trace) above.

 >      join_tab->const_keys.merge(possible_keys);
 > +  }
 > +}
 > +
 > +/**
 > +  Print keys that were appended to join_tab->const_keys because they
 > +  can be used for GROUP BY or DISTINCT to the optimizer trace.
 > +
 > +  @param thd       Thread for the connection that submitted the query

GB there is no THD parameter actually

 > +  @param join_tab  The table the indices cover
 > +  @param new_keys  The keys that are considered useful because they can
 > +                   be used for GROUP BY or DISTINCT
 > +  @param cause     Zero-terminated string with reason for adding indices
 > +                   to const_keys
 > +
 > +  @see add_group_and_distinct_keys()
 > + */
 > +static void trace_indices_added_group_distinct(Opt_trace_context *trace,
 > +                                               const JOIN_TAB *join_tab,
 > +                                               const key_map new_keys,
 > +                                               const char* cause)

 > @@ -6974,9 +7039,9 @@ best_access_path(JOIN      *join,
 >        loose_scan_opt.next_ref_key();
 >        DBUG_PRINT("info", ("Considering ref access on key %s",
 >                            keyuse->table->key_info[keyuse->key].name));
 > -      Opt_trace_object trace_index_path(trace);
 > -      trace_index_path.add_str("access_type", "index").
 > -        add_str("index", keyinfo->name);
 > +      Opt_trace_object trace_access_idx(thd->opt_trace);

GB there is already a "trace" variable.

 > +      trace_access_idx.add_str("access_type", "index").
 > +        add_str("index", keyinfo->name, strlen(keyinfo->name));

 > @@ -7434,6 +7509,7 @@ best_access_path(JOIN      *join,
 >      }
 >      else
 >      {
 > +      trace_access_scan.add_str("access_type", "scan");

GB do we, further down in the trace, tell the user whether
we decided to use _table_ scan or _index_ scan?

 >        /* Estimate cost of reading table. */
 >        if (s->table->force_index && !best_key)
 >          tmp= s->table->file->read_time(s->ref.key, 1, s->records);

GB A comment about SQL_SELECT::test_quick_select(): it looks like we
generate different plans, print the description of each plan to the
trace, and at the function's very end, print the chosen one. Thus the
chosen one is printed twice (when it's considered, and in
conclusion). An alternative (which I used when advance_sj_state()
tries semijoin strategies one after the other) would be to not print
at the very end, but to print "chosen" information for each plan as
we generate it. The user can understand which one was the final
choice by looking at the last one which has chosen==true. Though if
you do that, then the order of plans matters, so there needs to be a
list of plans, not a dictionary of plans (a dictionary is not ordered;
a JSON viewer may shuffle its attributes).
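I.e. the trace would end up with an ordered array like (a made-up
excerpt):

   "considered_plans": [
     { "type": "index_merge", "cost": 4.2, "chosen": true },
     { "type": "range_scan",  "cost": 2.2, "chosen": true },
     { "type": "table_scan",  "cost": 6.0, "chosen": false }
   ]

and the reader takes the last entry with chosen==true as the final
choice.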
I'm ok with the current solution if you prefer it.

The addendum patch "In addition to step_down_to()" is ok.
