List: Commits — « Previous Message | Next Message »
From: Guilhem Bichot    Date: February 15 2011, 8:53pm
Subject:bzr commit into mysql-next-mr-bugfixing branch (guilhem.bichot:3265)
View as plain text  
#At file:///home/mysql_src/bzrrepos_new/mysql-next-mr-opt-backporting-wl4800-new/ based on revid:guilhem.bichot@oracle.com-20110125100605-phj929pxpwm4jwsj

 3265 Guilhem Bichot	2011-02-15 [merge]
      merge from separate feature tree where I had applied
      a lot of Tor's review comments, into main feature tree.

    added:
      mysql-test/r/optimizer_trace_oom.result
      mysql-test/t/optimizer_trace_oom.test
      unittest/gunit/opt_notrace-t.cc
    modified:
      WL4800_TODO.txt
      include/my_sys.h
      mysql-test/include/optimizer_trace.inc
      mysql-test/include/optimizer_trace2.inc
      mysql-test/r/optimizer_trace2_no_prot.result
      mysql-test/r/optimizer_trace2_ps_prot.result
      mysql-test/r/optimizer_trace_no_prot.result
      mysql-test/r/optimizer_trace_ps_prot.result
      mysys/array.c
      sql/mysqld.cc
      sql/opt_range.cc
      sql/opt_trace.cc
      sql/opt_trace.h
      sql/opt_trace2server.cc
      sql/sql_array.h
      sql/sql_parse.cc
      sql/sys_vars.cc
      unittest/gunit/CMakeLists.txt
      unittest/gunit/opt_trace-t.cc
=== modified file 'WL4800_TODO.txt'
--- a/WL4800_TODO.txt	2011-01-21 15:11:00 +0000
+++ b/WL4800_TODO.txt	2011-02-15 20:53:19 +0000
@@ -49,3 +49,5 @@ should the debug binary really assert(0)
 good idea at the customer's? On the other hand, how to make sure a
 developer notices a syntax error when running tests?
 sql_print_warning() is an idea.
+
+make the charset parameter "const CHARSET_INFO *", see the @todo in opt_trace.h

=== modified file 'include/my_sys.h'
--- a/include/my_sys.h	2011-01-21 15:11:00 +0000
+++ b/include/my_sys.h	2011-02-15 20:53:19 +0000
@@ -784,16 +784,17 @@ extern my_bool init_dynamic_array2(DYNAM
 /* init_dynamic_array() function is deprecated */
 extern my_bool init_dynamic_array(DYNAMIC_ARRAY *array, uint element_size,
                                   uint init_alloc, uint alloc_increment);
-extern my_bool insert_dynamic(DYNAMIC_ARRAY *array,uchar * element);
-extern uchar *alloc_dynamic(DYNAMIC_ARRAY *array);
-extern uchar *pop_dynamic(DYNAMIC_ARRAY*);
-extern my_bool set_dynamic(DYNAMIC_ARRAY *array,uchar * element,uint array_index);
+extern my_bool insert_dynamic(DYNAMIC_ARRAY *array, const void *element);
+extern void *alloc_dynamic(DYNAMIC_ARRAY *array);
+extern void *pop_dynamic(DYNAMIC_ARRAY*);
+extern my_bool set_dynamic(DYNAMIC_ARRAY *array, const void *element,
+                           uint array_index);
 extern my_bool allocate_dynamic(DYNAMIC_ARRAY *array, uint max_elements);
-extern void get_dynamic(DYNAMIC_ARRAY *array,uchar * element,uint array_index);
+extern void get_dynamic(DYNAMIC_ARRAY *array, void *element,
+                        uint array_index);
 extern void delete_dynamic(DYNAMIC_ARRAY *array);
 extern void delete_dynamic_element(DYNAMIC_ARRAY *array, uint array_index);
 extern void freeze_size(DYNAMIC_ARRAY *array);
-extern int  get_index_dynamic(DYNAMIC_ARRAY *array, uchar * element);
 #define dynamic_array_ptr(array,array_index) ((array)->buffer+(array_index)*(array)->size_of_element)
 #define dynamic_element(array,array_index,type) ((type)((array)->buffer) +(array_index))
 #define push_dynamic(A,B) insert_dynamic((A),(B))

=== modified file 'mysql-test/include/optimizer_trace.inc'
--- a/mysql-test/include/optimizer_trace.inc	2011-01-18 13:44:12 +0000
+++ b/mysql-test/include/optimizer_trace.inc	2011-02-15 20:53:19 +0000
@@ -435,7 +435,6 @@ delimiter ;|
 
 # PREPARE/EXECUTE/EXECUTE
 prepare stmt from 'call p1(?)';
-# PREPARE CALL is not traced, we should see previous statement:
 select QUERY from information_schema.OPTIMIZER_TRACE;
 set @param="c";
 execute stmt using @param;

=== modified file 'mysql-test/include/optimizer_trace2.inc'
--- a/mysql-test/include/optimizer_trace2.inc	2011-01-21 15:22:42 +0000
+++ b/mysql-test/include/optimizer_trace2.inc	2011-02-15 20:53:19 +0000
@@ -4,7 +4,6 @@
 
 set optimizer_trace="enabled=on,end_marker=on";
 
-
 --echo # check that if a sub-statement should not be traced,
 --echo # it is not traced even if inside a traced top statement
 --echo
@@ -32,6 +31,43 @@ set optimizer_trace_offset=default, opti
 drop function f1;
 
 --echo
+--echo # Check that if a sub-statement reads OPTIMIZER_TRACE,
+--echo # thus reading the unfinished trace of its caller statement,
+--echo # there is no crash.
+--echo
+
+create temporary table optt
+(id int primary key auto_increment,
+QUERY varchar(200),
+TRACE text);
+create table t1 (a int, key(a));
+insert into t1 values(2);
+set optimizer_trace_offset=0, optimizer_trace_limit=100;
+delimiter |;
+create function f1(arg char(1)) returns int
+begin
+  declare res int;
+  insert into optt select NULL, QUERY, TRACE from information_schema.OPTIMIZER_TRACE;
+  return 3;
+end|
+select * from t1 where a in (select f1("c") from t1)|
+--echo
+delimiter ;|
+set optimizer_trace="enabled=off";
+--echo this should find unfinished traces
+select count(*) from optt where TRACE NOT LIKE "%] /* steps */\n}";
+select count(*)<>0 from optt;
+--echo this should not
+select count(*) from information_schema.OPTIMIZER_TRACE where TRACE NOT LIKE "%] /* steps */\n}";
+select count(*)<>0 from information_schema.OPTIMIZER_TRACE;
+
+set optimizer_trace_offset=default, optimizer_trace_limit=default;
+drop temporary table optt;
+drop function f1;
+drop table t1;
+set optimizer_trace="enabled=on";
+
+--echo
 --echo # check of crash with I_S.VIEWS (TABLE_LIST::alias==NULL)
 --echo
 create table t1(a int, b int);
@@ -108,3 +144,14 @@ insert into t1 values (1,1), (2,null), (
 select max(x) from (select sum(a) as x from t1 group by b) as teeone;
 select TRACE from information_schema.OPTIMIZER_TRACE;
 drop table t1;
+
+# Check that SQL PREPARE produces one statement, and
+# check SQL EXECUTE produces two
+set optimizer_trace_offset=0, optimizer_trace_limit=100;
+prepare stmt from "select 1";
+select * from information_schema.OPTIMIZER_TRACE;
+set optimizer_trace_offset=0, optimizer_trace_limit=100;
+execute stmt;
+select * from information_schema.OPTIMIZER_TRACE;
+deallocate prepare stmt;
+set optimizer_trace_offset=default, optimizer_trace_limit=default;

=== modified file 'mysql-test/r/optimizer_trace2_no_prot.result'
--- a/mysql-test/r/optimizer_trace2_no_prot.result	2011-01-21 15:22:42 +0000
+++ b/mysql-test/r/optimizer_trace2_no_prot.result	2011-02-15 20:53:19 +0000
@@ -24,6 +24,47 @@ select 2 into res from dual
 set optimizer_trace_offset=default, optimizer_trace_limit=default;
 drop function f1;
 
+# Check that if a sub-statement reads OPTIMIZER_TRACE,
+# thus reading the unfinished trace of its caller statement,
+# there is no crash.
+
+create temporary table optt
+(id int primary key auto_increment,
+QUERY varchar(200),
+TRACE text);
+create table t1 (a int, key(a));
+insert into t1 values(2);
+set optimizer_trace_offset=0, optimizer_trace_limit=100;
+create function f1(arg char(1)) returns int
+begin
+declare res int;
+insert into optt select NULL, QUERY, TRACE from information_schema.OPTIMIZER_TRACE;
+return 3;
+end|
+select * from t1 where a in (select f1("c") from t1)|
+a
+
+set optimizer_trace="enabled=off";
+this should find unfinished traces
+select count(*) from optt where TRACE NOT LIKE "%] /* steps */\n}";
+count(*)
+1
+select count(*)<>0 from optt;
+count(*)<>0
+1
+this should not
+select count(*) from information_schema.OPTIMIZER_TRACE where TRACE NOT LIKE "%] /* steps */\n}";
+count(*)
+0
+select count(*)<>0 from information_schema.OPTIMIZER_TRACE;
+count(*)<>0
+1
+set optimizer_trace_offset=default, optimizer_trace_limit=default;
+drop temporary table optt;
+drop function f1;
+drop table t1;
+set optimizer_trace="enabled=on";
+
 # check of crash with I_S.VIEWS (TABLE_LIST::alias==NULL)
 
 create table t1(a int, b int);
@@ -970,3 +1011,65 @@ TRACE
   ] /* steps */
 }
 drop table t1;
+set optimizer_trace_offset=0, optimizer_trace_limit=100;
+prepare stmt from "select 1";
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY	TRACE	MISSING_BYTES_BEYOND_MAX_MEM_SIZE	OS_MALLOC_ERROR
+prepare stmt from "select 1"	{
+  "steps": [
+  ] /* steps */
+}	0	0
+select 1	{
+  "steps": [
+    {
+      "join_preparation": {
+        "select#": 1,
+        "steps": [
+          {
+            "expanded_query": "/* select#1 */ select 1 AS `1`"
+          }
+        ] /* steps */
+      } /* join_preparation */
+    }
+  ] /* steps */
+}	0	0
+set optimizer_trace_offset=0, optimizer_trace_limit=100;
+execute stmt;
+1
+1
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY	TRACE	MISSING_BYTES_BEYOND_MAX_MEM_SIZE	OS_MALLOC_ERROR
+execute stmt	{
+  "steps": [
+  ] /* steps */
+}	0	0
+select 1	{
+  "steps": [
+    {
+      "join_preparation": {
+        "select#": 1,
+        "steps": [
+          {
+            "expanded_query": "/* select#1 */ select 1 AS `1`"
+          }
+        ] /* steps */
+      } /* join_preparation */
+    },
+    {
+      "join_optimization": {
+        "select#": 1,
+        "steps": [
+        ] /* steps */
+      } /* join_optimization */
+    },
+    {
+      "join_execution": {
+        "select#": 1,
+        "steps": [
+        ] /* steps */
+      } /* join_execution */
+    }
+  ] /* steps */
+}	0	0
+deallocate prepare stmt;
+set optimizer_trace_offset=default, optimizer_trace_limit=default;

=== modified file 'mysql-test/r/optimizer_trace2_ps_prot.result'
--- a/mysql-test/r/optimizer_trace2_ps_prot.result	2011-01-21 15:22:42 +0000
+++ b/mysql-test/r/optimizer_trace2_ps_prot.result	2011-02-15 20:53:19 +0000
@@ -24,6 +24,47 @@ select 2 into res from dual
 set optimizer_trace_offset=default, optimizer_trace_limit=default;
 drop function f1;
 
+# Check that if a sub-statement reads OPTIMIZER_TRACE,
+# thus reading the unfinished trace of its caller statement,
+# there is no crash.
+
+create temporary table optt
+(id int primary key auto_increment,
+QUERY varchar(200),
+TRACE text);
+create table t1 (a int, key(a));
+insert into t1 values(2);
+set optimizer_trace_offset=0, optimizer_trace_limit=100;
+create function f1(arg char(1)) returns int
+begin
+declare res int;
+insert into optt select NULL, QUERY, TRACE from information_schema.OPTIMIZER_TRACE;
+return 3;
+end|
+select * from t1 where a in (select f1("c") from t1)|
+a
+
+set optimizer_trace="enabled=off";
+this should find unfinished traces
+select count(*) from optt where TRACE NOT LIKE "%] /* steps */\n}";
+count(*)
+1
+select count(*)<>0 from optt;
+count(*)<>0
+1
+this should not
+select count(*) from information_schema.OPTIMIZER_TRACE where TRACE NOT LIKE "%] /* steps */\n}";
+count(*)
+0
+select count(*)<>0 from information_schema.OPTIMIZER_TRACE;
+count(*)<>0
+1
+set optimizer_trace_offset=default, optimizer_trace_limit=default;
+drop temporary table optt;
+drop function f1;
+drop table t1;
+set optimizer_trace="enabled=on";
+
 # check of crash with I_S.VIEWS (TABLE_LIST::alias==NULL)
 
 create table t1(a int, b int);
@@ -962,3 +1003,65 @@ TRACE
   ] /* steps */
 }
 drop table t1;
+set optimizer_trace_offset=0, optimizer_trace_limit=100;
+prepare stmt from "select 1";
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY	TRACE	MISSING_BYTES_BEYOND_MAX_MEM_SIZE	OS_MALLOC_ERROR
+prepare stmt from "select 1"	{
+  "steps": [
+  ] /* steps */
+}	0	0
+select 1	{
+  "steps": [
+    {
+      "join_preparation": {
+        "select#": 1,
+        "steps": [
+          {
+            "expanded_query": "/* select#1 */ select 1 AS `1`"
+          }
+        ] /* steps */
+      } /* join_preparation */
+    }
+  ] /* steps */
+}	0	0
+set optimizer_trace_offset=0, optimizer_trace_limit=100;
+execute stmt;
+1
+1
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY	TRACE	MISSING_BYTES_BEYOND_MAX_MEM_SIZE	OS_MALLOC_ERROR
+execute stmt	{
+  "steps": [
+  ] /* steps */
+}	0	0
+select 1	{
+  "steps": [
+    {
+      "join_preparation": {
+        "select#": 1,
+        "steps": [
+          {
+            "expanded_query": "/* select#1 */ select 1 AS `1`"
+          }
+        ] /* steps */
+      } /* join_preparation */
+    },
+    {
+      "join_optimization": {
+        "select#": 1,
+        "steps": [
+        ] /* steps */
+      } /* join_optimization */
+    },
+    {
+      "join_execution": {
+        "select#": 1,
+        "steps": [
+        ] /* steps */
+      } /* join_execution */
+    }
+  ] /* steps */
+}	0	0
+deallocate prepare stmt;
+set optimizer_trace_offset=default, optimizer_trace_limit=default;

=== modified file 'mysql-test/r/optimizer_trace_no_prot.result'
--- a/mysql-test/r/optimizer_trace_no_prot.result	2011-01-18 13:44:12 +0000
+++ b/mysql-test/r/optimizer_trace_no_prot.result	2011-02-15 20:53:19 +0000
@@ -7256,7 +7256,7 @@ c	3	3
 prepare stmt from 'call p1(?)';
 select QUERY from information_schema.OPTIMIZER_TRACE;
 QUERY
-select * from t2
+call p1(?)
 set @param="c";
 execute stmt using @param;
 Warnings:

=== added file 'mysql-test/r/optimizer_trace_oom.result'
--- a/mysql-test/r/optimizer_trace_oom.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/r/optimizer_trace_oom.result	2011-02-15 20:53:19 +0000
@@ -0,0 +1,112 @@
+set @@session.optimizer_trace="enabled=on";
+select @@debug;
+@@debug
+
+select 1;
+1
+1
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY	TRACE	MISSING_BYTES_BEYOND_MAX_MEM_SIZE	OS_MALLOC_ERROR
+select 1	{
+  "steps": [
+    {
+      "join_preparation": {
+        "select#": 1,
+        "steps": [
+          {
+            "expanded_query": "/* select#1 */ select 1 AS `1`"
+          }
+        ]
+      }
+    },
+    {
+      "join_optimization": {
+        "select#": 1,
+        "steps": [
+        ]
+      }
+    },
+    {
+      "join_execution": {
+        "select#": 1,
+        "steps": [
+        ]
+      }
+    }
+  ]
+}	0	0
+set session debug="+d,opt_trace_oom1";
+select @@debug;
+@@debug
+d,opt_trace_oom1
+select 2;
+2
+2
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY	TRACE	MISSING_BYTES_BEYOND_MAX_MEM_SIZE	OS_MALLOC_ERROR
+select 1	{
+  "steps": [
+    {
+      "join_preparation": {
+        "select#": 1,
+        "steps": [
+          {
+            "expanded_query": "/* select#1 */ select 1 AS `1`"
+          }
+        ]
+      }
+    },
+    {
+      "join_optimization": {
+        "select#": 1,
+        "steps": [
+        ]
+      }
+    },
+    {
+      "join_execution": {
+        "select#": 1,
+        "steps": [
+        ]
+      }
+    }
+  ]
+}	0	0
+set @@session.optimizer_trace="enabled=on";
+set session debug=default;
+select @@debug;
+@@debug
+
+select 3;
+3
+3
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY	TRACE	MISSING_BYTES_BEYOND_MAX_MEM_SIZE	OS_MALLOC_ERROR
+select 3	{
+  "steps": [
+    {
+      "join_preparation": {
+        "select#": 1,
+        "steps": [
+          {
+            "expanded_query": "/* select#1 */ select 3 AS `3`"
+          }
+        ]
+      }
+    },
+    {
+      "join_optimization": {
+        "select#": 1,
+        "steps": [
+        ]
+      }
+    },
+    {
+      "join_execution": {
+        "select#": 1,
+        "steps": [
+        ]
+      }
+    }
+  ]
+}	0	0

=== modified file 'mysql-test/r/optimizer_trace_ps_prot.result'
--- a/mysql-test/r/optimizer_trace_ps_prot.result	2011-01-18 13:44:12 +0000
+++ b/mysql-test/r/optimizer_trace_ps_prot.result	2011-02-15 20:53:19 +0000
@@ -7194,7 +7194,7 @@ c	3	3
 prepare stmt from 'call p1(?)';
 select QUERY from information_schema.OPTIMIZER_TRACE;
 QUERY
-select * from t2
+call p1(?)
 set @param="c";
 execute stmt using @param;
 Warnings:

=== added file 'mysql-test/t/optimizer_trace_oom.test'
--- a/mysql-test/t/optimizer_trace_oom.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/t/optimizer_trace_oom.test	2011-02-15 17:19:30 +0000
@@ -0,0 +1,28 @@
+# Test how opt trace reacts to out-of-memory
+
+--source include/have_optimizer_trace.inc
+--source include/have_debug.inc
+
+set @@session.optimizer_trace="enabled=on";
+
+select @@debug;
+
+select 1;
+select * from information_schema.OPTIMIZER_TRACE;
+
+set session debug="+d,opt_trace_oom1";
+select @@debug;
+
+# this shouldn't be traced
+select 2;
+
+# so we should still see "select 1":
+select * from information_schema.OPTIMIZER_TRACE;
+
+set @@session.optimizer_trace="enabled=on";
+set session debug=default;
+select @@debug;
+
+# this should be traced:
+select 3;
+select * from information_schema.OPTIMIZER_TRACE;

=== modified file 'mysys/array.c'
--- a/mysys/array.c	2010-07-08 21:20:08 +0000
+++ b/mysys/array.c	2011-02-15 17:19:30 +0000
@@ -92,7 +92,7 @@ my_bool init_dynamic_array(DYNAMIC_ARRAY
     FALSE	Ok
 */
 
-my_bool insert_dynamic(DYNAMIC_ARRAY *array, uchar* element)
+my_bool insert_dynamic(DYNAMIC_ARRAY *array, const void* element)
 {
   uchar* buffer;
   if (array->elements == array->max_element)
@@ -127,7 +127,7 @@ my_bool insert_dynamic(DYNAMIC_ARRAY *ar
     0		Error
 */
 
-uchar *alloc_dynamic(DYNAMIC_ARRAY *array)
+void *alloc_dynamic(DYNAMIC_ARRAY *array)
 {
   if (array->elements == array->max_element)
   {
@@ -171,7 +171,7 @@ uchar *alloc_dynamic(DYNAMIC_ARRAY *arra
     0		Array is empty
 */
 
-uchar *pop_dynamic(DYNAMIC_ARRAY *array)
+void *pop_dynamic(DYNAMIC_ARRAY *array)
 {
   if (array->elements)
     return array->buffer+(--array->elements * array->size_of_element);
@@ -196,7 +196,7 @@ uchar *pop_dynamic(DYNAMIC_ARRAY *array)
     FALSE	Ok
 */
 
-my_bool set_dynamic(DYNAMIC_ARRAY *array, uchar* element, uint idx)
+my_bool set_dynamic(DYNAMIC_ARRAY *array, const void *element, uint idx)
 {
   if (idx >= array->elements)
   {
@@ -273,7 +273,7 @@ my_bool allocate_dynamic(DYNAMIC_ARRAY *
       idx	Index of element wanted. 
 */
 
-void get_dynamic(DYNAMIC_ARRAY *array, uchar* element, uint idx)
+void get_dynamic(DYNAMIC_ARRAY *array, void *element, uint idx)
 {
   if (idx >= array->elements)
   {
@@ -356,28 +356,3 @@ void freeze_size(DYNAMIC_ARRAY *array)
     array->max_element=elements;
   }
 }
-
-
-/*
-  Get the index of a dynamic element
-
-  SYNOPSIS
-    get_index_dynamic()
-     array	Array
-     element Whose element index 
-
-*/
-
-int get_index_dynamic(DYNAMIC_ARRAY *array, uchar* element)
-{
-  size_t ret;
-  if (array->buffer > element)
-    return -1;
-
-  ret= (element - array->buffer) /  array->size_of_element;
-  if (ret > array->elements)
-    return -1;
-
-  return ret;
-
-}

=== modified file 'sql/mysqld.cc'
--- a/sql/mysqld.cc	2010-09-27 09:26:39 +0000
+++ b/sql/mysqld.cc	2011-02-15 17:19:30 +0000
@@ -2468,7 +2468,7 @@ Some pointers may be invalid and cause t
     fprintf(stderr, "thd->thread_id=%lu\n", (ulong) thd->thread_id);
     fprintf(stderr, "thd->killed=%s\n", kreason);
 #ifdef OPTIMIZER_TRACE
-    if (thd->opt_trace != NULL)
+    if ((thd->opt_trace != NULL) && thd->opt_trace->is_started())
     {
       const size_t max_print_len= 4096; // print those final bytes
       const char *tail= thd->opt_trace->get_tail(max_print_len);

=== modified file 'sql/opt_range.cc'
--- a/sql/opt_range.cc	2011-01-14 15:53:48 +0000
+++ b/sql/opt_range.cc	2011-02-15 20:53:19 +0000
@@ -795,6 +795,7 @@ TRP_GROUP_MIN_MAX *get_best_group_min_ma
 #ifndef DBUG_OFF
 static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map,
                            const char *msg);
+// print_ror_scans_arr declared 'static' but never defined
 static void print_ror_scans_arr(TABLE *table, const char *msg,
                                 struct st_ror_scan_info **start,
                                 struct st_ror_scan_info **end);
@@ -12402,6 +12403,7 @@ print_multiple_key_values(KEY_PART *key_
   dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
 }
 
+// print_quick defined but not used.
 static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg)
 {
   char buf[MAX_KEY/8+1];

=== modified file 'sql/opt_trace.cc'
--- a/sql/opt_trace.cc	2011-01-21 15:11:00 +0000
+++ b/sql/opt_trace.cc	2011-02-15 20:53:19 +0000
@@ -11,289 +11,385 @@
 
    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
-   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA  */
 
 /**
    @file
    Implementation of the Optimizer trace API (WL#5257)
 */
 
-#ifdef USE_PRAGMA_IMPLEMENTATION
-#pragma implementation				// gcc: Class implementation
-#endif
-
 #include "opt_trace.h"
 #include "mysqld.h"    // system_charset_info
 #include "item.h"      // Item
+#include "sql_string.h" // String
 #include "m_string.h"  // _dig_vec_lower
 
 #ifdef OPTIMIZER_TRACE
 
-/* Opt_trace_struct class */
+namespace {
+/**
+   Minimum amount of cells we want in our Dynamic_array-s when they're just
+   created. In our tests, nesting of structures is below 20: 48 is
+   reasonable.
+*/
+const int array_min_room= 48;
+/**
+   By how many cells the Dynamic_array should be extended, after the initial
+   allocation.
+*/
+const int array_extend=   16;
+}
 
-const char Opt_trace_struct::brackets[]= { '[', '{', ']', '}' };
-const char *Opt_trace_struct::types[]= { "array", "object" };
-bool Opt_trace_struct::dbug_assert_on_syntax_error= true;
+/**
+  @class Opt_trace_stmt
 
-void Opt_trace_struct::syntax_error(const char *key)
+  The trace of one statement. For example, executing a stored procedure
+  containing 3 sub-statements will produce 4 traces (one for the CALL
+  statement, one for each sub-statement), so 4 Opt_trace_stmt linked together
+  into Opt_trace_context's lists.
+*/
+class Opt_trace_stmt
 {
-  DBUG_PRINT("opt", ("syntax error key: %s", key));
-  DBUG_ASSERT(stmt->support_I_S);
-  DBUG_ASSERT(!dbug_assert_on_syntax_error);
-  /*
-    Inserting some warning text into the trace below helps locating where
-    things went wrong.
+public:
+  /**
+     Constructor, starts a trace
+     @param  ctx_arg          context
+     @param  support_I_S_arg  should trace be in information_schema
+     @note make sure to call @c well_constructed() after this; if it returns
+     false don't use the instance.
   */
-  if (key != NULL)
+  Opt_trace_stmt(Opt_trace_context *ctx_arg,
+                 enum enum_support_I_S support_I_S_arg);
+
+  /**
+     @returns whether the constructor went perfectly well.
+     @sa Opt_trace_context::well_constructed()
+  */
+  bool well_constructed() const
   {
-    stmt->buffer.append(STRING_WITH_LEN("** invalid JSON"
-                                        " (unexpected key \""));
-    stmt->buffer.append(key);
-    stmt->buffer.append(STRING_WITH_LEN("\") ** "));
+    return
+      stack_of_current_structs.guaranteed_room(array_min_room) ||
+      stack_of_values_of_support_I_S.guaranteed_room(array_min_room);
   }
-  else
-    stmt->buffer.append(STRING_WITH_LEN("** invalid JSON"
-                                        " (missing key) ** "));
-}
 
+  /**
+     Ends a trace; destruction may not be possible immediately as we may have
+     to keep the trace in case the user later reads it from I_S.
+  */
+  void end();
 
-void Opt_trace_struct::do_construct(Opt_trace_stmt *stmt_arg,
-                                    Opt_trace_struct *parent_arg,
-                                    bool requires_key_arg,
-                                    const char *key,
-                                    Opt_trace_context::feature_value feature)
-{
-  saved_key= key;
-  requires_key= requires_key_arg;
-  /* DBUG_PRINT supports NULL in %s */
-  DBUG_PRINT("opt", ("%s: starting %s", key, types[requires_key]));
-  stmt= stmt_arg;
-  parent= parent_arg;
-  DBUG_ASSERT(parent == NULL || stmt == parent->stmt);
-  started= true;
-#ifndef DBUG_OFF
-  previous_key[0]= 0;
-#endif
-  save_stmt_support_I_S=
-    stmt->support_I_S;
-  if (save_stmt_support_I_S &&
-      (feature & stmt->ctx->features) == 0)
+  /// @returns whether @c end() has been called on this instance.
+  bool has_ended() const { return ended; }
+
+  /// Sets the quantity of allowed memory for this trace.
+  void set_allowed_mem_size(size_t size);
+
+  /// @sa Opt_trace_context::set_query()
+  bool set_query(const char* query, size_t length, CHARSET_INFO *charset);
+
+  /* Below, functions for filling the statement's trace */
+
+  /**
+     When creating an Opt_trace_struct: adds a key and the opening bracket to
+     the trace buffer, updates current_struct.
+     @param  key              key or NULL
+     @param  struc            structure being created
+     @param  feature          optimizer feature to which the structure belongs
+     @param  opening_bracket  opening bracket to use
+  */
+  void open_struct(const char *key, Opt_trace_struct *struc,
+                   Opt_trace_context::feature_value feature,
+                   char opening_bracket);
+  /**
+     When closing an Opt_trace_struct: adds the closing bracket and optionally
+     the key to the trace buffer, updates current_struct.
+     @param  saved_key        key or NULL
+     @param  closing_bracket  closing bracket to use
+  */
+  void close_struct(const char *saved_key, char closing_bracket);
+
+  /// Put optional comma, newline and indentation
+  void separator();
+  /// Put newline and indentation
+  void next_line();
+
+  /**
+     Adds a key/value pair to the trace buffer.
+     @param  key  key or NULL
+     @param  val  representation of value as string
+     @param  val_length  length of value
+     @param  quotes  should value be delimited with '"' (false when the value
+     is the representation of a number, boolean or null)
+     @param  escape  does value need escaping (has special characters)
+
+     @note Structures prepare a string representation of their value-to-add
+     and call this function.
+  */
+  void add(const char *key, const char *val, size_t val_length,
+           bool quotes, bool escape);
+
+  /**
+     Emits a JSON syntax error into the trace.
+     @param key  key involved in the error, NULL if there is no key.
+
+     When adding a value (or array or object) to an array, or a key/value pair
+     to an object, we need to know this outer array or object.
+
+     It would be possible, when trying to add a key to an array, which is wrong
+     in JSON, or similarly when trying to add a value without any key to an
+     object, to catch it at compilation time, if the adder received, as function
+     parameter, the type of the structure (like @c Opt_trace_array*). Then
+     the @c add(key,val) call would not compile as Opt_trace_array wouldn't
+     feature it.
+
+     But as explained in comment of class Opt_trace_context we
+     cannot pass down the object, have to maintain a "current object or
+     array" in the Opt_trace_context context (pointer to an instance of
+     Opt_trace_struct), and the adder grabs it from the context.
+
+     As this current structure is of type "object or array", we cannot do
+     compile-time checks that only suitable functions are used. A call to @c
+     add(key,value) is necessarily legal for the compiler as the structure may
+     be an object, though it will be wrong in case the structure is actually
+     an array at run-time. Thus we have the risk of an untested particular
+     situation where the current structure is not an object (but an array)
+     though the code expected it to be one. We catch that at run-time:
+     our functions detect wrong usage, like adding a value to an object
+     without specifying a key, and then call syntax_error() which:
+     @li in debug build, asserts
+     @li in release builds, emits a warning string in the trace and should not
+     crash. The trace is invalid JSON but still human-readable (our best
+     effort).
+  */
+  void syntax_error(const char *key);
+
+  /* Below, functions to request information from this instance */
+
+  /// Fills user-level information @sa Opt_trace_iterator
+  void fill_info(Opt_trace_info *info) const;
+
+  /// @returns 'size' last bytes of the trace buffer
+  const char *trace_buffer_tail(size_t size);
+
+  enum enum_support_I_S get_support_I_S() const { return support_I_S; }
+
+  /// @returns total memory used by this trace
+  size_t alloced_length() const
+  { return trace_buffer.alloced_length() + query_buffer.alloced_length(); }
+
+  /**
+    Not "logical const", bad.
+    Will go away when get_get_current_struct() is not used in opt_range.cc
+    anymore.
+  */
+  Opt_trace_struct *get_current_struct() { return current_struct; }
+
+private:
+
+  bool ended;           ///< Whether @c end() has been called on this instance
+
+  /// Should this trace be in information_schema
+  enum enum_support_I_S support_I_S;
+
+  Opt_trace_context *ctx;                       ///< context
+  Opt_trace_struct *current_struct;             ///< current open structure
+
+  /**
+     Whether we added data (scalars, structures) to the current struct.
+     This is used and maintained only if sending to I_S.
+  */
+  bool current_struct_empty;
+  /// @sa stack_of_current_structs
+  struct Struct_desc
+  {
+    Opt_trace_struct *current_struct;
+    bool has_disabled_I_S;
+    bool current_struct_empty;
+  };
+  /**
+     Same logic as Opt_trace_context::stack_of_current_stmts. We need to
+     remember:
+     - each value of current_struct
+     - whether such structure caused tracing to be disabled in this statement
+     (happens when the structure belongs to a not-traced optimizer feature,
+     in accordance with the value of @@@@optimizer_trace_features)
+     - whether such structure was empty.
+  */
+  Dynamic_array<Struct_desc> stack_of_current_structs;
+
+  /**
+     When we temporarily disable I_S (because of Opt_trace_disable_I_S, or
+     because we are entering a structure belonging to a not-traced optimizer
+     feature), we need to remember the pre-disabling state, to restore it
+     later.
+  */
+  Dynamic_array<enum enum_support_I_S> stack_of_values_of_support_I_S;
+
+  /**
+     Temporarily disables I_S. This is private because only our friend
+     Opt_trace_disable_I_S is trusted enough to use it.
+  */
+  void disable_I_S_for_this_and_children()
   {
-    /*
-      User requested no tracing for this structure's feature. We are entering
-      a disabled portion; put an ellipsis "..." to alert the user
-    */
-    if (stmt->current_struct != NULL)
-    {
-      if (key != NULL)
-        stmt->current_struct->add_alnum(key, "...");
-      else
-        stmt->current_struct->add_alnum("...");
-    }
-    stmt->support_I_S= false; // disable tracing for this struct and children
+    stack_of_values_of_support_I_S.append(support_I_S);
+    support_I_S= NO_FOR_THIS_AND_CHILDREN;
   }
-  if (stmt->support_I_S)
+  void restore_I_S()  ///< Restores after disable_I_S_for_this_and_children().
   {
-    const size_t alloced=   stmt->buffer.alloced_length();
-    const size_t increment= Opt_trace_stmt::buffer_alloc_increment;
-    if ((alloced - stmt->buffer.length()) < (increment / 3))
+    support_I_S= stack_of_values_of_support_I_S.pop();
+  }
+
+  /// A wrapper of class String, for storing query or trace.
+  class Buffer
+  {
+  private:
+    size_t allowed_mem_size;   ///< allowed memory size for this String
+    size_t missing_bytes;      ///< how many bytes could not be added
+    bool   malloc_error;       ///< whether there was a malloc/realloc() error
+    String string_buf;
+  public:
+    Buffer() : allowed_mem_size(0), missing_bytes(0), malloc_error(false) {}
+    uint32 alloced_length() const { return string_buf.alloced_length(); }
+    uint32 length() const { return string_buf.length(); }
+    bool prealloc();    ///< pro-actively extend buffer if soon short of space
+    char *c_ptr_safe()
     {
-      /*
-        Support for I_S will produce long strings, and there is little free
-        space left in the allocated buffer, so it looks like
-        realloc is soon unavoidable; so let's get many bytes at a time.
-        Note that if this re-allocation fails, or any String::append(), we
-        will get a weird trace; either truncated if the server stops, or maybe
-        with a hole if there is later memory again for the trace's
-        continuation. That will be visible in the OS_MALLOC_ERROR column.
-      */
-      size_t new_size= alloced + increment;
-      size_t max_size= stmt->buffer.allowed_mem_size;
-      /*
-        Determine a safety margin:
-        (A) String::realloc() adds at most ALIGN_SIZE(1) bytes to requested
-        length, so we need to decrement max_size by this amount, to be sure that
-        we don't allocate more than max_size
-        (B) We need to stay at least one byte under that max_size, or the next
-        append() would trigger up-front truncation, which is potentially wrong
-        for a "pre-emptive allocation" as we do here.
-      */
-      const size_t safety_margin= ALIGN_SIZE(1) /* (A) */ + 1 /* (B) */;
-      if (max_size >= safety_margin)
-      {
-        max_size-= safety_margin;
-        if (new_size > max_size) // don't pre-allocate more than the limit
-          new_size= max_size;
-        if (new_size >= alloced) // never shrink string
-          stmt->buffer.realloc(new_size);
-      }
+      // Alas, String::c_ptr_safe() does no realloc error checking
+      return string_buf.c_ptr_safe();
     }
-  }
-  if (likely(parent != NULL))
-    parent->add_struct(key);
-  stmt->current_struct= this;
-  if (!stmt->support_I_S)
-    return;
-  stmt->buffer.append(brackets[requires_key]);
-  stmt->push();
-}
+    const char *ptr() const { return string_buf.ptr(); }
 
+    CHARSET_INFO *charset() const { return string_buf.charset(); }
+    void set_charset(CHARSET_INFO *charset) { string_buf.set_charset(charset); }
 
-void Opt_trace_struct::add_key_name(const char *key)
-{
-  /* user should always add to the innermost open object, not outside */
-  DBUG_ASSERT(stmt->current_struct == this);
-  bool has_key= key != NULL;
-  if (unlikely(has_key != requires_key))
-  {
-    syntax_error(key);
-    key= has_key ? NULL : "?";
-    has_key= !has_key;
-  }
-  if (has_key)
-  {
-#ifndef DBUG_OFF
-    /*
-      Check that we're not having two identical consecutive keys in one
-      object; though the real restriction should not have 'consecutive'.
+    /**
+       Like @c String::append()
+       @param  str     String, in this instance's charset
+       @param  length  length of string
     */
-    DBUG_ASSERT(strncmp(previous_key, key, sizeof(previous_key) - 1) != 0);
-    strncpy(previous_key, key, sizeof(previous_key) - 1);
-    previous_key[sizeof(previous_key) - 1]= 0;
-#endif
-    stmt->buffer.append('"');
-    stmt->buffer.append(key);
-    stmt->buffer.append(STRING_WITH_LEN("\": "));
-  }
+    bool append(const char *str, size_t length);
+    bool append(const char *str) { return append(str, strlen(str)); }
+    /**
+       Like @c append() but escapes certain characters for string values to
+       be JSON-compliant.
+       @param  str     String in UTF8
+       @param  length  length of string
+    */
+    bool append_escaped(const char *str, size_t length);
+    bool append(char chr);
+
+    size_t get_allowed_mem_size() const { return allowed_mem_size; }
+    bool get_malloc_error() const { return malloc_error; }
+    size_t get_missing_bytes() const { return missing_bytes; }
+
+    void set_malloc_error() { malloc_error= true; }
+    void set_allowed_mem_size(size_t a) { allowed_mem_size= a; }
+  }; // end of class Buffer
+
+  Buffer trace_buffer;                    ///< Where the trace is accumulated
+  Buffer query_buffer;                    ///< Where the original query is put
+
+  friend class Opt_trace_disable_I_S;
+};
+
+
+/**
+   This is a trick.
+   The problem is that Opt_trace_stmt needs access to
+   Opt_trace_struct::check_key(). We don't want to make Opt_trace_stmt a
+   friend of Opt_trace_struct, that would be giving Opt_trace_stmt access to
+   too many things. So we have to make check_key() public. This isn't good
+   either: the function becomes accessible to includers of opt_trace.h.
+   Instead, we declare the class below as friend of Opt_trace_struct; this
+   class only calls check_key(), and Opt_trace_stmt is a friend of it. The
+   idea is that, of Opt_trace_struct, only check_key() is made accessible
+   to Opt_trace_stmt, and only to it, via the class below.
+   Let me know what option to choose, trick or public.
+*/
+class Partial_access_for_Opt_trace_stmt
+{
+private:
+  static const char *check_key(Opt_trace_struct *s, const char *key)
+  { return s->check_key(key); }
+
+  friend class Opt_trace_stmt;
+};
+
+
+// implementation of class Opt_trace_struct
+
+bool Opt_trace_struct::dbug_assert_on_syntax_error= true;
+
+namespace {
+/// opening and closing symbols for arrays ([]) and objects ({})
+const char brackets[]= { '[', '{', ']', '}' };
+inline char opening_bracket(bool requires_key)
+{
+  return brackets[requires_key];
+}
+inline char closing_bracket(bool requires_key)
+{
+  return brackets[requires_key + 2];
 }
+} // namespace
 
 
-void Opt_trace_struct::add_struct(const char *key)
+void Opt_trace_struct::do_construct(Opt_trace_context *ctx,
+                                    bool requires_key_arg,
+                                    const char *key,
+                                    Opt_trace_context::feature_value feature)
 {
-  DBUG_ASSERT(started);
-  if (!stmt->support_I_S)
-    return;
-  stmt->separator();
-  add_key_name(key);
+  saved_key= key;
+  requires_key= requires_key_arg;
+
+  DBUG_PRINT("opt", ("%s: starting struct", key));
+  stmt= ctx->get_current_stmt_in_gen();
+  started= true;
+#ifndef DBUG_OFF
+  previous_key[0]= 0;
+#endif
+  stmt->open_struct(key, this, feature, opening_bracket(requires_key));
 }
 
 
-void Opt_trace_struct::do_destruct(void)
+void Opt_trace_struct::do_destruct()
 {
-  DBUG_PRINT("opt", ("%s: ending %s", saved_key, types[requires_key]));
+  DBUG_PRINT("opt", ("%s: ending struct", saved_key));
   DBUG_ASSERT(started);
-  /*
-    Note:
-    - the Server code invokes Opt_trace_struct's constructor/destructor, and so
-    stmt's current_struct is necessarily set by Opt_trace_struct's
-    constructor/destructor
-    - the Server code invokes Opt_trace_context start/end() functions, and so
-    context's current_stmt_in_gen is set in start()/end() (which is clean: object
-    sets _its_ variables, does not let its child do it).
-  */
-  stmt->current_struct= parent;
-  DBUG_ASSERT(parent == NULL || stmt == parent->stmt);
-  if (stmt->support_I_S)
-  {
-    stmt->pop();
-    stmt->next_line();
-    stmt->buffer.append(brackets[requires_key + 2]);
-    if (stmt->ctx->end_marker && saved_key != NULL)
-    {
-      stmt->buffer.append(STRING_WITH_LEN(" /* "));
-      stmt->buffer.append(saved_key);
-      stmt->buffer.append(STRING_WITH_LEN(" */"));
-    }
-  }
-  stmt->support_I_S= save_stmt_support_I_S;
+  stmt->close_struct(saved_key, closing_bracket(requires_key));
   started= false;
 }
 
 
+/**
+   @note add() has an up-front if(), hopefully inlined, so that in the common
+   case - tracing run-time disabled - we have no function call. If tracing is
+   enabled, we call do_add().
+   In a 20-table plan search (as in BUG#50595), the execution time was
+   decreased from 2.6 to 2.0 seconds thanks to this inlined-if trick.
+*/
 Opt_trace_struct& Opt_trace_struct::do_add(const char *key, const char *val,
                                            const size_t val_length,
                                            bool escape)
 {
   DBUG_ASSERT(started);
   DBUG_PRINT("opt", ("%s: \"%.*s\"", key, (int)val_length, val));
-  if (!stmt->support_I_S)
-    return *this;
-  stmt->separator();
-  add_key_name(key);
-  stmt->buffer.append('"');
-  /*
-    Objects' keys use "normal" characters (A-Za-z0-9_), no escaping
-    needed. Same for numeric/bool values. Only string values may need
-    escaping.
-  */
-  if (escape)
-  {
-    /*
-      Work around BUG#57341. Every time we come here (printing a condition or
-      an expanded query), the input comes from String-s having
-      str_charset==UTF8, but in reality it may contain pieces in query's
-      charset (==character_set_client), pieces in UTF8, and pieces in ASCII
-      ("select" and other language keywords). If we directly store that mix in a
-      UTF8 column, the query_charset piece causes an issue:
-      Field_blob::store() will truncate the trace at first unrecognized
-      character. At worse, a single bad character in the expanded query makes
-      all the rest of the trace be lost.
-      To work around the bug, we force a conversion from UTF8 to UTF8, which
-      will replace any non-UTF8 characters with '?'. Some query_charset
-      characters may produce valid "exotic" UTF8 characters. That and a few
-      '?' are much better than a truncated trace.
-      If the client uses only utf8, things are guaranteed ok.
-      Note that JSON accepts only UTF8.
-    */
-    CHARSET_INFO *query_charset= stmt->query_buffer.charset();
-    if (my_charset_same(query_charset, system_charset_info))
-      stmt->buffer.append_escaped(val, val_length);
-    else
-    {
-
-      uint32 new_length= system_charset_info->mbmaxlen * val_length;
-      char *utf8_str= (char *)my_malloc(new_length, MYF(0));
-      if (utf8_str == NULL)
-      {
-        stmt->buffer.malloc_error= true;
-        return *this;
-      }
-      uint errors;
-      new_length= copy_and_convert(utf8_str, new_length, system_charset_info,
-                                   val, val_length, system_charset_info,
-                                   &errors);
-      /*
-        Such UTF8 can now be safely escaped. Because UTF8 has the same
-        characters in range 0-127 as ASCII does, and other UTF8 characters
-        don't contain 0-127 bytes, if we see a 0 byte it is really the NUL
-        character and not a part of a longer character, if we see a newline,
-        same, etc. That wouldn't necessarily be true if we used
-        query_charset as target character set, so escaping would be
-        impossible.
-      */
-      stmt->buffer.append_escaped(utf8_str, new_length);
-      my_free(utf8_str);
-    }
-  }
-  else
-    stmt->buffer.append(val, val_length);
-  stmt->buffer.append('"');
+  stmt->add(check_key(key), val, val_length, true, escape);
   return *this;
 }
 
+namespace {
+/// human-readable names for boolean values
+LEX_CSTRING readables[]= { { STRING_WITH_LEN("false") },
+                           { STRING_WITH_LEN("true") } };
+}
 
 Opt_trace_struct& Opt_trace_struct::do_add(const char *key, bool val)
 {
   DBUG_ASSERT(started);
   DBUG_PRINT("opt", ("%s: %d", key, (int)val));
-  if (!stmt->support_I_S)
-    return *this;
-  stmt->separator();
-  add_key_name(key);
-  LEX_CSTRING readables[]= { { STRING_WITH_LEN("false") },
-                             { STRING_WITH_LEN("true") } };
   const LEX_CSTRING *readable= &readables[test(val)];
-  stmt->buffer.append(readable->str, readable->length);
+  stmt->add(check_key(key), readable->str, readable->length, false, false);
   return *this;
 }
 
@@ -301,14 +397,10 @@ Opt_trace_struct& Opt_trace_struct::do_a
 Opt_trace_struct& Opt_trace_struct::do_add(const char *key, longlong val)
 {
   DBUG_ASSERT(started);
-  char buf[22];
+  char buf[22];                     // 22 is enough for digits of a 64-bit int
   llstr(val, buf);
   DBUG_PRINT("opt", ("%s: %s", key, buf));
-  if (!stmt->support_I_S)
-    return *this;
-  stmt->separator();
-  add_key_name(key);
-  stmt->buffer.append(buf);
+  stmt->add(check_key(key), buf, strlen(buf), false, false);
   return *this;
 }
 
@@ -319,11 +411,7 @@ Opt_trace_struct& Opt_trace_struct::do_a
   char buf[22];
   ullstr(val, buf);
   DBUG_PRINT("opt", ("%s: %s", key, buf));
-  if (!stmt->support_I_S)
-    return *this;
-  stmt->separator();
-  add_key_name(key);
-  stmt->buffer.append(buf);
+  stmt->add(check_key(key), buf, strlen(buf), false, false);
   return *this;
 }
 
@@ -331,33 +419,25 @@ Opt_trace_struct& Opt_trace_struct::do_a
 Opt_trace_struct& Opt_trace_struct::do_add(const char *key, double val)
 {
   DBUG_ASSERT(started);
-  char buf[32];
+  char buf[32];                         // 32 is enough for digits of a double
   my_snprintf(buf, sizeof(buf), "%g", val);
   DBUG_PRINT("opt", ("%s: %s", key, buf));
-  if (!stmt->support_I_S)
-    return *this;
-  stmt->separator();
-  add_key_name(key);
-  stmt->buffer.append(buf);
+  stmt->add(check_key(key), buf, strlen(buf), false, false);
   return *this;
 }
 
 
 Opt_trace_struct& Opt_trace_struct::do_add_null(const char *key)
 {
+  DBUG_ASSERT(started);
   DBUG_PRINT("opt", ("%s: null", key));
-  if (!stmt->support_I_S)
-    return *this;
-  stmt->separator();
-  add_key_name(key);
-  stmt->buffer.append(STRING_WITH_LEN("null"));
+  stmt->add(check_key(key), STRING_WITH_LEN("null"), false, false);
   return *this;
 }
 
 
 Opt_trace_struct& Opt_trace_struct::do_add(const char *key, Item *item)
 {
-  DBUG_ASSERT(started);
   char buff[256];
   String str(buff,(uint32) sizeof(buff), system_charset_info);
   str.length(0);
@@ -372,549 +452,366 @@ Opt_trace_struct& Opt_trace_struct::do_a
 }
 
 
-/* Opt_trace_context class */
-
-const char *Opt_trace_context::flag_names[]=
-{
-  "enabled", "end_marker", "one_line", "default", NullS
-};
-
-const char *Opt_trace_context::feature_names[]=
-{
-  "greedy_search", "range_optimizer", "dynamic_range", 
-  "repeated_subselect", "default", NullS
-};
-
-const Opt_trace_context::feature_value Opt_trace_context::FEATURES_DEFAULT=
-  Opt_trace_context::feature_value(Opt_trace_context::GREEDY_SEARCH   |
-                                   Opt_trace_context::RANGE_OPTIMIZER |
-                                   Opt_trace_context::DYNAMIC_RANGE   |
-                                   Opt_trace_context::REPEATED_SUBSELECT);
-
-Opt_trace_context::Opt_trace_context(void):
-  oldest_stmt_to_show(NULL), newest_stmt_to_show(NULL), stmt_to_del(NULL),
-  since_offset_0(0), current_stmt_in_gen(NULL), cannot_change_settings(false)
-{}
-
-
-Opt_trace_context::~Opt_trace_context(void)
-{
-  /* There may well be some few ended traces left: */
-  purge(true);
-  /* All should have moved to 'del' list: */
-  DBUG_ASSERT(newest_stmt_to_show == NULL && oldest_stmt_to_show == NULL);
-  /* All of 'del' list should have been deleted: */
-  DBUG_ASSERT(stmt_to_del == NULL);
-}
-
-
-Opt_trace_struct *Opt_trace_context::get_current_struct(void) const
+Opt_trace_struct& Opt_trace_struct::do_add_utf8_table(TABLE *tab)
 {
-  return current_stmt_in_gen->current_struct;
+  return
+    do_add("database", tab->s->db.str, tab->s->db.length, true).
+    do_add("table", tab->alias, strlen(tab->alias), true);
 }
 
 
-void Opt_trace_context::link_to_shown(Opt_trace_stmt *stmt)
+const char *Opt_trace_struct::check_key(const char *key)
 {
-  DBUG_PRINT("opt", ("Opt_trace_context::link_to_shown %p", stmt));
-  stmt->prev= stmt->next= NULL;
-  if (oldest_stmt_to_show == NULL)
+  DBUG_ASSERT(started);
+  /**
+    User should always add to the innermost open object, not outside.
+    @todo: replace this with a member function in Opt_trace_stmt doing
+    this check; or make current_struct return a pointer-to-const. This will be
+    possible after Opt_trace_context::get_current_struct() is deleted.
+  */
+  DBUG_ASSERT(stmt->get_current_struct() == this);
+  bool has_key= key != NULL;
+  if (unlikely(has_key != requires_key))
   {
-    DBUG_ASSERT(newest_stmt_to_show == NULL);
-    oldest_stmt_to_show= stmt;
+    stmt->syntax_error(key);
+    key= has_key ? NULL : "?";
+    has_key= !has_key;
   }
-  else
+  if (has_key)
   {
-    newest_stmt_to_show->next= stmt;
-    stmt->prev= newest_stmt_to_show;
+#ifndef DBUG_OFF
+    /*
+      Check that we're not having two identical consecutive keys in one
+      object; though the real restriction should not have 'consecutive'.
+    */
+    DBUG_ASSERT(strncmp(previous_key, key, sizeof(previous_key) - 1) != 0);
+    strncpy(previous_key, key, sizeof(previous_key) - 1);
+    previous_key[sizeof(previous_key) - 1]= 0;
+#endif
   }
-  newest_stmt_to_show= stmt;
+  return key;
 }
 
 
-void Opt_trace_context::unlink_from_shown(Opt_trace_stmt *stmt)
+// Implementation of Opt_trace_stmt class
+
+Opt_trace_stmt::Opt_trace_stmt(Opt_trace_context *ctx_arg,
+                               enum enum_support_I_S support_I_S_arg):
+  ended(false), support_I_S(support_I_S_arg), ctx(ctx_arg),
+  current_struct(NULL),
+  stack_of_current_structs(array_min_room, array_extend),
+  stack_of_values_of_support_I_S(array_min_room, array_extend)
 {
-  DBUG_PRINT("opt", ("Opt_trace_context::unlink_from_shown %p", stmt));
-  if (stmt == oldest_stmt_to_show)
-    oldest_stmt_to_show= stmt->next;
-  else
-    stmt->prev->next= stmt->next;
-  if (stmt == newest_stmt_to_show)
-    newest_stmt_to_show= stmt->prev;
-  else
-    stmt->next->prev= stmt->prev;
+  // Trace is always in UTF8, it's "all" that JSON accepts
+  trace_buffer.set_charset(system_charset_info);
+  DBUG_ASSERT(system_charset_info == &my_charset_utf8_general_ci);
 }
 
 
-void Opt_trace_context::link_to_del(Opt_trace_stmt *stmt)
+void Opt_trace_stmt::end()
 {
-  DBUG_PRINT("opt", ("Opt_trace_context::link_to_del %p", stmt));
-  stmt->prev= stmt->next= NULL;
-  if (stmt_to_del != NULL)
-  {
-    stmt_to_del->next= stmt;
-    stmt->prev= stmt_to_del;
-  }
-  stmt_to_del= stmt;
+  DBUG_ASSERT(stack_of_current_structs.elements() == 0);
+  DBUG_ASSERT(stack_of_values_of_support_I_S.elements() == 0);
+  ended= true;
+  /*
+    Because allocation is done in big chunks, buffer->Ptr[str_length]
+    may be uninitialized while buffer->Ptr[allocated length] is 0, so we
+    must use c_ptr_safe() as we want a 0-terminated string (which is easier
+    to manipulate in a debugger, or to compare in unit tests with
+    EXPECT_STREQ).
+    c_ptr_safe() may realloc an empty String from 0 bytes to 8 bytes,
+    when it adds the closing \0.
+  */
+  trace_buffer.c_ptr_safe();
+  // Send the full nice trace to DBUG.
+  DBUG_EXECUTE("opt",
+               {
+                 const char *trace= trace_buffer.c_ptr_safe();
+                 DBUG_LOCK_FILE;
+                 fputs("Complete optimizer trace:", DBUG_FILE);
+                 fputs(trace, DBUG_FILE);
+                 fputs("\n", DBUG_FILE);
+                 DBUG_UNLOCK_FILE;
+               }
+               );
 }
 
 
-void Opt_trace_context::unlink_from_del(Opt_trace_stmt *stmt)
+void Opt_trace_stmt::set_allowed_mem_size(size_t size)
 {
-  DBUG_PRINT("opt", ("Opt_trace_context::unlink_from_del %p", stmt));
-  if (stmt->prev != NULL)
-    stmt->prev->next= stmt->next;
-  if (stmt == stmt_to_del)
-    stmt_to_del= stmt->prev;
-  else
-    stmt->next->prev= stmt->prev;
+  trace_buffer.set_allowed_mem_size(size);
 }
 
 
-void Opt_trace_context::purge(bool purge_all)
+bool Opt_trace_stmt::set_query(const char *query, size_t length,
+                               CHARSET_INFO *charset)
 {
-  DBUG_ENTER("Opt_trace_context::purge");
-  if (!purge_all && offset >= 0)
+  // Should be called only once per statement.
+  DBUG_ASSERT(query_buffer.ptr() == NULL);
+  query_buffer.set_charset(charset);
+  if (support_I_S != YES_FOR_THIS)
   {
-    /* This case is managed in @c Opt_trace_context::start() */
-    DBUG_VOID_RETURN;
+    /*
+      Query won't be read, don't waste resources storing it. Still we have set
+      the charset, which is necessary.
+    */
+    return false;
   }
-  ulong i= 0; // distance to the newest trace (unit: 'list elements')
-  Opt_trace_stmt *stmt;
-  /* Start from the newest traces, scroll back in time */
-  for (stmt= newest_stmt_to_show ; stmt != NULL ; )
+  // We are taking a bit of space from 'trace_buffer'.
+  size_t available=
+    (trace_buffer.alloced_length() >= trace_buffer.get_allowed_mem_size()) ?
+    0 : (trace_buffer.get_allowed_mem_size() - trace_buffer.alloced_length());
+  query_buffer.set_allowed_mem_size(available);
+  // No need to escape query, this is not for JSON.
+  const bool rc= query_buffer.append(query, length);
+  // Space which query took is taken out of the trace:
+  const size_t new_allowed_mem_size=
+    (query_buffer.alloced_length() >= trace_buffer.get_allowed_mem_size()) ?
+    0 : (trace_buffer.get_allowed_mem_size() - query_buffer.alloced_length());
+  trace_buffer.set_allowed_mem_size(new_allowed_mem_size);
+  return rc;
+}
+
+
+void Opt_trace_stmt::open_struct(const char *key, Opt_trace_struct *struc,
+                                 Opt_trace_context::feature_value feature,
+                                 char opening_bracket)
+{
+  bool has_disabled_I_S= false;
+  if (support_I_S == YES_FOR_THIS)
   {
-    i++;
-    if (!purge_all && i <= (ulong)(-offset))
+    if (!ctx->feature_enabled(feature))
     {
-      /* OFFSET mandates that this trace should be kept; move to previous */
-      stmt= stmt->prev;
+
+      /*
+        User requested no tracing for this structure's feature. We are entering
+        a disabled portion; put an ellipsis "..." to alert the user.
+        Disabling applies to all the structure's children.
+        It is possible that inside this struct, a new statement is created
+        (range optimizer can evaluate stored functions...): its tracing is
+        disabled too.
+        When the structure is destroyed, the initial setting is restored.
+      */
+      if (current_struct != NULL)
+      {
+        if (key != NULL)
+          current_struct->add_alnum(key, "...");
+        else
+          current_struct->add_alnum("...");
+      }
+      stack_of_values_of_support_I_S.append(support_I_S);
+      support_I_S= NO_FOR_THIS_AND_CHILDREN;
+      has_disabled_I_S= true;
     }
     else
     {
-      Opt_trace_stmt *stmt_del= stmt;
-      stmt= stmt_del->prev;
-      /* make it invisible in OPTIMIZER_TRACE table... */
-      unlink_from_shown(stmt_del);
-      /* ... and remember to free it (as in @c free()) when possible */
-      link_to_del(stmt_del);
-      i--; // it does not count for 'offset'
+      if (current_struct != NULL)
+        key= Partial_access_for_Opt_trace_stmt::
+          check_key(current_struct, key);
+      trace_buffer.prealloc();
+      separator();
+      if (key != NULL)
+      {
+        trace_buffer.append('"');
+        trace_buffer.append(key);
+        trace_buffer.append(STRING_WITH_LEN("\": "));
+      }
+      trace_buffer.append(opening_bracket);
     }
   }
-  /* Examine list of "to be freed" traces and free what can be */
-  for (stmt= stmt_to_del ; stmt != NULL ; )
-  {
-    if (stmt->started)
-    {
-      /*
-        This trace is not finished, freeing it now would lead to use of
-        freed memory if a structure is later added to it. This would be
-        possible: assume OFFSET=-1 and we have
-        CALL statement starts executing
-          create its trace (call it "trace #1")
-          add structure to trace #1
-          add structure to trace #1
-          First sub-statement executing
-            create its trace (call it "trace #2")
-            from then on, trace #1 is not needed, free() it
-            add structure to trace #2
-            add structure to trace #2
-          First sub-statement ends
-          add structure to trace #1 - oops, adding to a free()d trace!
-        So if a trace is not finished, we will wait until it is and
-        re-consider it then (which is why this function is called in @c
-        Opt_trace_stmt::end() too).
-      */
-      stmt= stmt->prev;
-    }
-    else
-    {
-      Opt_trace_stmt *stmt_del= stmt;
-      stmt= stmt_del->prev;
-      unlink_from_del(stmt_del);
-      delete stmt_del;
-    }
-  }
-  DBUG_VOID_RETURN;
+  Struct_desc d;
+  d.current_struct=   current_struct;
+  d.has_disabled_I_S= has_disabled_I_S;
+  d.current_struct_empty= current_struct_empty;
+  stack_of_current_structs.append(d);
+  current_struct= struc;
+  current_struct_empty= true;
 }
 
 
-bool Opt_trace_context::start(bool support_I_S_arg, bool end_marker_arg,
-                              bool one_line_arg,
-                              long offset_arg,
-                              long limit_arg,
-                              ulong max_mem_size_arg,
-                              ulonglong features_arg)
+void Opt_trace_stmt::close_struct(const char *saved_key,
+                                  char closing_bracket)
 {
-  DBUG_ENTER("Opt_trace_context::start");
-  /*
-    Tracing may already be started when we come here, for example if we are
-    starting execution of a sub-statement of a stored routine, which CALL has
-    tracing enabled too.
-  */
-  if (current_stmt_in_gen == NULL)
-  {
-    /*
-      This is the initial situation: tracing is not yet enabled. For sure we
-      can change settings then.
-    */
-    DBUG_ASSERT(!cannot_change_settings);
-    cannot_change_settings= false;
-  }
-  if (cannot_change_settings)
-  {
-    /*
-      Tracing is strictly disabled by the caller. Thus don't listen to any
-      request from the user for enabling tracing or changing settings (offset
-      etc). Doing otherwise would surely bring a problem.
-    */
-  }
-  else
-  {
-    /*
-      Here we allow a stored routine's sub-statement to enable/disable
-      tracing, or change settings. Thus in a stored routine's body, there can
-      be some 'SET OPTIMIZER_TRACE="enabled=[on|off]"' to trace only certain
-      sub-statements.
-    */
-    support_I_S= support_I_S_arg;
-    end_marker= end_marker_arg;
-    one_line= one_line_arg;
-    offset= offset_arg;
-    limit= limit_arg;
-    max_mem_size= max_mem_size_arg;
-    features= feature_value(features_arg | MISC); // MISC always on
-  }
-  /*
-    Decide whether to-be-created trace should support I_S.
-    Don't use current_stmt_in_gen->support_I_S as a base: if the previous
-    trace was disabled due to being "before offset" (case of a positive
-    offset), we don't want the new trace to inherit and be disabled (for
-    example it may be 'after offset'). On the other hand, if I_S is disabled
-    at the context's level, it imposes on this new trace. Thus: inherit from
-    the context.
-  */
-  bool new_stmt_support_I_S= support_I_S;
+  Struct_desc d= stack_of_current_structs.pop();
+  current_struct= d.current_struct;
+  current_struct_empty= d.current_struct_empty;
 
-  if (new_stmt_support_I_S && offset >= 0)
+  if (support_I_S == YES_FOR_THIS)
   {
-    /* If outside the offset/limit window, no need to support I_S */
-    if (since_offset_0 < (ulong)offset)
+    next_line();
+    trace_buffer.append(closing_bracket);
+    if (ctx->get_end_marker() && saved_key != NULL)
     {
-      DBUG_PRINT("opt", ("disabled: since_offset_0(%llu) < offset(%lu)",
-                         (ulonglong)since_offset_0, offset));
-      new_stmt_support_I_S= false;
-    }
-    else if (since_offset_0 >= (ulong)(offset + limit))
-    {
-      DBUG_PRINT("opt", ("disabled: since_offset_0(%llu) >="
-                         " offset(%lu) + limit(%lu)",
-                         (ulonglong)since_offset_0, offset, limit));
-      new_stmt_support_I_S= false;
-    }
-    since_offset_0++;
-  }
-
-  Opt_trace_stmt *stmt= new Opt_trace_stmt(this, current_stmt_in_gen,
-                                           new_stmt_support_I_S);
-  DBUG_PRINT("opt",("new stmt %p support_I_S %d", stmt,
-                    (int)new_stmt_support_I_S));
-  if (stmt == NULL)
-    DBUG_RETURN(true);
-
-  current_stmt_in_gen= stmt;
-  if (new_stmt_support_I_S)
-    link_to_shown(stmt);
-  else
-  {
-    /*
-      If sending only to DBUG, don't show to the user.
-      Same if tracing was temporarily disabled at higher layers with
-      Opt_trace_disable_I_S.
-      So we just link it to the 'del' list for purging when ended.
-    */
-    link_to_del(stmt);
-  }
-  /*
-    As we just added one trace, maybe the previous ones are unneeded now
-  */
-  purge(false);
-  /* This purge may have freed space, compute max allowed size: */
-  stmt->buffer.allowed_mem_size= allowed_mem_size(stmt);
-  /* Trace is always in UTF8, it's "all" that JSON accepts */
-  stmt->buffer.set_charset(system_charset_info);
-  DBUG_ASSERT(system_charset_info == &my_charset_utf8_general_ci);
-  DBUG_RETURN(false);
-}
-
-
-void Opt_trace_context::end(void)
-{
-  if (current_stmt_in_gen != NULL)
-  {
-    Opt_trace_stmt *parent= current_stmt_in_gen->parent;
-    current_stmt_in_gen->end();
-    current_stmt_in_gen= parent;
-    if (parent != NULL)
-    {
-      /*
-        Parent regains control, now it needs to be told that its child has
-        used space, and thus parent's allowance has shrunk.
-      */
-      parent->buffer.allowed_mem_size= allowed_mem_size(parent);
+      trace_buffer.append(STRING_WITH_LEN(" /* "));
+      trace_buffer.append(saved_key);
+      trace_buffer.append(STRING_WITH_LEN(" */"));
     }
   }
-  /*
-    Purge again. Indeed when we are here, compared to the previous start() we
-    have one more ended trace, so can potentially free more. Consider
-    offset=-1 and:
-       top_stmt, started
-         sub_stmt, starts: can't free top_stmt as it is not ended yet
-         sub_stmt, ends: won't free sub_stmt (as user will want to see it),
-         can't free top_stmt as not ended yet
-       top_stmt, continued
-       top_stmt, ends: free top_stmt as it's not last and is ended, keep only
-       sub_stmt.
-    Still the purge is done in ::start() too, as an optimization, for this
-    case:
-       sub_stmt, started
-       sub_stmt, ended
-       sub_stmt, starts: can free above sub_stmt, will save memory compared to
-       free-ing it only when the new sub_stmt ends.
-  */
-  purge(false);
+  if (d.has_disabled_I_S)
+    support_I_S= stack_of_values_of_support_I_S.pop();
 }
 
 
-size_t Opt_trace_context::allowed_mem_size(const Opt_trace_stmt *for_stmt)
-  const
+void Opt_trace_stmt::separator()
 {
-  DBUG_ENTER("Opt_trace_context::allowed_mem_size");
-  size_t mem_size= 0;
-  /* Even to-be-deleted traces use memory, so consider them in sum */
-  for (Opt_trace_stmt *stmt_start= newest_stmt_to_show ;
-       ;
-       stmt_start= stmt_to_del)
+  DBUG_ASSERT(support_I_S == YES_FOR_THIS);
+  // Put a comma first, if we have already written an object at this level.
+  if (current_struct != NULL)
   {
-    for (Opt_trace_stmt *stmt= stmt_start ; stmt != NULL ; )
-    {
-      if (stmt != for_stmt)
-        mem_size+= stmt->buffer.alloced_length() +
-          stmt->query_buffer.alloced_length();
-      stmt= stmt->prev;
-    }
-    if (stmt_start == stmt_to_del)
-      break;
+    if (!current_struct_empty)
+      trace_buffer.append(',');
+    next_line();
+    current_struct_empty= false;
   }
-  size_t rc= (mem_size <= max_mem_size) ? (max_mem_size - mem_size) : 0;
-  DBUG_PRINT("opt", ("rc %llu max_mem_size %llu",
-                     (ulonglong)rc, (ulonglong)max_mem_size));
-  DBUG_RETURN(rc);
-}
-
-
-const char *Opt_trace_context::get_tail(size_t size)
-{
-  if (current_stmt_in_gen == NULL)
-    return "";
-  Opt_trace_stmt::Buffer *buffer= &(current_stmt_in_gen->buffer);
-  size_t buffer_len= buffer->length();
-  const char *ptr= buffer->c_ptr_safe();
-  if (buffer_len > size)
-    ptr= ptr + buffer_len - size;
-  return ptr;
 }
 
 
-void Opt_trace_context::reset(void)
-{
-  purge(true);
-  since_offset_0= 0;
+namespace {
+const char my_spaces[] =
+  "                                                                "
+  "                                                                "
+  "                                                                "
+  ;
 }
 
 
-bool Opt_trace_context::set_query(const char *query, size_t length,
-                                  CHARSET_INFO *charset)
+void Opt_trace_stmt::next_line()
 {
-  return current_stmt_in_gen->set_query(query, length, charset);
-}
-
-
-/* Opt_trace_iterator class */
+  if (ctx->get_one_line())
+    return;
+  trace_buffer.append('\n');
 
-void Opt_trace_iterator::operator++(int)
-{
-  cursor= cursor->next;
-  if (row_count >= limit)
+  int to_be_printed= 2 * stack_of_current_structs.elements();
+  const int spaces_len= sizeof(my_spaces) - 1;
+  while (to_be_printed > spaces_len)
   {
-    /*
-      In theory this test is needed only if offset<0, where we have to trace
-      after 'limit' to prepare for the future, but not show what is after
-      'limit'; if offset>=0, we stopped producing traces when 'limit' was
-      reached.
-    */
-    cursor= NULL; // like the 'end' iterator has
+    trace_buffer.append(my_spaces, spaces_len);
+    to_be_printed-= spaces_len;
   }
-  row_count++;
+  trace_buffer.append(my_spaces, to_be_printed);
 }
 
 
-/**
-   @note because allocation is done in big chunks, buffer->Ptr[str_length]
-   may be uninitialized while buffer->Ptr[allocated length] is 0, so we
-   must use c_ptr_safe() as we want a NUL-terminated string (which is easier
-   to manipulate in a debugger, or to compare in unit tests with
-   ASSERT_STREQ).
-   c_ptr_safe() may realloc an empty String from 0 bytes to 8 bytes,
-   when it adds the closing NUL.
-*/
-Opt_trace_info Opt_trace_iterator::operator*()
+void Opt_trace_stmt::add(const char *key, const char *val, size_t val_length,
+                         bool quotes, bool escape)
 {
-  Opt_trace_info ret;
-  ret.trace_ptr=     cursor->buffer.c_ptr_safe();
-  ret.trace_length=  cursor->buffer.length();
-  ret.query_ptr=     cursor->query_buffer.ptr();
-  ret.query_length=  cursor->query_buffer.length();
-  ret.query_charset= cursor->query_buffer.charset();
-  ret.missing_bytes= cursor->buffer.missing_bytes;
-  ret.malloc_error=  cursor->buffer.malloc_error ||
-    cursor->query_buffer.malloc_error;
-  return ret;
-}
-
-
-Opt_trace_iterator Opt_trace_iterator::end;
-
-
-/* Opt_trace_stmt class */
-
-const size_t Opt_trace_stmt::empty_trace_len= 4;
-const size_t Opt_trace_stmt::buffer_alloc_increment= 1024;
-
-
-Opt_trace_stmt::Opt_trace_stmt(Opt_trace_context *ctx_arg,
-                               Opt_trace_stmt *parent_arg,
-                               bool support_I_S_arg):
-  started(true), ctx(ctx_arg), parent(parent_arg),
-  current_struct(NULL), support_I_S(support_I_S_arg),
-  depth(0), max_depth(0)
-{
-  DBUG_ASSERT(parent == NULL || ctx == parent->ctx);
-  int dummy= 0;
-  level_empty.append(dummy);
-  level_empty.at(0)= true;
-}
-
-
-void Opt_trace_stmt::end(void)
-{
-  DBUG_ASSERT(depth == 0);
-  depth= 0;
-  started= false;
-  /* Send the full nice trace to DBUG */
-  DBUG_EXECUTE("opt",
-               if (buffer.length() <= empty_trace_len)
-               { /*  don't spam DBUG with empty trace */ }
-               else
-               {
-                 const char *trace= buffer.c_ptr_safe();
-                 DBUG_LOCK_FILE;
-                 fputs("Complete optimizer trace:", DBUG_FILE);
-                 fputs(trace, DBUG_FILE);
-                 fputs("\n", DBUG_FILE);
-                 DBUG_UNLOCK_FILE;
-               }
-               );
-}
-
-
-void Opt_trace_stmt::next_line(void)
-{
-  if (ctx->one_line)
+  if (support_I_S != YES_FOR_THIS)
     return;
-  buffer.append('\n');
-  char spaces[]= "                               ";
-  // 2 spaces per nesting level
-  const int nb= depth * 2, spaces_len= sizeof(spaces) - 1;
-  // add spaces in chunks of spaces_len, better than many append(' ')
-  for (int i= 0; i < nb/spaces_len; i++)
-    buffer.append(spaces, spaces_len);
-  buffer.append(spaces, (nb % spaces_len));
-}
-
-
-void Opt_trace_stmt::push(void)
-{
-  DBUG_ASSERT(support_I_S);
-  depth++;
-  if (depth > max_depth)
+  separator();
+  if (key != NULL)
   {
-    int dummy= 0;
-    level_empty.append(dummy);
-    max_depth= depth;
+    trace_buffer.append('"');
+    trace_buffer.append(key);
+    trace_buffer.append(STRING_WITH_LEN("\": "));
   }
-  level_empty.at(depth)= true;
-}
-
-
-void Opt_trace_stmt::pop(void)
-{
-  DBUG_ASSERT(support_I_S);
-  depth--;
+  if (quotes)
+    trace_buffer.append('"');
+  /*
+    Objects' keys use "normal" characters (A-Za-z0-9_), no escaping
+    needed. Same for numeric/bool values. Only string values may need
+    escaping.
+  */
+  if (escape)
+  {
+    /*
+      Work around BUG#57341 / #11764503 . Every time we come here (printing a
+      condition or an expanded query), the input comes from String-s having
+      str_charset==UTF8, but in reality it may contain pieces in query's
+      charset (==character_set_client), pieces in UTF8, and pieces in ASCII
+      ("select" and other language keywords). If we directly store that mix in a
+      UTF8 column, the query_charset piece causes an issue:
+      Field_blob::store() will truncate the trace at first unrecognized
+      character. So just a single bad character in the expanded query makes
+      all the rest of the trace be lost.
+      To work around the bug, we force a conversion from UTF8 to UTF8, which
+      will replace any non-UTF8 characters with '?'. Some query_charset
+      characters may produce valid "exotic" UTF8 characters. That and a few
+      '?' are much better than a truncated trace.
+      If the client uses only utf8, things are guaranteed ok.
+      Note that JSON accepts only UTF8.
+    */
+    if (my_charset_same(query_buffer.charset(), system_charset_info))
+      trace_buffer.append_escaped(val, val_length);
+    else
+    {
+      uint32 new_length= system_charset_info->mbmaxlen * val_length;
+      char *utf8_str= (char *)my_malloc(new_length, MYF(0));
+      if (utf8_str == NULL)
+        trace_buffer.set_malloc_error();
+      else
+      {
+        uint errors;
+        new_length= copy_and_convert(utf8_str, new_length, system_charset_info,
+                                     val, val_length, system_charset_info,
+                                     &errors);
+        /*
+          Such UTF8 can now be safely escaped. Because UTF8 has the same
+          characters in range 0-127 as ASCII does, and other UTF8 characters
+          don't contain 0-127 bytes, if we see a byte equal to 0 it is really
+          the UTF8 u0000 character (a.k.a. ASCII NUL) and not a part of a longer
+          character; if we see a newline, same, etc. That wouldn't necessarily
+          be true if we used query_charset as target character set, so escaping
+          would be impossible.
+        */
+        trace_buffer.append_escaped(utf8_str, new_length);
+        my_free(utf8_str);
+      }
+    }
+  }
+  else
+    trace_buffer.append(val, val_length);
+  if (quotes)
+    trace_buffer.append('"');
 }
 
 
-void Opt_trace_stmt::separator(void)
+void Opt_trace_stmt::syntax_error(const char *key)
 {
+  DBUG_PRINT("opt", ("syntax error key: %s", key));
+  DBUG_ASSERT(support_I_S == YES_FOR_THIS);
+  DBUG_ASSERT(!Opt_trace_struct::dbug_assert_on_syntax_error);
   /*
-    If we've already written an object at this level, first put a
-    comma.
+    Inserting some warning text into the trace below helps locate where
+    things went wrong.
   */
-  if (!level_empty.at(depth))
-    buffer.append(',');
-  next_line();
-  level_empty.at(depth)= false;
+  if (key != NULL)
+  {
+    trace_buffer.append(STRING_WITH_LEN("** invalid JSON"
+                                        " (unexpected key \""));
+    trace_buffer.append(key);
+    trace_buffer.append(STRING_WITH_LEN("\") ** "));
+  }
+  else
+    trace_buffer.append(STRING_WITH_LEN("** invalid JSON"
+                                        " (missing key) ** "));
 }
 
 
-bool Opt_trace_stmt::set_query(const char *query, size_t length,
-                               CHARSET_INFO *charset)
+void Opt_trace_stmt::fill_info(Opt_trace_info *info) const
 {
-  /* Should be called only once per statement */
-  DBUG_ASSERT(query_buffer.ptr() == NULL);
-  query_buffer.set_charset(charset);
-  /* We're taking a bit of space from 'buffer' */
-  if (buffer.alloced_length() >= buffer.allowed_mem_size)
-  {
-    /* trace buffer already occupies all space */
-    buffer.missing_bytes+= length;
-    return true;
-  }
-  const size_t available= buffer.allowed_mem_size - buffer.alloced_length();
-  query_buffer.allowed_mem_size= available;
-  /* No need to escape query, this is not for JSON */
-  const bool rc= query_buffer.append(query, length);
-  /* Space which query took is taken out of the trace: */
-  if (query_buffer.alloced_length() >= buffer.allowed_mem_size)
-    buffer.allowed_mem_size= 0;
-  else
-    buffer.allowed_mem_size-= query_buffer.alloced_length();
-  return rc;
+  info->trace_ptr=     trace_buffer.ptr();
+  info->trace_length=  trace_buffer.length();
+  info->query_ptr=     query_buffer.ptr();
+  info->query_length=  query_buffer.length();
+  info->query_charset= query_buffer.charset();
+  info->missing_bytes= trace_buffer.get_missing_bytes() +
+    query_buffer.get_missing_bytes();
+  info->malloc_error=  trace_buffer.get_malloc_error() ||
+    query_buffer.get_malloc_error();
 }
 
 
-/* Opt_trace_stmt::Buffer class */
-
-bool Opt_trace_stmt::Buffer::realloc(uint32 arg_length)
+const char *Opt_trace_stmt::trace_buffer_tail(size_t size)
 {
-  bool rc= String::realloc(arg_length);
-  malloc_error|= rc;
-  return rc;
+  size_t buffer_len= trace_buffer.length();
+  const char *ptr= trace_buffer.c_ptr_safe();
+  if (buffer_len > size)
+    ptr+= buffer_len - size;
+  return ptr;
 }
 
 
-bool Opt_trace_stmt::simulate_oom= false;
-
+// Implementation of class Opt_trace_stmt::Buffer
 
 bool Opt_trace_stmt::Buffer::append_escaped(const char *str, size_t length)
 {
@@ -927,14 +824,15 @@ bool Opt_trace_stmt::Buffer::append_esca
   else
   {
 #ifndef DBUG_OFF // no mostly-useless if() in release binary
-    if (unlikely(simulate_oom))
+    if (unlikely(Opt_trace_context::simulate_oom_in_buffers))
       rc= true;
     else
 #endif
     {
       rc= false;
       const char *pstr, *pstr_end;
-      char buf[128] /* temporary output buffer*/, *pbuf= buf;
+      char buf[128];                     // Temporary output buffer.
+      char *pbuf= buf;
       for (pstr= str, pstr_end= (str + length) ; pstr < pstr_end ; pstr++)
       {
         char esc;
@@ -942,8 +840,8 @@ bool Opt_trace_stmt::Buffer::append_esca
         /*
           JSON syntax says that control characters must be escaped. Experience
           confirms that this means ASCII 0->31 and " and \ . A few of
-          them are accepted with a short escaping syntax (using \ : like
-          \n) but for most of them, only \uXXXX works, where XXXX is an
+          them are accepted with a short escaping syntax (using \ : like \n)
+          but for most of them, only \uXXXX works, where XXXX is a
           hexadecimal value for the code point.
           Rules also mention escaping / , but Python's and Perl's json modules
           do not require it, and somewhere on Internet someone said JSON
@@ -951,7 +849,7 @@ bool Opt_trace_stmt::Buffer::append_esca
         */
         switch (c)
         {
-          // don't use \u when possible for common chars, \ is easier to read:
+          // Don't use \u when possible for common chars, \ is easier to read:
         case '\\':
           esc= '\\';
           break;
@@ -971,7 +869,7 @@ bool Opt_trace_stmt::Buffer::append_esca
           esc= 0;
           break;
         }
-        if (esc != 0)                           // escaping with backslash
+        if (esc != 0)                           // Escaping with backslash.
         {
           *pbuf++= '\\';
           *pbuf++= esc;
@@ -979,7 +877,7 @@ bool Opt_trace_stmt::Buffer::append_esca
         else
         {
           uint ascii_code= (uint)c;
-          if (ascii_code < 32)                  // escaping with \u
+          if (ascii_code < 32)                  // Escaping with \u
           {
             *pbuf++= '\\';
             *pbuf++= 'u';
@@ -997,17 +895,21 @@ bool Opt_trace_stmt::Buffer::append_esca
             *pbuf++= _dig_vec_lower[ascii_code];
           }
           else
-            *pbuf++= c; // normal character, no escaping needed
+            *pbuf++= c; // Normal character, no escaping needed.
         }
+        /*
+          The next character may need up to 6 bytes (when the \uXXXX syntax
+          is used), so flush 'buf' if fewer than 6 bytes remain in it:
+        */
         if (pbuf > buf + (sizeof(buf) - 6))
         {
-          /* no room in 'buf' for next char, so flush it */
-          rc|= String::append(buf, pbuf - buf);
+          // Possibly no room in 'buf' for next char, so flush buf.
+          rc|= string_buf.append(buf, pbuf - buf);
           pbuf= buf; // back to buf's start
         }
       }
-      /* flush any chars left in 'buf' */
-      rc|= String::append(buf, pbuf - buf);
+      // Flush any chars left in 'buf'.
+      rc|= string_buf.append(buf, pbuf - buf);
     }
     malloc_error|= rc;
   }
@@ -1026,11 +928,11 @@ bool Opt_trace_stmt::Buffer::append(cons
   else
   {
 #ifndef DBUG_OFF // no mostly-useless if() in release binary
-    if (unlikely(simulate_oom))
+    if (unlikely(Opt_trace_context::simulate_oom_in_buffers))
       rc= true;
     else
 #endif
-      rc= String::append(str, length);
+      rc= string_buf.append(str, length);
     malloc_error|= rc;
   }
   return rc;
@@ -1047,54 +949,468 @@ bool Opt_trace_stmt::Buffer::append(char
   }
   else
   {
-    // no need for escaping chr, given how this function is used */
-    rc= String::append(chr);
+    // No need for escaping chr, given how this function is used.
+    rc= string_buf.append(chr);
     malloc_error|= rc;
   }
   return rc;
 }
 
 
-/* Opt_trace_disable_I_S class */
+
+bool Opt_trace_stmt::Buffer::prealloc()
+{
+  const size_t alloced=   alloced_length();
+  const size_t increment= 1024;
+  if ((alloced - length()) < (increment / 3))
+  {
+    /*
+      Support for I_S will produce long strings, and there is little free
+      space left in the allocated buffer, so it looks like
+      realloc is soon unavoidable; so let's get many bytes at a time.
+      Note that if this re-allocation fails, or any String::append(), we
+      will get a weird trace; either truncated if the server stops, or maybe
+      with a hole if there is later memory again for the trace's
+      continuation. That will be visible in the OS_MALLOC_ERROR column.
+      */
+    size_t new_size= alloced + increment;
+    size_t max_size= allowed_mem_size;
+    /*
+      Determine a safety margin:
+      (A) String::realloc() adds at most ALIGN_SIZE(1) bytes to requested
+      length, so we need to decrement max_size by this amount, to be sure that
+      we don't allocate more than max_size
+      (B) We need to stay at least one byte under that max_size, or the next
+      append() would trigger up-front truncation, which is potentially wrong
+      for a "pre-emptive allocation" as we do here.
+    */
+    const size_t safety_margin= ALIGN_SIZE(1) /* (A) */ + 1 /* (B) */;
+    if (max_size >= safety_margin)
+    {
+      max_size-= safety_margin;
+      if (new_size > max_size) // Don't pre-allocate more than the limit.
+        new_size= max_size;
+      if (new_size >= alloced) // Never shrink string.
+      {
+        bool rc= string_buf.realloc(new_size);
+        malloc_error|= rc;
+        return rc;
+      }
+    }
+  }
+  return false;
+}
+
+
+// Implementation of Opt_trace_context class
+
+const char *Opt_trace_context::flag_names[]=
+{
+  "enabled", "end_marker", "one_line", "default", NullS
+};
+
+const char *Opt_trace_context::feature_names[]=
+{
+  "greedy_search", "range_optimizer", "dynamic_range",
+  "repeated_subselect", "default", NullS
+};
+
+const Opt_trace_context::feature_value
+Opt_trace_context::default_features=
+  Opt_trace_context::feature_value(Opt_trace_context::GREEDY_SEARCH |
+                                   Opt_trace_context::RANGE_OPTIMIZER |
+                                   Opt_trace_context::DYNAMIC_RANGE   |
+                                   Opt_trace_context::REPEATED_SUBSELECT);
+
+bool Opt_trace_context::simulate_oom_in_buffers= false;
+
+Opt_trace_context::Opt_trace_context() :
+  current_stmt_in_gen(NULL),
+  stack_of_current_stmts(array_min_room, array_extend),
+  all_stmts_for_I_S(array_min_room, array_extend),
+  all_stmts_to_del(array_min_room, array_extend),
+  since_offset_0(0)
+  {}
+
+Opt_trace_context::~Opt_trace_context()
+{
+  /* There may well be some few ended traces left: */
+  purge_stmts(true);
+  /* All should have moved to 'del' list: */
+  DBUG_ASSERT(all_stmts_for_I_S.elements() == 0);
+  /* All of 'del' list should have been deleted: */
+  DBUG_ASSERT(all_stmts_to_del.elements() == 0);
+}
+
+
+bool Opt_trace_context::well_constructed() const
+{
+  /*
+    Dynamic array allocates memory from the heap, potentially on each
+    append() call. It is hard to leave all data structures in a correct state
+    if an append() fails.
+    The constructors of Dynamic_array have normally preallocated a reasonable
+    amount of cells, sufficient for most scenarios. It is possible that they
+    failed to do it though (@sa init_dynamic_array2()); if that is the case,
+    we prefer to fail.
+    Even if the check is successful, it can happen that the preallocated cells
+    are later exhausted, the array is re-allocated, which hits OOM, then we
+    will indeed crash :-/
+  */
+  if (stack_of_current_stmts.elements() == 0 &&
+      all_stmts_for_I_S.elements() == 0 &&
+      all_stmts_to_del.elements() == 0)
+  {
+    /*
+      About the if() above: it's sufficient to run this check right after
+      construction, not all the time.
+    */
+    if (!stack_of_current_stmts.guaranteed_room(array_min_room) ||
+        !all_stmts_for_I_S.guaranteed_room(array_min_room) ||
+        !all_stmts_to_del.guaranteed_room(array_min_room))
+      return false;
+  }
+  return true;
+}
+
+
+bool Opt_trace_context::start(enum enum_support_I_S support_I_S_arg,
+                              bool end_marker_arg, bool one_line_arg,
+                              long offset_arg, long limit_arg,
+                              ulong max_mem_size_arg, ulonglong features_arg)
+{
+  /*
+    Decide whether to-be-created trace should support I_S.
+    Sometimes the parent rules, sometimes not. If the parent
+    trace was disabled due to being "before offset" (case of a positive
+    offset), we don't want the new trace to inherit and be disabled (for
+    example it may be 'after offset').
+  */
+  enum enum_support_I_S new_stmt_support_I_S;
+  bool rc;
+  DBUG_ENTER("Opt_trace_context::start");
+
+  if (unlikely(!well_constructed()))
+    DBUG_RETURN(true);
+  /*
+    Tracing may already be started when we come here, for example if we are
+    starting execution of a sub-statement of a stored routine (CALL has
+    tracing enabled too).
+  */
+  if (current_stmt_in_gen != NULL &&
+      current_stmt_in_gen->get_support_I_S() == NO_FOR_THIS_AND_CHILDREN)
+  {
+    /*
+      Tracing is strictly disabled by the caller. Thus don't listen to any
+      request from the user for enabling tracing or changing settings (offset
+      etc). Doing otherwise would surely bring a problem.
+    */
+    new_stmt_support_I_S= NO_FOR_THIS_AND_CHILDREN;
+  }
+  else
+  {
+    /*
+      Here we allow a stored routine's sub-statement to enable/disable
+      tracing, or change settings. Thus in a stored routine's body, there can
+      be some 'SET OPTIMIZER_TRACE="enabled=[on|off]"' to trace only certain
+      sub-statements.
+    */
+    new_stmt_support_I_S= support_I_S_arg;
+    end_marker= end_marker_arg;
+    one_line= one_line_arg;
+    offset= offset_arg;
+    limit= limit_arg;
+    max_mem_size= max_mem_size_arg;
+    // MISC always on
+    features= Opt_trace_context::feature_value(features_arg |
+                                               Opt_trace_context::MISC);
+  }
+  if (new_stmt_support_I_S == YES_FOR_THIS && offset >= 0)
+  {
+    /* If outside the offset/limit window, no need to support I_S */
+    if (since_offset_0 < offset)
+    {
+      DBUG_PRINT("opt", ("disabled: since_offset_0(%ld) < offset(%ld)",
+                         since_offset_0, offset));
+      new_stmt_support_I_S= NO_FOR_THIS;
+    }
+    else if (since_offset_0 >= (offset + limit))
+    {
+      DBUG_PRINT("opt", ("disabled: since_offset_0(%ld) >="
+                         " offset(%ld) + limit(%ld)",
+                         since_offset_0, offset, limit));
+      new_stmt_support_I_S= NO_FOR_THIS;
+    }
+    since_offset_0++;
+  }
+
+  Opt_trace_stmt *stmt= new(std::nothrow)
+    Opt_trace_stmt(this, new_stmt_support_I_S);
+  DBUG_PRINT("opt",("new stmt %p support_I_S %d", stmt,
+                    new_stmt_support_I_S));
+  DBUG_EXECUTE_IF("opt_trace_oom1", {
+      delete stmt;
+      stmt= NULL;
+    });
+
+  if (unlikely(stmt == NULL))
+    goto err;
+  if (unlikely(!stmt->well_constructed()))
+    goto err;
+
+  if (unlikely(stack_of_current_stmts.append(current_stmt_in_gen)))
+      goto err;
+
+  if (new_stmt_support_I_S == YES_FOR_THIS)
+    rc= all_stmts_for_I_S.append(stmt);
+  else
+  {
+    /*
+      If sending only to DBUG, don't show to the user.
+      Same if tracing was temporarily disabled at higher layers with
+      Opt_trace_disable_I_S.
+      So we just link it to the 'del' list for purging when ended.
+    */
+    rc= all_stmts_to_del.append(stmt);
+  }
+
+  if (unlikely(rc))
+    goto err;
+
+  current_stmt_in_gen= stmt;
+
+  /*
+    As we just added one trace, maybe the previous ones are unneeded now
+  */
+  purge_stmts(false);
+  /* This purge may have freed space, compute max allowed size: */
+  stmt->set_allowed_mem_size(allowed_mem_size_for_current_stmt());
+  DBUG_RETURN(false);
+err:
+  delete stmt;
+  DBUG_RETURN(true);
+}
+
+
+void Opt_trace_context::end()
+{
+  if (current_stmt_in_gen != NULL)
+  {
+    current_stmt_in_gen->end();
+    Opt_trace_stmt * const parent= stack_of_current_stmts.pop();
+    current_stmt_in_gen= parent;
+    if (parent != NULL)
+    {
+      /*
+        Parent regains control, now it needs to be told that its child has
+        used space, and thus parent's allowance has shrunk.
+      */
+      parent->set_allowed_mem_size(allowed_mem_size_for_current_stmt());
+    }
+  }
+  else
+    DBUG_ASSERT(stack_of_current_stmts.elements() == 0);
+  /*
+    Purge again. Indeed when we are here, compared to the previous start() we
+    have one more ended trace, so can potentially free more. Consider
+    offset=-1 and:
+       top_stmt, started
+         sub_stmt, starts: can't free top_stmt as it is not ended yet
+         sub_stmt, ends: won't free sub_stmt (as user will want to see it),
+         can't free top_stmt as not ended yet
+       top_stmt, continued
+       top_stmt, ends: free top_stmt as it's not last and is ended, keep only
+       sub_stmt.
+    Still the purge is done in ::start() too, as an optimization, for this
+    case:
+       sub_stmt, started
+       sub_stmt, ended
+       sub_stmt, starts: can free above sub_stmt, will save memory compared to
+       free-ing it only when the new sub_stmt ends.
+  */
+  purge_stmts(false);
+}
+
+
+void Opt_trace_context::purge_stmts(bool purge_all)
+{
+  DBUG_ENTER("Opt_trace_context::purge_stmts");
+  if (!purge_all && offset >= 0)
+  {
+    /* This case is managed in @c Opt_trace_context::start() */
+    DBUG_VOID_RETURN;
+  }
+  long idx;
+  /*
+    Start from the newest traces (array's end), scroll back in time. This
+    direction is necessary, as we may delete elements from the array (assume
+    purge_all=true and array has 2 elements and we traverse starting from
+    index 0: cell 0 is deleted, making cell 1 become cell 0; index is
+    incremented to 1, which is past the array's end, so break out of the loop:
+    cell 0 (old cell 1) was not deleted, wrong).
+  */
+  for (idx= (all_stmts_for_I_S.elements() - 1) ; idx >= 0 ; idx--)
+  {
+    if (!purge_all && ((all_stmts_for_I_S.elements() + offset) <= idx))
+    {
+      /* OFFSET mandates that this trace should be kept; move to previous */
+    }
+    else
+    {
+      /* Remember to free it (as in @c free()) when possible... */
+      all_stmts_to_del.append(all_stmts_for_I_S.at(idx));
+      /* ... For now, make it invisible in the OPTIMIZER_TRACE table */
+      all_stmts_for_I_S.del(idx);
+    }
+  }
+  /* Examine list of "to be freed" traces and free what can be */
+  for (idx= (all_stmts_to_del.elements() - 1) ; idx >= 0 ; idx--)
+  {
+    Opt_trace_stmt *stmt= all_stmts_to_del.at(idx);
+    if (!stmt->has_ended())
+    {
+      /*
+        This trace is not finished, freeing it now would lead to use of
+        freed memory if a structure is later added to it. This would be
+        possible: assume OFFSET=-1 and we have
+        CALL statement starts executing
+          create its trace (call it "trace #1")
+          add structure to trace #1
+          add structure to trace #1
+          First sub-statement executing
+            create its trace (call it "trace #2")
+            from then on, trace #1 is not needed, free() it
+            add structure to trace #2
+            add structure to trace #2
+          First sub-statement ends
+          add structure to trace #1 - oops, adding to a free()d trace!
+        So if a trace is not finished, we will wait until it is and
+        re-consider it then (which is why this function is called in @c
+        Opt_trace_stmt::end() too).
+      */
+    }
+    else
+    {
+      all_stmts_to_del.del(idx);
+      delete stmt;
+    }
+  }
+  DBUG_VOID_RETURN;
+}
+
+
+size_t Opt_trace_context::allowed_mem_size_for_current_stmt() const
+{
+  DBUG_ENTER("Opt_trace_context::allowed_mem_size");
+  size_t mem_size= 0;
+  int idx;
+  for (idx= (all_stmts_for_I_S.elements() - 1) ; idx >= 0 ; idx--)
+  {
+    const Opt_trace_stmt *stmt= all_stmts_for_I_S.at(idx);
+    mem_size+= stmt->alloced_length();
+  }
+  // Even to-be-deleted traces use memory, so consider them in sum
+  for (idx= (all_stmts_to_del.elements() - 1) ; idx >= 0 ; idx--)
+  {
+    const Opt_trace_stmt *stmt= all_stmts_to_del.at(idx);
+    mem_size+= stmt->alloced_length();
+  }
+  /* The current statement is in exactly one of the two lists above */
+  mem_size-= current_stmt_in_gen->alloced_length();
+  size_t rc= (mem_size <= max_mem_size) ? (max_mem_size - mem_size) : 0;
+  DBUG_PRINT("opt", ("rc %llu max_mem_size %llu",
+                     (ulonglong)rc, (ulonglong)max_mem_size));
+  DBUG_RETURN(rc);
+}
+
+
+bool Opt_trace_context::set_query(const char *query, size_t length,
+                                  CHARSET_INFO *charset)
+{
+  return current_stmt_in_gen->set_query(query, length, charset);
+}
+
+
+Opt_trace_struct *Opt_trace_context::get_current_struct() const
+{
+  return current_stmt_in_gen->get_current_struct();
+}
+
+
+const char *Opt_trace_context::get_tail(size_t size)
+{
+  return current_stmt_in_gen->trace_buffer_tail(size);
+}
+
+
+void Opt_trace_context::reset()
+{
+  purge_stmts(true);
+  since_offset_0= 0;
+}
+
+
+const Opt_trace_stmt
+*Opt_trace_context::get_next_stmt_for_I_S(long *got_so_far) const
+{
+  const Opt_trace_stmt *p;
+  if (*got_so_far >= limit)
+    p= NULL;
+  else if (*got_so_far >= all_stmts_for_I_S.elements())
+    p= NULL;
+  else
+  {
+    p= all_stmts_for_I_S.at(*got_so_far);
+    DBUG_ASSERT(p != NULL);
+    (*got_so_far)++;
+  }
+  return p;
+}
+
+// Implementation of class Opt_trace_iterator
+
+Opt_trace_iterator::Opt_trace_iterator(Opt_trace_context *ctx_arg) :
+  ctx(ctx_arg), row_count(0)
+{
+  next();
+}
+
+void Opt_trace_iterator::next()
+{
+  cursor= ctx->get_next_stmt_for_I_S(&row_count);
+}
+
+
+void Opt_trace_iterator::get_value(Opt_trace_info *info) const
+{
+  cursor->fill_info(info);
+}
+
+
+// Implementation of class Opt_trace_disable_I_S
 
 Opt_trace_disable_I_S::Opt_trace_disable_I_S(Opt_trace_context *ctx_arg,
                                              bool disable_arg) :
   disable(disable_arg)
 {
-  if (likely(!disable))
-    return;
-  ctx= ctx_arg;
-  if (ctx != NULL)
+  if (disable)
   {
-    /* Disable in to-be-created future traces */
-    saved_ctx_support_I_S= ctx->support_I_S;
-    ctx->support_I_S= false;
-    saved_ctx_cannot_change_settings= ctx->cannot_change_settings;
-    /* Mark that this disabling is mandatory for all: */
-    ctx->cannot_change_settings= true;
-    stmt= ctx->current_stmt_in_gen;
-    if (stmt != NULL)
+    if (ctx_arg != NULL)
     {
-      /* Disable also in the current trace */
-      saved_stmt_support_I_S= stmt->support_I_S;
-      stmt->support_I_S= false;
+      stmt= ctx_arg->get_current_stmt_in_gen();
+      if (stmt != NULL)
+        stmt->disable_I_S_for_this_and_children();
     }
+    else
+      stmt= NULL;
   }
 }
 
 
-Opt_trace_disable_I_S::~Opt_trace_disable_I_S(void)
+Opt_trace_disable_I_S::~Opt_trace_disable_I_S()
 {
-  if (likely(!disable))
-    return;
-  if (ctx != NULL)
-  {
-    ctx->support_I_S= saved_ctx_support_I_S;
-    ctx->cannot_change_settings= saved_ctx_cannot_change_settings;
-    DBUG_ASSERT(stmt == ctx->current_stmt_in_gen);
-    if (stmt != NULL)
-      stmt->support_I_S= saved_stmt_support_I_S;
-  }
+  if (disable && (stmt != NULL))
+    stmt->restore_I_S();
 }
 
 #endif // OPTIMIZER_TRACE

=== modified file 'sql/opt_trace.h'
--- a/sql/opt_trace.h	2011-01-17 20:41:34 +0000
+++ b/sql/opt_trace.h	2011-02-15 20:53:19 +0000
@@ -11,20 +11,21 @@
 
    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
-   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA  */
 
 #ifndef OPT_TRACE_INCLUDED
 #define OPT_TRACE_INCLUDED
 
 #include "my_config.h"  // OPTIMIZER_TRACE
 #include "sql_array.h"  // Dynamic_array
-class THD;
-class st_select_lex;
 #include "my_base.h"    // ha_rows
-#include "sql_string.h" // String
 #include "sql_list.h"   // because sql_cmd.h needs it
 #include "sql_cmd.h"    // for enum_sql_command
 
+struct st_schema_table;
+struct TABLE_LIST;
+struct TABLE;
+
 /**
    @file
    API for the Optimizer trace (WL#5257)
@@ -57,17 +58,17 @@ class st_select_lex;
   dictionary or Perl's associative array or hash or STL's hash_map.
   @li "arrays" (ordered set of values); equivalent to Python's and Perl's list
   or STL's vector.
-  @li "values": a value can be a string, number, boolean, null, which we all call
-  "scalars", or be an object, array.
+  @li "values": a value can be a string, number, boolean, null,
+  which we all call "scalars", or be an object, array.
 
-  For example (after "<<" are explanations which are not part of output):
+  For example (explanations after "<<" are not part of output):
 @verbatim
   {                           << start of top object
     "first_name": "Gustave",  << key/value pair (value is string)
     "last_name": "Eiffel",    << key/value pair (value is string)
     "born": 1832,             << key/value pair (value is integer)
     "contributions_to": [     << key/value pair (value is array)
-       {                      << 1st item of array is an object (here a building)
+       {                      << 1st item of array is an object (a building)
          "name": "Eiffel tower",
          "location": Paris
        },                     << end of 1st item of array
@@ -105,15 +106,35 @@ class st_select_lex;
   SELECT terminates after executing the first subquery if the related IN
   predicate is false, so we won't see @c JOIN::optimize() tracing for subq2;
   whereas EXPLAIN SELECT analyzes all subqueries (see loop at the end of @c
-  select_describe()).
+  select_describe()).
 
   @section USER_SELECT_TRACING_STATEMENTS How a user traces only certain
   statements
 
-  When tracing is in force, by default each new trace overwrites the previous
-  trace. Thus, if a statement contains sub-statements (example: invokes stored
-  procedures, stored functions, triggers), at the execution's end only the
-  last sub-statement's trace is visible.
+  When tracing is in force, each SQL statement generates a trace; more
+  exactly, so does any of
+  SELECT
+  EXPLAIN SELECT
+  INSERT or REPLACE, with VALUES or SELECT,
+  UPDATE|DELETE and their multi-table variants,
+  CALL,
+  and all the above prefixed by SQL PREPARE/EXECUTE.
+  A statement is defined as "a call to @c mysql_execute_command()". Thus
+@verbatim
+  PREPARE xxx;
+@endverbatim
+  is one statement (PREPARE is executed, but not the statement to
+  prepare). But
+@verbatim
+  EXECUTE xxx;
+@endverbatim
+  is two (two calls to @c mysql_execute_command(): EXECUTE is executed, which
+  itself executes the prepared statement).
+  By default each new trace overwrites the previous trace. Thus, if a
+  statement contains sub-statements (example: invokes stored procedures,
+  stored functions, triggers), the top statement and sub-statements each
+  generate traces, but at the execution's end only the last sub-statement's
+  trace is visible.
   If the user wants to see the trace of another sub-statement, she/he can
   enable/disable tracing around the desired sub-statement, but this requires
   editing the routine's code, which may not be possible. Another solution is
@@ -177,7 +198,7 @@ class st_select_lex;
   SET OPTIMIZER_TRACE_FEATURES="feature1=on|off,...";
 @endverbatim
   where "feature1" is one optimizer feature. For example "greedy_search": a
-  certain Opt_trace_array at the start of greedy_search() has a flag
+  certain Opt_trace_array at the start of @c greedy_search() has a flag
   "GREEDY_SEARCH" passed to its constructor: this means that if the user has
   turned tracing of greedy search off, this array will not be written to the
   I_S trace, neither will any children structures. All this disabled "trace
@@ -188,34 +209,34 @@ class st_select_lex;
   Check @c Opt_trace* usage in @c advance_sj_state():
 
 @verbatim
-  Opt_trace_array ota(trace, "semijoin_strategy_choice");
+  Opt_trace_array trace_choices(trace, "semijoin_strategy_choice");
 @endverbatim
 
-  this creates an array for key "semijoin_strategy_choice". We are going to
+  This creates an array for key "semijoin_strategy_choice". We are going to
   list possible semijoin strategy choices.
 
 @verbatim
-  Opt_trace_object oto(trace);
+  Opt_trace_object trace_one_strategy(trace);
 @endverbatim
 
-  this creates an object without key (normal, it's in an array). This
+  This creates an object without key (normal, it's in an array). This
   object will describe one single strategy choice.
 
 @verbatim
-  oto.add("strategy", "FirstMatch");
+  trace_one_strategy.add("strategy", "FirstMatch");
 @endverbatim
 
-  this adds a key/value pair to the just-created object: key is "strategy",
+  This adds a key/value pair to the just-created object: key is "strategy",
   value is "FirstMatch". This is the strategy to be described in the
   just-created object.
 
 @verbatim
-  oto.add("cost", *current_read_time).
+  trace_one_strategy.add("cost", *current_read_time).
     add("records", *current_record_count);
-  oto.add("chosen", (pos->sj_strategy == SJ_OPT_FIRST_MATCH));
+  trace_one_strategy.add("chosen", (pos->sj_strategy == SJ_OPT_FIRST_MATCH));
 @endverbatim
 
-  this adds 3 key/value pairs: cost of strategy, number of records produced
+  This adds 3 key/value pairs: cost of strategy, number of records produced
   by this strategy, and whether this strategy is chosen.
 
   After that, there is similar code for other semijoin strategies.
@@ -241,15 +262,14 @@ class st_select_lex;
 
   For more output examples, check @c mysql-test/r/optimizer_trace*prot.result.
 
-  Feature can be un-compiled with @code cmake -DOPTIMIZER_TRACE=0 @endcode
-  or @code configure --without-optimizer-trace @endcode
+  Feature can be un-compiled with @code cmake -DOPTIMIZER_TRACE=0 @endcode.
 
   @section WITH_DBUG Interaction between trace and DBUG
 
   We don't want to have to duplicate code like this:
 @verbatim
   DBUG_PRINT("info",("cost %g",cost));
-  trace_object.add("cost",cost));
+  Opt_trace_object(thd->opt_trace).add("cost",cost);
 @endverbatim
 
   Thus, any optimizer trace operation, *even* if tracing is run-time disabled,
@@ -288,23 +308,32 @@ class st_select_lex;
   can and should often be used. Having a restricted vocabulary helps
   consistency.
 
-  @li Use only simple characters for key names: a-ZA-Z_# (to denote a
-  number), and no space.
+  @li Use only simple characters for key names: a-zA-Z_#, and no space. '#'
+  serves to denote a number, like in "select#".
 
   @li Keep in mind than in an object, keys are not ordered; an application may
-  parse the JSON output and output it again with keys' order changed; thus
+  parse the JSON output and output it again with keys order changed; thus
   when order matters, use an array (which may imply having anonymous objects
   as items of the array, with keys inside the anonymous objects, see how it's
-  done in JOIN::optimize_steps()). Keep in mind that in an object keys should
+  done in @c JOIN::optimize()). Keep in mind that in an object keys should
   be unique, an application may lose duplicate keys.
 
 */
 
-class Opt_trace_disable_I_S;
 class Opt_trace_struct;
-class Opt_trace_object;
-class Opt_trace_array;
-class Opt_trace_stmt;
+class Opt_trace_stmt;           // implementation detail local to opt_trace.cc
+
+/**
+   The different ways a trace output can be sent to
+   INFORMATION_SCHEMA.OPTIMIZER_TRACE.
+   Note that a trace may also go to DBUG, independently of the values below.
+*/
+enum enum_support_I_S
+{
+  YES_FOR_THIS= 0,                       ///< sent to I_S
+  NO_FOR_THIS= 1,                        ///< not sent, undefined for children
+  NO_FOR_THIS_AND_CHILDREN= 2            ///< not sent, and children not sent
+};
 
 
 /**
@@ -315,15 +344,16 @@ class Opt_trace_stmt;
   @verbatim Opt_trace_context *opt_trace; @endverbatim
   It maintains properties of the session's regarding tracing: enabled/disabled
   state, style (all trace on one line, or not, etc), a list of all remembered
-  traces of previous and current statement (as restricted by OFFSET/LIMIT),
-  and a pointer to the current open object/array.
+  traces of previous and current SQL statement (as restricted by OFFSET/LIMIT),
+  and a pointer to the current (being-generated) trace (which itself has a
+  pointer to its current open object/array).
 
   Here is why the context needs to provide the current open object/array:
 
   @li When adding a value (scalar or array or object) to an array, or adding a
   key/value pair to an object, we need this outer object or array (from now
-  on, we will call "structure" an "object or array", as both are structured
-  types).
+  on, we will use the term "structure" for "object or array", as both are
+  structured types).
 
   @li The most natural way would be that the outer object would be passed in
   argument to the adder (the function which wants to add the value or
@@ -335,157 +365,190 @@ class Opt_trace_stmt;
   modifying many function prototypes.
   Example (in gdb "backtrace" notation: inner frames first):
 @verbatim
-    #0  Item_in_subselect::single_value_transformer - opens an object for key "transformation"
+    #0  Item_in_subselect::single_value_transformer
+        - opens an object for key "transformation"
     #1  Item_in_subselect::select_in_like_transformer - does no tracing
     #2  Item_allany_subselect::select_transformer - does no tracing
     #3  JOIN::prepare - opens an object for key "join_preparation"
 @endverbatim
-  So the object opened in #3 would have to passed in argument to #2 and #1 in
-  order to finally reach #0 where object "transformation" would be added to it.
+  So the object opened in #3 would have to be passed in argument to #2 and #1
+  in order to finally reach #0 where object "transformation" would be added to
+  it.
 
   @li So, as we cannot practically pass the object down, we rather maintain a
-  "current object or array" in the Opt_trace_context context; it's a pointer
-  to an instance of Opt_trace_struct, and the function deep down (frame #0) grabs
-  it from the context, where it was depositted by the function high up (frame
-  #13 in the last example).
+  "current object or array" accessible from the Opt_trace_context context;
+  it's a pointer to an instance of Opt_trace_struct, and the function deep
+  down (frame #0) grabs it from the context, where it was deposited by the
+  function high up (frame #13 in the last example).
 */
+
 class Opt_trace_context
 {
 public:
-  Opt_trace_context(void);
-  ~Opt_trace_context(void);
+  Opt_trace_context();
+  ~Opt_trace_context();
+
+  /**
+     Starts a new trace.
+     @param  support_I_S      Should trace be in information_schema
+     @param  end_marker       For a key/(object|array) pair, should the key be
+                              repeated in a comment when the object|array
+                              closes? like
+@verbatim
+                              "key_foo": {
+                                           multi-line blah
+                                         } / * key_foo * /
+@endverbatim
+                              This is for human-readability only, not valid in
+                              JSON. Note that YAML supports #-prefixed
+                              comments (we would just need to put the next
+                              item's "," before the current item's "#").
+     @param  one_line         Should the trace be on a single line without
+                              indentation? (More compact for network transfer
+                              to programs, less human-readable.)
+     @param  offset           Offset for restricting trace production.
+     @param  limit            Limit for restricting trace production.
+     @param  max_mem_size     Maximum allowed for cumulated size of all
+                              remembered traces.
+     @param  features         Only these optimizer features should be traced.
+
+     @retval false            ok
+     @retval true             error (OOM): instance is unusable, so only
+                              destructor is permitted on it; any other
+                              member function has undefined effects.
+  */
+  bool start(enum enum_support_I_S support_I_S,
+             bool end_marker, bool one_line,
+             long offset, long limit, ulong max_mem_size,
+             ulonglong features);
+  /// Ends the current (=open, unfinished, being-generated) trace.
+  void end();
+
+  /// Returns whether there is a current trace
+  bool is_started() const { return current_stmt_in_gen != NULL; }
+
+  /**
+     Set the "original" query (not transformed, as sent by client) for the
+     current trace.
+     @param   query    query
+     @param   length   query's length
+     @param   charset  charset which was used to encode this query
+     @retval  false    ok
+     @retval  true     error
+  */
+  bool set_query(const char* query, size_t length, CHARSET_INFO *charset);
+
+  /**
+     @returns the current (=open, unfinished, being-generated) structure. This
+     function will be deleted after being eliminated from opt_range.cc.
+  */
+  Opt_trace_struct *get_current_struct() const;
+
+  /**
+     @returns a pointer to the last bytes of the current trace, 0-terminated.
+     Can be called only if is_started() is true.
+     @param  size  how many last bytes are wanted
+  */
+  const char *get_tail(size_t size);
 
   /**
-     Flags' names for @@@@optimizer_trace variable of @c sys_vars.cc :
+     Brainwash: deletes all remembered traces and resets counters regarding
+     OFFSET/LIMIT (so that the next statement is considered as "at offset
+     0"). Does not reset the @@@@optimizer_trace_offset/limit variables.
+  */
+  void reset();
+
+  /// @sa parameters of Opt_trace_context::start()
+  bool get_end_marker() const { return end_marker; }
+  /// @sa parameters of Opt_trace_context::start()
+  bool get_one_line() const { return one_line; }
+
+  /**
+     Names of flags for @@@@optimizer_trace variable of @c sys_vars.cc :
      @li "enabled" = tracing enabled
      @li "end_marker" = see parameter of @ref Opt_trace_context::start
      @li "one_line"= see parameter of @ref Opt_trace_context::start
      @li "default".
   */
   static const char *flag_names[];
+
   /** Flags' numeric values for @@@@optimizer_trace variable */
-  enum flag_value {
-    FLAG_DEFAULT= 0, FLAG_ENABLED= 1, FLAG_END_MARKER= 2, FLAG_ONE_LINE= 4
+  enum {
+    FLAG_DEFAULT= 0, FLAG_ENABLED= 1 << 0,
+    FLAG_END_MARKER= 1 << 1, FLAG_ONE_LINE= 1 << 2
   };
+
   /**
      Features' names for @@@@optimizer_trace_features variable of
      @c sys_vars.cc:
      @li "greedy_search" = the greedy search for a plan
      @li "range_optimizer" = the cost analysis of accessing data through
      ranges in indices
-     @li "dynamic_range" = the range optimization performed for each record 
+     @li "dynamic_range" = the range optimization performed for each record
                            when access method is dynamic range
      @li "repeated_subselect" = the repeated execution of subselects
      @li "default".
   */
   static const char *feature_names[];
+
   /** Features' numeric values for @@@@optimizer_trace_features variable */
   enum feature_value {
-    GREEDY_SEARCH= 1,
-    RANGE_OPTIMIZER= 2,
-    DYNAMIC_RANGE= 4,
-    REPEATED_SUBSELECT= 8, ///@todo join cache, semijoin...
+    GREEDY_SEARCH= (1 << 0),
+    RANGE_OPTIMIZER= (1 << 1),
+    DYNAMIC_RANGE= (1 << 2),
+    REPEATED_SUBSELECT= (1 << 3), ///@todo join cache, semijoin...
     /*
-      if you add here, update feature_value of empty implementation
-      and FEATURES_DEFAULT!
+      If you add here, update feature_value of empty implementation
+      and default_features!
     */
     /**
        Anything unclassified, including the top object (thus, by "inheritance
        from parent", disabling MISC makes an empty trace).
        This feature cannot be disabled by the user; for this it is important
-       that it always has biggest flag; flag's value itself does not matter
+       that it always has the biggest flag; the flag's value itself does not matter.
     */
-    MISC= 128
+    MISC= (1 << 7)
   };
-  static const feature_value FEATURES_DEFAULT;
+  static const feature_value default_features;
 
   /**
-     Starts a new trace.
-     @param  need_it_for_I_S  should trace produce output suitable for
-                              information_schema, or only send to DBUG
-     @param  end_marker       for a key/(object|array) pair, should the key be
-                              repeated in a comment when the object|array
-                              closes, like
-@verbatim
-                              "key_foo": {
-                                           multi-line blah
-                                         } / * key_foo * /
-@endverbatim
-                              This is for human-readability only, not valid in
-                              JSON. Note that YAML supports #-prefixed
-                              comments (we would just need to put the next
-                              item's "," before the current item's "#").
-     @param  one_line         should the trace be on a single line without
-                              indentation (more compact for network transfer
-                              to programs, less human-readable)
-     @param  offset           offset for restricting trace production
-     @param  limit            limit for restricting trace production
-     @param  max_mem_size     maximum allowed for cumulated size of all
-                              remembered traces
-     @param  features         only those optimizer features should be traced
-
-     @retval false            ok
-     @retval true             error (OOM)
-  */
-  bool start(bool need_it_for_I_S, bool end_marker, bool one_line,
-             long offset, long limit, ulong max_mem_size,
-             ulonglong features);
-  /** Ends the current trace */
-  void end(void);
-  /** Returns whether there is a current trace */
-  bool is_started(void) const { return current_stmt_in_gen != NULL; }
-  /** Returns the current open Object Or Array */
-  Opt_trace_struct *get_current_struct(void) const;
-  /**
-     Returns a pointer to the last bytes of the current trace, 0-terminated.
-     @param  size  how many last bytes are wanted
-  */
-  const char *get_tail(size_t size);
-  /**
-     Brainwash: deletes all remembered traces and resets counters regarding
-     OFFSET/LIMIT (so that the next statement is considered as "at offset
-     0").
+     @returns whether an optimizer feature should be traced.
+     @param  f  feature
   */
-  void reset(void);
+  bool feature_enabled (Opt_trace_context::feature_value f) const
+  { return features & f; }
+
+  /// Turn this on only in unit tests for out-of-memory testing
+  static bool simulate_oom_in_buffers;
 
   /**
-     Set the "original query" for the current statement.
-     @param   query    query
-     @param   length   query's length
-     @param   charset  charset which was used to encode this query
-     @retval  false    ok
-     @retval  true     error
+     Opt_trace_struct is passed Opt_trace_context*, and needs to know
+     to which statement's trace to attach, so Opt_trace_context must provide
+     this information.
+     This cannot return pointer-to-const, but as the definition of
+     Opt_trace_stmt is confined to opt_trace.cc, this is ok (includer of
+     opt_trace.h is ignorant of the layout of the pointed instance so cannot
+     use it).
   */
-  bool set_query(const char* query, size_t length, CHARSET_INFO *charset);
+  Opt_trace_stmt *get_current_stmt_in_gen() { return current_stmt_in_gen; }
 
-  bool feature_enabled (feature_value arg) const { return features & arg; }
+  /**
+     @returns the next statement to show in I_S.
+     @param[in,out]  got_so_far  How many statements the caller got so far
+     (by previous calls to this function); function updates this count.
+     @note traces are returned from oldest to newest.
+   */
+  const Opt_trace_stmt *get_next_stmt_for_I_S(long *got_so_far) const;
 
 private:
 
   /**
-     The oldest of {traces remembered for putting into the OPTIMIZER_TRACE
-     table}: the one created first. Will be first row of OPTIMIZER_TRACE
-     table. With newest_stmt_to_show, this forms a double-linked list,
-     as we need:
-     - to output traces "oldest first" in OPTIMIZER_TRACE
-     - to preserve traces "newest first" when optimizer-trace-offset<0
-     - to delete a trace in the middle of the list when it is permanently out
-     of the offset/limit showable window.
-  */
-  Opt_trace_stmt *oldest_stmt_to_show;
-  Opt_trace_stmt *newest_stmt_to_show; ///< Newest remembered statement trace
-  /** List of traces which are unneeded because of OFFSET/LIMIT */
-  Opt_trace_stmt *stmt_to_del;
-  /**
-     Number of statements traced so far since "offset 0", for comparison with
-     OFFSET/LIMIT
-  */
-  ha_rows since_offset_0;
-
-  /**
      Trace which is currently being generated, where structures are being
-     added. In simple cases it is equal to @c newest_stmt_to_show. But it can
-     be prior to it, for example when calling a stored routine:
+     added. "in_gen" stands for "in generation", being-generated.
+
+     In simple cases it is equal to the last element of array
+     all_stmts_for_I_S. But it can be prior to it, for example when calling a
+     stored routine:
 @verbatim
      CALL statement starts executing
        create trace of CALL (call it "trace #1")
@@ -505,173 +568,81 @@ private:
      the CALL's one, where structures will be added, until a second
      sub-statement is executed.
      Another case is when the current statement sends only to DBUG:
-     newest_stmt_to_show lists only traces shown in OPTIMIZER_TRACE.
+     all_stmts_for_I_S lists only traces shown in OPTIMIZER_TRACE.
   */
   Opt_trace_stmt *current_stmt_in_gen;
 
   /**
-     true: all to-be-created traces should support OPTIMIZER_TRACE and DBUG
-     false: they should support only DBUG (or nothing, if non-debug binary)
-  */
-  bool support_I_S;
-  bool end_marker;  ///< copy of parameter of Opt_trace_context::start
-  bool one_line;    ///< copy of parameter of Opt_trace_context::start
-  long offset;      ///< copy of parameter of Opt_trace_context::start
-  long limit;       ///< copy of parameter of Opt_trace_context::start
-  size_t max_mem_size; ///< copy of parameter of Opt_trace_context::start
-  feature_value features; ///< copy of parameter of Opt_trace_context::start
+     To keep track of what is the current statement, as execution goes into a
+     sub-statement, and back to the upper statement, we have a stack of
+     successive values of current_stmt_in_gen:
+     when in a statement we enter a substatement (a new trace), we push the
+     statement's trace on the stack and change current_stmt_in_gen to the
+     substatement's trace; when leaving the substatement we pop from the stack
+     and set current_stmt_in_gen to the popped value.
+  */
+  Dynamic_array<Opt_trace_stmt *> stack_of_current_stmts;
 
-  /** Whether the settings above may be changed for a new trace */
-  bool cannot_change_settings;
   /**
-     Find and delete unneeded traces.
-     For example if OFFSET=-1,LIMIT=1, only the last trace is needed. When a
-     new trace is started, the previous traces becomes unneeded and this
-     function deletes it which frees memory.
-     @param  all  if true, ignore OFFSET and thus delete everything
+     List of remembered traces for putting into the OPTIMIZER_TRACE
+     table. Element 0 is the one created first, will be first row of
+     OPTIMIZER_TRACE table. The array structure fulfills those needs:
+     - to output traces "oldest first" in OPTIMIZER_TRACE
+     - to preserve traces "newest first" when @@@@optimizer_trace_offset<0
+     - to delete a trace in the middle of the list when it is permanently out
+       of the offset/limit showable window.
   */
-  void purge(bool purge_all); ///< find and delete unneeded traces
-  /** put trace in list of traces to show in OPTIMIZER_TRACE */
-  void link_to_shown(Opt_trace_stmt *stmt);
-  /** remove trace from list of traces to show in OPTIMIZER_TRACE */
-  void unlink_from_shown(Opt_trace_stmt *stmt);
-  /** put trace in list of unneeded traces */
-  void link_to_del(Opt_trace_stmt *stmt);
-  /** remove trace from list of unneeded traces */
-  void unlink_from_del(Opt_trace_stmt *stmt);
-  /** compute maximum allowed memory size for trace 'for_stmt'*/
-  size_t allowed_mem_size(const Opt_trace_stmt *for_stmt) const;
-
-  friend class Opt_trace_stmt;
-  friend class Opt_trace_struct;
-  friend class Opt_trace_disable_I_S;
-  friend class Opt_trace_iterator;
-  friend void opt_trace_print_expanded_query(THD *thd,
-                                             st_select_lex *select_lex);
-};
-
-
-/**
-  @class Opt_trace_stmt
-
-  The trace of one statement. For example, executing a stored procedure
-  containing 3 sub-statements will produce 4 traces (one for the CALL
-  statement, one for each sub-statement), so 4 Opt_trace_stmt linked together
-  into Opt_trace_context's lists.
-*/
-class Opt_trace_stmt
-{
-private: // Except other classes in this file nobody should use Opt_trace_stmt
+  Dynamic_array<Opt_trace_stmt *> all_stmts_for_I_S;
   /**
-     Constructor, starts a trace
-     @param  ctx_arg          context
-     @param  parent           parent trace (if any); if tracing a
-                              sub-statement, could be trace of the statement
-                              which called the sub-statement
-     @param  support_I_S_arg  whether this trace should support
-                              OPTIMIZER_TRACE
-  */
-  Opt_trace_stmt(Opt_trace_context *ctx_arg, Opt_trace_stmt *parent,
-                 bool support_I_S_arg);
-  /** Ends a trace; destruction may not be possible immediately though */
-  void end(void);
-  bool started;
-  Opt_trace_context *ctx;     ///< context
-  Opt_trace_stmt *parent;     ///< parent trace
-  Opt_trace_struct *current_struct; ///< current open structure
-  /**
-     true: trace supports OPTIMIZER_TRACE and DBUG
-     false: trace supports only DBUG (or nothing, if non-debug binary)
-  */
-  bool support_I_S;
-
-  /**
-     Extension of class String, for storing query or trace.
-
-     We want to prevent users from calling String functions which allocate
-     memory, because we want to record their malloc error status. So we make
-     them accessible only through our wrappers, thanks to private
-     inheritance.
+     List of traces which are unneeded because of OFFSET/LIMIT, and scheduled
+     for deletion from memory.
   */
-  class Buffer: private String
-  {
-private:
-    size_t allowed_mem_size; ///< allowed memory size for this String
-    size_t missing_bytes;    ///< how many bytes could not be added
-    bool   malloc_error;     ///< whether there was a malloc/realloc() error
-public:
-    Buffer() : allowed_mem_size(0), missing_bytes(0), malloc_error(false) {}
-    uint32 alloced_length(void) const { return String::alloced_length(); }
-    uint32 length(void) const { return String::length(); }
-    bool realloc(uint32 arg_length);
-    char *c_ptr_safe(void)
-    {
-      /* Alas, String::c_ptr_safe() does no realloc error checking */
-      return String::c_ptr_safe();
-    }
-    inline const char *ptr(void) const { return String::ptr(); }
-    /**
-       @param  str  String, in this instance's charset
-       @param  length  length of string
-    */
-    bool append(const char *str, size_t length);
-    bool append(const char *str) { return append(str, strlen(str)); }
-    /**
-       Like @c append() but escapes certain characters for string values to
-       be JSON-compliant.
-       @param  str  String in UTF8
-       @param  length  length of string
-    */
-    bool append_escaped(const char *str, size_t length);
-    bool append(char chr);
+  Dynamic_array<Opt_trace_stmt *> all_stmts_to_del;
 
-    friend class Opt_trace_struct;
-    friend class Opt_trace_context;
-    friend class Opt_trace_stmt;
-    friend class Opt_trace_iterator;
-  };
-
-  Buffer buffer;         ///< Where the trace is accumulated
-  Buffer query_buffer;   ///< Where the query is put
+  bool end_marker;          ///< copy of parameter of Opt_trace_context::start
+  bool one_line;            ///< copy of parameter of Opt_trace_context::start
+  /// copy of parameter of Opt_trace_context::start
+  Opt_trace_context::feature_value features;
+  long offset;              ///< copy of parameter of Opt_trace_context::start
+  long limit;               ///< copy of parameter of Opt_trace_context::start
+  size_t max_mem_size;      ///< copy of parameter of Opt_trace_context::start
 
   /**
-     Nesting level of the current structure.
-     The more nested ("deep"), the more indentation spaces we add on the left.
+     Number of statements traced so far since "offset 0", for comparison with
+     OFFSET and LIMIT, when OFFSET >= 0.
   */
-  int depth;
-  /** maximum nesting level so far */
-  int max_depth;
-  /** whether current structure is empty; one such info per nesting level */
-  Dynamic_array<int> level_empty;
-  /** enter a deeper nesting level */
-  void push(void);
-  /** leave current nesting level and go back one level up */
-  void pop(void);
-  /** put comma, newline and indentation */
-  void separator(void);
-  /** put newline and indentation */
-  void next_line(void);
+  long since_offset_0;
 
-  /** @sa Opt_trace_context::set_query() */
-  bool set_query(const char* query, size_t length, CHARSET_INFO *charset);
-
-  Opt_trace_stmt *prev, *next; ///< list management
+  /// @returns whether the constructor went perfectly well.
+  bool well_constructed() const;
 
-  /** By how much we should increase buffer's size when it's becoming full */
-  static const size_t buffer_alloc_increment;
-  /** Length of an empty trace */
-  static const size_t empty_trace_len;
+  /**
+     Find and delete unneeded traces.
+     For example if OFFSET=-1,LIMIT=1, only the last trace is needed. When a
+     new trace is started, the previous traces becomes unneeded and this
+     function deletes them which frees memory.
+     @param  all  if true, ignore OFFSET and thus delete everything
+  */
+  void purge_stmts(bool purge_all);       ///< find and delete unneeded traces
 
-public:
-  /** Turn this on only in unit tests for out-of-memory testing */
-  static bool simulate_oom;
+  /**
+     Compute maximum allowed memory size for current trace. The current trace
+     is the only one active. Other traces break down in two groups:
+     - the finished ones (from previously executed statements),
+     - the "started but not finished ones": they are not current, are not
+     being updated at this moment: this must be the trace of a top
+     statement calling a substatement which is the current trace now: trace's
+     top statement is not being updated at this moment.
+     So the length of all those other traces is "a given", and the current
+     trace can grow in the room left by them.
+  */
+  size_t allowed_mem_size_for_current_stmt() const;
+
+  /// Not defined copy constructor, to disallow copy.
+  Opt_trace_context(const Opt_trace_context&);
+  /// Not defined assignment operator, to disallow assignment.
+  Opt_trace_context& operator=(const Opt_trace_context&);
 
-  friend class Opt_trace_context;
-  friend class Opt_trace_struct;
-  friend class Opt_trace_disable_I_S;
-  friend class Opt_trace_iterator;
-  friend void opt_trace_print_expanded_query(THD *thd,
-                                             st_select_lex *select_lex);
 };
 
 
@@ -681,76 +652,82 @@ public:
 struct Opt_trace_info
 {
   /**
-     String containing trace. It is NUL-terminated, only to aid debugging or
-     unit testing; this property is not relied upon in normal server usage.
+     String containing trace.
+     If trace has been end()ed, this is 0-terminated, which is only to aid
+     debugging or unit testing; this property is not relied upon in normal
+     server usage.
+     If trace has not been ended, this is not 0-terminated. That rare case can
+     happen when a substatement reads OPTIMIZER_TRACE (at that stage, the top
+     statement is still executing so its trace is not ended yet, but may still
+     be read by the sub-statement).
   */
   const char *trace_ptr;
-  size_t trace_length;   ///< length of trace string
-  /** String containing query. NUL-termination: like trace_ptr */
+  size_t trace_length;                          ///< length of trace string
+  /// String containing original query. 0-termination: like trace_ptr.
   const char *query_ptr;
-  size_t query_length;   ///< length of query string
-  CHARSET_INFO *query_charset; ///< charset of query string
+  size_t query_length;                          ///< length of query string
+  CHARSET_INFO *query_charset;                  ///< charset of query string
   /**
     How many bytes this trace is missing (for traces which were truncated
-    because of --optimizer-trace-max-mem-size)
+    because of @@@@optimizer_trace_max_mem_size).
   */
   size_t missing_bytes;
-  bool malloc_error; ///< whether there was some OS malloc error
+  bool malloc_error;               ///< whether there was some OS malloc error
 };
 
 
 /**
    Iterator over the list of remembered traces.
    @note due to implementation, the list must not change during an
-   iterator's lifetime.
+   iterator's lifetime, or results may be unexpected (no crash though).
 */
 class Opt_trace_iterator
 {
 public:
   /**
-    Constructor.
     @param  ctx  context
-    @note row_count starts at 1 because after construction we are
-    positioned either:
-    - on the first element (row_count=1 is then correct)
-    - or on the end, in which case row_count will not be used, as the user
-    should compare with the 'end' iterator before taking the content with the
-   '*' operator.
-  */
-  Opt_trace_iterator(Opt_trace_context *ctx) :
-    cursor(ctx->oldest_stmt_to_show), row_count(1), limit(ctx->limit) {}
-  void operator++(int); ///< advances iterator to next trace
+  */
+  Opt_trace_iterator(Opt_trace_context *ctx);
+
+  void next();                           ///< Advances iterator to next trace.
+
   /**
-     Returns information about the trace on which the iterator is
+     Provides information about the trace on which the iterator is
      positioned.
+     @param[out]  info  information returned.
+     The usage pattern is
+     1) instantiate the iterator
+     2) test at_end(), if false: call get_value() and then next()
+     3) repeat (2) until at_end() is true.
   */
-  Opt_trace_info operator*();
-  /** Needed to compare the iterator with the list's end */
-  bool operator!=(const Opt_trace_iterator &other) const
-  { return cursor != other.cursor; }
-  static Opt_trace_iterator end;   ///< List's end
+  void get_value(Opt_trace_info *info) const;
+
+  /// @returns whether iterator is positioned to the end.
+  bool at_end() const { return cursor == NULL; }
+
 private:
-  /** Only to construct the 'end' iterator */
-  Opt_trace_iterator(void) : cursor(NULL) {}
-  Opt_trace_stmt *cursor; ///< trace which the iterator is positioned on
-  ha_rows row_count;      ///< how many traces yielded so far
-  ha_rows limit;          ///< yield "empty" after yielding that many traces
+  /// Pointer to context, from which traces are retrieved
+  Opt_trace_context *ctx;
+  const Opt_trace_stmt *cursor; ///< trace which the iterator is positioned on
+  long row_count;               ///< how many traces retrieved so far
 };
 
 
+class Partial_access_for_Opt_trace_stmt;
+
 /**
    Object and array are both "structured data" and have lots in common, so the
    Opt_trace_struct is a base class for them.
    When you want to add a structure to the trace, you create an instance of
    Opt_trace_object or Opt_trace_array, then you add information to it with
-   add(), then the destructor closes the OOA (we use RAII, Resource
+   add(), then the destructor closes the structure (we use RAII, Resource
    Acquisition Is Initialization).
 */
+
 class Opt_trace_struct
 {
-protected:
+public:
   /**
-     Constructor.
      @param  ctx_arg  Optimizer trace context for this structure
      @param  requires_key_arg  whether this structure requires/forbids keys
                       for values put inside it (an object requires them, an
@@ -759,7 +736,7 @@ protected:
                       NULL otherwise. This pointer must remain constant and
                       valid until the object is destroyed (to support
                       @ref saved_key).
-     @param  feature  optimizer feature to which this objects belong
+     @param  feature  optimizer feature to which this structure belongs
 
      This constructor is never called directly, only from subclasses.
   */
@@ -768,22 +745,18 @@ protected:
                    Opt_trace_context::feature_value feature) :
     started(false)
   {
-    /* A first inlined test */
+    // A first inlined test
     if (unlikely(ctx_arg != NULL) && ctx_arg->is_started())
     {
-      /* tracing enabled: must fully initialize the structure */
-      do_construct(ctx_arg->current_stmt_in_gen,
-                   ctx_arg->current_stmt_in_gen->current_struct,
-                   requires_key_arg, key, feature);
+      // Tracing enabled: must fully initialize the structure.
+      do_construct(ctx_arg, requires_key_arg, key, feature);
     }
     /*
       Otherwise, just leave "started" to false, it marks that the structure is
       dummy.
     */
   }
-  ~Opt_trace_struct(void) { if (unlikely(started)) do_destruct(); }
-
-public:
+  ~Opt_trace_struct() { if (unlikely(started)) do_destruct(); }
 
   /**
      Adds a value (of string type) to the structure. A key is specified, so it
@@ -792,7 +765,7 @@ public:
      There are two "add_*" variants to add a string value.
      If the value is 0-terminated and each character
      - is ASCII 7-bit
-     - has code >=32 and is neither '"' nor '\\'
+     - has ASCII code >=32 and is neither '"' nor '\\'
      then add_alnum() should be used. That should be the case for all fixed
      strings like add_alnum("cause", "cost").
      Otherwise, add_utf8() should be used; it accepts any UTF8-encoded character
@@ -804,33 +777,9 @@ public:
      @param  key    key
      @param  value  value
      @param  val_length  length of string 'value'
-     @return a reference to the structure, useful for chaining like this:
+     @returns a reference to the structure, useful for chaining like this:
      @verbatim add(x,y).add(z,t).add(u,v) @endverbatim
 
-     In the most performance-critical case (release binary with
-     tracing compiled in but not enabled at runtime), we don't want function
-     calls, which is why we have an inline if() in each add() method below.
-     In an optimizer-intensive test (as in BUG#50595 with a 20-table plan
-     search, see mysql-test/t/bug50595.test), this inlining took
-     @c greedy_search()'s clock time from 2.6 secs down to 2.0 secs.
-     In calls like
-     @verbatim <some Opt_trace_struct>.add().add().add() @endverbatim
-     which have 3 if()s (one per @c add()), it has been measured that wrapping
-     that inside a single if(), like
-     @verbatim if(started) { <some Opt_trace_struct>.add().add().add() } @endverbatim
-     degraded performance (from 2.0 to 2.1 secs). Adding if() in one single
-     code line caused de-inlining of add() methods in several unrelated places
-     and the performance degraded. So we don't do it. The performance impact of
-     multiple chained add() seems comparable to a single add() anyway.
-     Removing @c likely() also increases from 2.0 to 2.1, so we leave
-     them.
-     The same test against a vanilla tree without this WL, took 1.9 secs; so
-     does the WL tree with optimizer trace compiled out.
-     So the important figure is that this WL takes the common case from 1.9 to
-     2.0 secs, but it is hoped that the used testcase (20-table plan) is
-     unusually intensive on the optimizer and thus real-life cases should have
-     a smaller penalty. This will be benchmarked with the QA teams.
-
      String-related add() variants are named add_[something]():
      - to avoid confusing the compiler between:
      add(const char *value, size_t    val_length) and
@@ -841,18 +790,18 @@ public:
      add(const char *key,   double value) instead of
      add(const char *value, size_t val_length).
   */
-  Opt_trace_struct& add_alnum(const char *key,
-                              const char *value)
+  Opt_trace_struct& add_alnum(const char *key, const char *value)
   {
     if (likely(!started))
       return *this;
     return do_add(key, value, strlen(value), false);
   }
+
   /**
      Adds a value (of string type) to the structure. No key is specified, so
      it adds only the value (the structure must thus be an array).
      @param  value  value
-     @return a reference to the structure
+     @returns a reference to the structure
   */
   Opt_trace_struct& add_alnum(const char *value)
   {
@@ -860,6 +809,7 @@ public:
       return *this;
     return do_add(NULL, value, strlen(value), false);
   }
+
   /**
      Like add_alnum() but supports any UTF8 characters in 'value'.
      Will "escape" 'value' to be JSON-compliant.
@@ -874,28 +824,31 @@ public:
       return *this;
     return do_add(key, value, val_length, true);
   }
-  /** Variant of add_utf8() for adding to an array (no key) */
+
+  /// Variant of add_utf8() for adding to an array (no key)
   Opt_trace_struct& add_utf8(const char *value, size_t val_length)
   {
     if (likely(!started))
       return *this;
     return do_add(NULL, value, val_length, true);
   }
-  /** Variant of add_utf8() where 'value' is 0-terminated */
-  Opt_trace_struct& add_utf8(const char *key,
-                             const char *value)
+
+  /// Variant of add_utf8() where 'value' is 0-terminated
+  Opt_trace_struct& add_utf8(const char *key, const char *value)
   {
     if (likely(!started))
       return *this;
     return do_add(key, value, strlen(value), true);
   }
-  /** Variant of add_utf8() where 'value' is 0-terminated */
+
+  /// Variant of add_utf8() where 'value' is 0-terminated
   Opt_trace_struct& add_utf8(const char *value)
   {
     if (likely(!started))
       return *this;
     return do_add(NULL, value, strlen(value), true);
   }
+
   /**
      Add a value (of Item type) to the structure. The Item should be a
      condition (like a WHERE clause) which will be pretty-printed into the
@@ -989,6 +942,7 @@ public:
       return *this;
     return do_add(NULL, value);
   }
+  /// Adds a JSON null object (==Python's "None")
   Opt_trace_struct& add_null(const char *key)
   {
     if (likely(!started))
@@ -996,21 +950,36 @@ public:
     return do_add_null(key);
   }
   /**
-    The exception to RAII: this function is an explicit way to end a
+     Helper to put the database/table name in an object.
+     @param  tab  TABLE* pointer
+  */
+  Opt_trace_struct& add_utf8_table(TABLE *tab)
+  {
+    if (likely(!started))
+      return *this;
+    return do_add_utf8_table(tab);
+  }
+  /**
+    The exception to RAII: this function is an explicit way of ending a
     structure before it goes out of scope. Don't use it unless RAII mandates
     a new scope which mandates re-indenting lots of code lines.
   */
-  void end(void) { if (unlikely(started)) do_destruct(); }
+  void end() { if (unlikely(started)) do_destruct(); }
+
+  /**
+     Whether a JSON syntax error should cause an assertion in debug binaries;
+     turn this off only in unit tests for bad syntax testing
+  */
+  static bool dbug_assert_on_syntax_error;
 
 private:
-  /** Full initialization. @sa Opt_trace_struct::Opt_trace_struct */
-  void do_construct(Opt_trace_stmt *stmt,
-                    Opt_trace_struct *parent,
-                    bool requires_key,
-                    const char *key,
+
+  /// Full initialization. @sa Opt_trace_struct::Opt_trace_struct
+  void do_construct(Opt_trace_context *ctx,
+                    bool requires_key, const char *key,
                     Opt_trace_context::feature_value feature);
-  /** Really does destruction */
-  void do_destruct(void);
+  /// Really does destruction
+  void do_destruct();
   /**
      Really adds to the object. @sa add().
      @param  escape  do JSON-compliant escaping of 'value'.
@@ -1024,106 +993,44 @@ private:
   Opt_trace_struct& do_add(const char *key, longlong value);
   Opt_trace_struct& do_add(const char *key, ulonglong value);
   Opt_trace_struct& do_add(const char *key, double value);
-  /** Adds a JSON null object (==Python's "None") */
   Opt_trace_struct& do_add_null(const char *key);
-  /**
-     Adds an inner structure to this structure.
-     @param key  key if the inner structure is the value of a key/value pair,
-                 NULL otherwise.
-  */
-  void add_struct(const char *key);
-  /**
-     Emits a JSON syntax error.
-     @param key  key involved in the error, NULL if there is no key.
-
-     When adding a value (or array or object) to an array, or a key/value pair
-     to an object, we need this outer array or object.
-
-     It would be possible, when trying to add a key to an array (which is wrong
-     in JSON) (or similarly when trying to add a value without any key to an
-     object), to catch it at compilation time, if the outer object was passed an
-     argument of type @c Opt_trace_array* to the adder. Then the @c add(key,val)
-     call would not compile as Opt_trace_array wouldn't feature it.
-
-     But as explained in comment of class Opt_trace_context we
-     cannot pass down the object, have to maintain a "current object or
-     array" in the Opt_trace_context context (pointer to an instance of
-     Opt_trace_struct), and the adder grabs it from the context.
-
-     As this current structure is of type "object or array", we cannot do
-     compile-time checks that only suitable methods are used. A call to @c
-     add(key,value) is necessarily legal for the compiler as the structure may
-     be an object, though it will be wrong in case the structure is actually
-     an array at run-time. Thus we have the risk of an untested particular
-     situation where the current structure is not an object (but an array)
-     though the code expected it to be one. We catch that at run-time:
-     Opt_trace_struct methods detect wrong usage, like adding a value to an
-     object without specifying a key, and then they:
-     @li in debug build, assert
-     @li in release builds, emit a warning string in the trace and should not
-     crash. The trace is invalid JSON but still human-readable (our best
-     effort).
-  */
-  void syntax_error(const char *key);
-  void add_key_name(const char *key); ///< Puts a key inside the structure
-
-  /** not defined copy constructor, to disallow copy */
-  Opt_trace_struct(const Opt_trace_struct&);
-  /** not defined assignment operator, to disallow = */
-  Opt_trace_struct& operator=(const Opt_trace_struct&);
+  Opt_trace_struct& do_add_utf8_table(TABLE *tab);
+
+  const char *check_key(const char *key);  ///< Validates the key
+  friend class Partial_access_for_Opt_trace_stmt;
+
+  Opt_trace_struct(const Opt_trace_struct&);            ///< not defined
+  Opt_trace_struct& operator=(const Opt_trace_struct&); ///< not defined
+
+  bool started; ///< Whether the structure does tracing or is dummy
 
-  bool started; ///< Whether the structure does tracing
   /**
      Whether the structure requires/forbids keys for values inside it.
-     1: this is an object. 0: this is an array. Other: incorrect.
+     true: this is an object. false: this is an array.
 
-     @note The canonical way would be to not have such int8 per instance, but
-     rather have a pure virtual method Opt_trace_struct::requires_key(),
-     overloaded by Opt_trace_object (returning 1) and by Opt_trace_array
-     (returning 0). But this would have drawbacks:
-     @li it would add a vtbl pointer to each instance which takes even more
-     space than int8
-     @li it would add requires_key() function calls which cost more than
-     reading one int8
-     @li Opt_trace_object::requires_key() would not be accessible from
-     Opt_trace_struct::construct() (which would complicate coding), whereas the
-     int8 is.
-  */
-  int8 requires_key;
-  Opt_trace_stmt *stmt;  ///< Trace owning the structure
-  /** Parent structure ("outer structure" of the structure) */
-  Opt_trace_struct *parent;
-  /** key if the structure is the value of a key/value pair, NULL otherwise */
+     @note The canonical way would be to not have such bool per instance, but
+     rather have a pure virtual member function
+     Opt_trace_struct::requires_key(), overloaded by Opt_trace_object
+     (returning true) and by Opt_trace_array (returning false). But
+     Opt_trace_object::requires_key() would not be accessible from
+     Opt_trace_struct::do_construct() (which would complicate coding), whereas
+     the bool is.
+  */
+  bool requires_key;
+  Opt_trace_stmt *stmt;                        ///< Trace owning the structure
+  /// Key if the structure is the value of a key/value pair, NULL otherwise
   const char *saved_key;
 #ifndef DBUG_OFF
   /**
      Fixed-length prefix of previous key in this structure, if this structure
-     is an object
+     is an object. Serves to detect when adding two same consecutive keys to
+     an object, which would be wrong.
   */
   char previous_key[25];
 #endif
-
-  /**
-     A structure may, because it belongs to a feature for which tracing is not
-     wanted, disable the statement's tracing; it will thus apply to all the
-     structure's children. When the structure is destroyed, it restores the
-     initial setting.
-  */
-  bool save_stmt_support_I_S;
-
-  /** opening and closing symbols for arrays ([])and objects ({}) */
-  static const char brackets[];
-  /** human-readable names of structure types */
-  static const char* types[];
-public:
-  /**
-     Whether a JSON syntax error should cause an assertion in debug binaries;
-     turn this off only in unit tests for bad syntax testing
-  */
-  static bool dbug_assert_on_syntax_error;
-
 };
 
+
 /**
    A JSON object (unordered set of key/value pairs).
    Defines only a constructor, all the rest is inherited from Opt_trace_struct.
@@ -1136,21 +1043,24 @@ public:
      key/value pair.
      @param  ctx  context for this object
      @param  key  key
+     @param  feature  optimizer feature to which this structure belongs
   */
   Opt_trace_object(Opt_trace_context *ctx, const char *key,
                    Opt_trace_context::feature_value feature=
-                   Opt_trace_context::MISC) :
-  Opt_trace_struct(ctx, true, key, feature) {}
+                   Opt_trace_context::MISC)
+    : Opt_trace_struct(ctx, true, key, feature)
+  {}
   /**
      Constructs an object. No key is specified, so the object is just a value
      (serves for the single root object, or for adding the object to an array).
      @param  ctx  context for this object
+     @param  feature  optimizer feature to which this structure belongs
   */
   Opt_trace_object(Opt_trace_context *ctx,
                    Opt_trace_context::feature_value feature=
-                   Opt_trace_context::MISC) :
-  Opt_trace_struct(ctx, true, NULL, feature) {}
-
+                   Opt_trace_context::MISC)
+    : Opt_trace_struct(ctx, true, NULL, feature)
+  {}
 };
 
 
@@ -1166,33 +1076,31 @@ public:
      key/value pair.
      @param  ctx  context for this array
      @param  key  key
+     @param  feature  optimizer feature to which this structure belongs
   */
   Opt_trace_array(Opt_trace_context *ctx, const char *key,
                   Opt_trace_context::feature_value feature=
-                  Opt_trace_context::MISC) :
-  Opt_trace_struct(ctx, false, key, feature) {}
+                  Opt_trace_context::MISC)
+    : Opt_trace_struct(ctx, false, key, feature)
+  {}
   /**
      Constructs an array. No key is specified, so the array is just a value
      (serves for adding the object to an array).
      @param  ctx  context for this array
+     @param  feature  optimizer feature to which this structure belongs
   */
   Opt_trace_array(Opt_trace_context *ctx,
                   Opt_trace_context::feature_value feature=
-                  Opt_trace_context::MISC) :
-  Opt_trace_struct(ctx, false, NULL, feature) {}
+                  Opt_trace_context::MISC)
+    : Opt_trace_struct(ctx, false, NULL, feature)
+  {}
 };
 
 
 /**
    Instantiate an instance of this class for specific, punctual cases where
-   optimizer trace should write only to DBUG and not I_S. Example:
-   @c QUICK_RANGE_SELECT::dbug_dump() writes to the I_S trace and DBUG a
-   "range_select" object. This is good when called from
-   @c SQL_SELECT::test_quick_select(). But it is also called by @c TEST_join()
-   (from @c JOIN::optimize()), and there the produced I_S trace is
-   undesirable, so it is silenced with such object below (trace only goes to
-   DBUG then, for the duration of @c TEST_join()).
-   Re-enabling happens when the instance is destroyed.
+   optimizer trace, in a certain section of optimizer code, should write only
+   to DBUG and not I_S. Example: see sql_select.cc.
    Note that this class should rarely be used; the "feature" parameter of
    Opt_trace_struct is a good alternative.
 */
@@ -1200,13 +1108,12 @@ class Opt_trace_disable_I_S
 {
 public:
   /**
-     Constructor
-     @param  ctx_arg  context
-     @param  disable_arg  whether the instance should really disable
+     @param  ctx_arg      Context.
+     @param  disable_arg  Whether the instance should really disable
                           anything. If false, the object is dummy. If true,
                           tracing-to-I_S is disabled at construction and
                           re-enabled at destruction.
-     @details a dummy instance is there only for RAII reasons. Imagine we want
+     @details A dummy instance is there only for RAII reasons. Imagine we want
      to do this:
 @verbatim
      {
@@ -1232,17 +1139,14 @@ public:
 @endverbatim
   */
   Opt_trace_disable_I_S(Opt_trace_context *ctx_arg, bool disable_arg);
-  /** Destructor. Re-enables tracing. */
+  /// Destructor. Restores trace's "enabled" property to its previous value.
   ~Opt_trace_disable_I_S();
+
 private:
-  Opt_trace_context *ctx; ///< context to disable/re-enable
-  Opt_trace_stmt *stmt;   ///< trace to disable/re-enable
-  /** whether this instance really does disabling */
-  bool disable;
-  /** saved value before disabling, for restoring in the destructor */
-  bool saved_ctx_support_I_S;
-  bool saved_stmt_support_I_S;
-  bool saved_ctx_cannot_change_settings;
+  bool disable;              ///< whether this instance really does disabling.
+  Opt_trace_stmt *stmt;      ///< statement where disabling happens
+  Opt_trace_disable_I_S(const Opt_trace_disable_I_S&); // not defined
+  Opt_trace_disable_I_S& operator=(const Opt_trace_disable_I_S&);//not defined
 };
 
 
@@ -1253,16 +1157,15 @@ private:
 
 //@{
 
-struct TABLE_LIST;
 /**
   Start tracing a THD's actions (generally at a statement's start).
   @param  thd  the THD
   @param  tbl  list of tables read/written by the statement.
   @param  sql_command  SQL command being prepared or executed
-  @return whether this function decided to trace (and thus the corresponding
+  @returns whether this function decided to trace (and thus the corresponding
   opt_trace_end() should end the trace).
-  @note if tracing was already started (by a top statement above the present
-  sub-statement in the call chain), and this function decides to trace
+  @note if tracing was already started by a top statement above the present
+  sub-statement in the call chain, and this function decides to trace
   (respectively not trace) the sub-statement, it returns "true"
   (resp. false). Each sub-statement is responsible for ending the trace which it
   has started.
@@ -1277,6 +1180,7 @@ bool opt_trace_start(THD *thd, const TAB
 */
 void opt_trace_end(THD *thd, bool started);
 
+class st_select_lex;
 /**
    Prints SELECT query to optimizer trace. It is not the original query (@see
    opt_trace_set_query()) but a printout of the parse tree (Item-s).
@@ -1293,22 +1197,23 @@ void opt_trace_print_expanded_query(THD 
 */
 void opt_trace_add_select_number(Opt_trace_struct *s,
                                  uint select_number);
-
-
-  /**
-     Set the "original query" for the current statement.
-     @param   trace    trace context
-     @param   query    query
-     @param   length   query's length
-     @param   charset  charset which was used to encode this query
-     @retval  false    ok
-     @retval  true     error
-  */
-static inline bool opt_trace_set_query(Opt_trace_context *trace,
-                                       const char *query,
-                                       size_t query_length,
-                                       CHARSET_INFO *query_charset)
-{ return trace->set_query(query, query_length, query_charset); }
+/**
+   Set the "original" query (not transformed, as sent by client) for the
+   current trace.
+   @param   trace    trace context
+   @param   query    query
+   @param   query_length   query's length
+   @param   charset  charset which was used to encode this query
+   @retval  false    ok
+   @retval  true     error
+
+   @todo make the charset parameter "const CHARSET_INFO *". I have a patch
+   for this, but will push it later because it has "consequences" in many
+   other files (by "const contamination", it affects String, then Field,
+   Item, charset code, 300 kB unified diff).
+*/
+bool opt_trace_set_query(Opt_trace_context *trace, const char *query,
+                         size_t query_length, CHARSET_INFO *query_charset);
 
 /**
    Fills information_schema.OPTIMIZER_TRACE with rows (one per trace)
@@ -1317,7 +1222,6 @@ static inline bool opt_trace_set_query(O
 */
 int fill_optimizer_trace_info(THD *thd, TABLE_LIST *tables, Item *cond);
 
-struct st_schema_table;
 /**
    Create fields' descriptions of information_schema.OPTIMIZER_TRACE
    @retval 0 ok
@@ -1338,11 +1242,11 @@ class Opt_trace_context
 public:
   /// We need this enum even if tracing is disabled
   enum feature_value {
-    GREEDY_SEARCH= 1,
-    RANGE_OPTIMIZER= 2,
-    DYNAMIC_RANGE= 4,
-    REPEATED_SUBSELECT= 8,
-    MISC= 128
+    GREEDY_SEARCH= (1 << 0),
+    RANGE_OPTIMIZER= (1 << 1),
+    DYNAMIC_RANGE= (1 << 2),
+    REPEATED_SUBSELECT= (1 << 3),
+    MISC= (1 << 7)
   };
 };
 
@@ -1352,10 +1256,12 @@ class Opt_trace_object
 public:
   Opt_trace_object(Opt_trace_context *ctx, const char *key,
                    Opt_trace_context::feature_value feature=
-                   Opt_trace_context::MISC) {}
+                   Opt_trace_context::MISC)
+  {}
   Opt_trace_object(Opt_trace_context *ctx,
                    Opt_trace_context::feature_value feature=
-                   Opt_trace_context::MISC) {}
+                   Opt_trace_context::MISC)
+  {}
   Opt_trace_object& add_alnum(const char *key, const char *value)
   { return *this; }
   Opt_trace_object& add_utf8(const char *key,
@@ -1370,7 +1276,8 @@ public:
   Opt_trace_object& add(const char *key, longlong value) { return *this; }
   Opt_trace_object& add(const char *key, ulonglong value) { return *this; }
   Opt_trace_object& add(const char *key, double value) { return *this; }
-  void end(void) {}
+  Opt_trace_object& add_utf8_table(TABLE *tab) { return *this; }
+  void end() {}
 };
 
 /** Empty implementation used when optimizer trace is not compiled in */
@@ -1379,10 +1286,12 @@ class Opt_trace_array
 public:
   Opt_trace_array(Opt_trace_context *ctx, const char *key,
                   Opt_trace_context::feature_value feature=
-                  Opt_trace_context::MISC) {}
+                  Opt_trace_context::MISC)
+  {}
   Opt_trace_array(Opt_trace_context *ctx,
                   Opt_trace_context::feature_value feature=
-                  Opt_trace_context::MISC) {}
+                  Opt_trace_context::MISC)
+  {}
   Opt_trace_array& add_alnum(const char *value) { return *this; }
   Opt_trace_array& add_utf8(const char *value, size_t val_length)
   { return *this; }
@@ -1395,7 +1304,8 @@ public:
   Opt_trace_array& add(longlong value) { return *this; }
   Opt_trace_array& add(ulonglong value) { return *this; }
   Opt_trace_array& add(double value) { return *this; }
-  void end(void) {}
+  Opt_trace_array& add_utf8_table(TABLE *tab) { return *this; }
+  void end() {}
 };
 
 /** Empty implementation used when optimizer trace is not compiled in */
@@ -1431,21 +1341,15 @@ public:
    @param from           description of the before-transformation state
    @param to             description of the after-transformation state
 */
-#define OPT_TRACE_TRANSFORM(trace,object_level0,object_level1,select_number,from,to) \
+#define OPT_TRACE_TRANSFORM(trace,object_level0,object_level1,          \
+                            select_number,from,to)                      \
   Opt_trace_object object_level0(trace);                                \
   Opt_trace_object object_level1(trace, "transformation");              \
   opt_trace_add_select_number(&object_level1, select_number);           \
   object_level1.add_alnum("from", from).add_alnum("to", to);
 
-/**
-   Helper to put the database/table name in the trace
-   @param  t  TABLE* pointer
-*/
-#define add_utf8_table(t)                                               \
-  add_utf8("database", (t)->s->db.str, (t)->s->db.length).              \
-  add_utf8("table", (t)->alias)
-
-#if !defined(DBUG_OFF) && !defined(OPTIMIZER_TRACE)
+#if !defined(DBUG_OFF) && !defined(OPTIMIZER_TRACE) \
+ && !defined(OPTIMIZER_TRACE_UNITTEST)
 
 /*
   A debug binary without optimizer trace compiled in, will miss some

=== modified file 'sql/opt_trace2server.cc'
--- a/sql/opt_trace2server.cc	2011-01-12 13:44:58 +0000
+++ b/sql/opt_trace2server.cc	2011-02-15 20:53:19 +0000
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -11,7 +11,7 @@
 
    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
-   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA  */
 
 /**
    @file
@@ -19,20 +19,20 @@
    Helpers connecting the optimizer trace to THD or Information Schema. They
    are dedicated "to the server" (hence the file's name).
    In order to create a unit test of the optimizer trace without defining
-   Item_field (and all its parent classes), st_select_lex..., those helpers
-   are defined out of opt_trace.cc.
+   Item_field (and all its parent classes), st_select_lex..., these helpers
+   are defined out of opt_trace.cc, in this file.
 */
 
-#ifdef USE_PRAGMA_IMPLEMENTATION
-#pragma implementation				// gcc: Class implementation
-#endif
-
 #include "opt_trace.h"
 #include "sql_show.h"  // schema_table_stored_record()
 #include "sql_parse.h" // sql_command_flags
 
 #ifdef OPTIMIZER_TRACE
 
+namespace {
+
+const char I_S_table_name[]= "OPTIMIZER_TRACE";
+
 /* Standalone functions */
 
 /**
@@ -43,9 +43,8 @@
    OPTIMIZER_TRACE will overwrite OPTIMIZER_TRACE as it runs and provide
    uninteresting info.
 */
-static bool list_has_optimizer_trace_table(const TABLE_LIST *tbl)
+bool list_has_optimizer_trace_table(const TABLE_LIST *tbl)
 {
-  static const char I_S_table_name[]= "OPTIMIZER_TRACE";
   for( ; tbl ; tbl= tbl->next_global)
   {
     if (tbl->schema_table &&
@@ -60,8 +59,7 @@ static bool list_has_optimizer_trace_tab
    Whether a SQL command qualifies for optimizer tracing.
    @param  sql_command  the command
 */
-static inline bool sql_command_can_be_traced
-(enum enum_sql_command sql_command)
+inline bool sql_command_can_be_traced(enum enum_sql_command sql_command)
 {
   /*
     Tracing is limited to a few SQL commands only.
@@ -71,7 +69,7 @@ static inline bool sql_command_can_be_tr
     - they probably don't have anything interesting optimizer-related
     - select_lex for them might be uninitialized and unprintable.
     - SHOW WARNINGS would create an uninteresting trace and thus overwrite the
-    previous interesting one.
+      previous interesting one.
 
     About prepared statements: note that we don't turn on tracing for
     SQLCOM_PREPARE (respectively SQLCOM_EXECUTE), because we don't know yet what
@@ -86,6 +84,7 @@ static inline bool sql_command_can_be_tr
   return (sql_command_flags[sql_command] & CF_OPTIMIZER_TRACE);
 }
 
+} // namespace
 
 bool opt_trace_start(THD *thd, const TABLE_LIST *tbl,
                      enum enum_sql_command sql_command)
@@ -96,35 +95,72 @@ bool opt_trace_start(THD *thd, const TAB
     We need an optimizer trace:
     * if the user asked for it or
     * if we are using --debug (because the trace serves as a relay for it, for
-    optimizer debug printouts).
-    Additionally, we should not be tracing if:
+      optimizer debug printouts).
+    Additionally, we should *not* be tracing if:
     * command is not interesting (optimizer-wise)
     * query involves I_S.OPTIMIZER_TRACE (we do not want to overwrite the
-    trace while reading it with SELECT).
+      trace while reading it with SELECT).
   */
-  ulonglong var= thd->variables.optimizer_trace;
-  bool need_it_for_I_S= (var & Opt_trace_context::FLAG_ENABLED);
-  bool need_it_for_dbug= false, allocated_here= false;
+  const ulonglong var= thd->variables.optimizer_trace;
+  enum enum_support_I_S support_I_S= (var & Opt_trace_context::FLAG_ENABLED) ?
+    YES_FOR_THIS : NO_FOR_THIS;
+  bool need_it_for_dbug= false;
+  bool allocated_here= false;
+
   /* This will be triggered if --debug or --debug=d:opt_trace is used */
   DBUG_EXECUTE("opt", need_it_for_dbug= true;);
-  if (!(need_it_for_I_S || need_it_for_dbug))
-    goto disable;
-  if (!sql_command_can_be_traced(sql_command))
-    goto disable;
-  if (list_has_optimizer_trace_table(tbl))
-    goto disable;
+  if (support_I_S != YES_FOR_THIS && !need_it_for_dbug)
+  {
+    // No need to create a trace for this statement.
+    DBUG_RETURN(false);
+  }
+  if (!sql_command_can_be_traced(sql_command) ||
+      list_has_optimizer_trace_table(tbl))
+  {
+    /*
+      The statement will not do tracing.
+
+      We still create a trace, because:
+      1) DBUG may need it
+      2) imagine there is a parent statement which has a trace, and we
+      don't create a trace for the child statement here. Then trace structures
+      of the child will be accidentally attached to the parent's trace (as
+      it is still 'current_stmt_in_gen', which constructors of
+      Opt_trace_struct will use); thus the child's trace will be visible (as a
+      chunk of the parent's trace). That would be incorrect. To avoid this, we
+      create a trace for the child but with I_S output disabled; this changes
+      'current_stmt_in_gen', thus this child's trace structures will be
+      attached to the child's trace and not be visible.
+
+      We also constrain its substatements to do no tracing. This is an extra
+      safety, to prevent against tracing happening in unexpected scenarios, in
+      commands which we normally think do no tracing. Assume that in the
+      future, ALTER TABLE would be able to call a stored function, like this:
+        ALTER TABLE t MODIFY COLUMN c DEFAULT stored_func(d)
+      ('d' being another column; to say that by default 'c' should be
+      calculated from 'd'). This introduces a new code path, which may lead to
+      some incorrect JSON syntax in the trace.
+
+      With the constraint in place, the SELECT would not be traced.
+      This constraint forces us to enable trace for CALL because otherwise,
+      execution of a stored procedure would not be traced. Same for SQL PREPARE
+      and SQL EXECUTE.
+    */
+    support_I_S= NO_FOR_THIS_AND_CHILDREN;
+  }
+
   /*
     We don't allocate it in THD's MEM_ROOT as it must survive until a next
     statement (SELECT) reads the trace.
   */
   if (thd->opt_trace == NULL)
   {
-    if ((thd->opt_trace= new Opt_trace_context()) == NULL)
-      goto disable;
+    if ((thd->opt_trace= new(std::nothrow) Opt_trace_context) == NULL)
+      DBUG_RETURN(false);
     allocated_here= true;
   }
-start_trace:
-  if (thd->opt_trace->start(need_it_for_I_S,
+
+  if (thd->opt_trace->start(support_I_S,
                             (var & Opt_trace_context::FLAG_END_MARKER),
                             (var & Opt_trace_context::FLAG_ONE_LINE),
                             thd->variables.optimizer_trace_offset,
@@ -137,28 +173,9 @@ start_trace:
       delete thd->opt_trace;
       thd->opt_trace= NULL;
     }
-    goto disable;
+    DBUG_RETURN(false);
   }
   DBUG_RETURN(true); // started all ok
-disable:
-  /*
-    No need to create a trace for this statement.
-    Exception: imagine there is a parent statement which has a trace, and we
-    don't create a trace for the child statement here. Then trace structures of
-    the child will be accidentally attached to the parent's trace (as
-    it is still 'current_stmt_in_gen', which constructors of Opt_trace_struct
-    will use); thus the child's trace will be visible (as a chunk of the
-    parent's trace). That would be incorrect.
-    To avoid this, we create a trace for the child but with I_S output disabled;
-    this changes 'current_stmt_in_gen', thus this child's trace structures
-    will be attached to the child's trace and not be visible.
-  */
-  if (need_it_for_I_S && thd->opt_trace != NULL && thd->opt_trace->is_started())
-  {
-    need_it_for_I_S= false;
-    goto start_trace;
-  }
-  DBUG_RETURN(false);
 }
 
 
@@ -172,6 +189,7 @@ void opt_trace_end(THD *thd, bool starte
 
 
 void opt_trace_print_expanded_query(THD *thd, st_select_lex *select_lex)
+
 {
   Opt_trace_context * const trace= thd->opt_trace;
   if (likely(trace == NULL || !trace->is_started()))
@@ -180,9 +198,8 @@ void opt_trace_print_expanded_query(THD 
   String str(buff,(uint32) sizeof(buff), system_charset_info);
   str.length(0);
   /*
-    If this statement is not SELECT, what is shown here can be
-    inexact. INSERT SELECT is shown as SELECT. DELETE WHERE is shown
-    as SELECT WHERE.
+    If this statement is not SELECT, what is shown here can be inexact.
+    INSERT SELECT is shown as SELECT. DELETE WHERE is shown as SELECT WHERE.
     This is acceptable given the audience (developers) and the goal (the
     inexact parts are irrelevant for the optimizer).
   */
@@ -196,7 +213,7 @@ void opt_trace_add_select_number(Opt_tra
 {
   if (unlikely(select_number >= INT_MAX))
   {
-    /* clearer than any huge number */
+    // Clearer than any huge number.
     s->add_alnum("select#", "fake");
   }
   else
@@ -204,6 +221,13 @@ void opt_trace_add_select_number(Opt_tra
 }
 
 
+bool opt_trace_set_query(Opt_trace_context *trace, const char *query,
+                         size_t query_length, CHARSET_INFO *query_charset)
+{
+  return trace->set_query(query, query_length, query_charset);
+}
+
+
 int fill_optimizer_trace_info(THD *thd, TABLE_LIST *tables, Item *cond)
 {
 #ifdef OPTIMIZER_TRACE
@@ -211,15 +235,14 @@ int fill_optimizer_trace_info(THD *thd, 
   if (thd->opt_trace != NULL)
   {
     TABLE *table= tables->table;
+    Opt_trace_info info;
     /*
       The list must not change during the iterator's life time. This is ok as
       the life time is only the present block which cannot change the list.
     */
-    for (Opt_trace_iterator it(thd->opt_trace) ;
-         it != Opt_trace_iterator::end ;
-         it++)
+    for (Opt_trace_iterator it(thd->opt_trace) ; !it.at_end() ; it.next())
     {
-      const Opt_trace_info info= *it;
+      it.get_value(&info);
       restore_record(table, s->default_values);
       /*
         We will put the query, which is in character_set_client, into a column
@@ -227,8 +250,10 @@ int fill_optimizer_trace_info(THD *thd, 
         When literals with introducers are used, see "LiteralsWithIntroducers"
         in this file.
       */
-      table->field[0]->store(info.query_ptr, info.query_length, info.query_charset);
-      table->field[1]->store(info.trace_ptr, info.trace_length, system_charset_info);
+      table->field[0]->store(info.query_ptr, info.query_length,
+                             info.query_charset);
+      table->field[1]->store(info.trace_ptr, info.trace_length,
+                             system_charset_info);
       table->field[2]->store(info.missing_bytes, true);
       table->field[3]->store(info.malloc_error, true);
       if (schema_table_store_record(thd, table))
@@ -248,24 +273,22 @@ int fill_optimizer_trace_info(THD *thd, 
 ST_FIELD_INFO optimizer_trace_info[]=
 {
   /* name, length, type, value, maybe_null, old_name, open_method */
-  {"QUERY", 65535, MYSQL_TYPE_STRING, 0, false, "", SKIP_OPEN_TABLE},
-  {"TRACE", 65535, MYSQL_TYPE_STRING, 0, false, 0, SKIP_OPEN_TABLE},
+  {"QUERY", 65535, MYSQL_TYPE_STRING, 0, false, NULL, SKIP_OPEN_TABLE},
+  {"TRACE", 65535, MYSQL_TYPE_STRING, 0, false, NULL, SKIP_OPEN_TABLE},
   {"MISSING_BYTES_BEYOND_MAX_MEM_SIZE", 20, MYSQL_TYPE_LONG,
-   0, false, "", SKIP_OPEN_TABLE},
-  {"OS_MALLOC_ERROR", 1, MYSQL_TYPE_TINY, 0, false, "", SKIP_OPEN_TABLE},
+   0, false, NULL, SKIP_OPEN_TABLE},
+  {"OS_MALLOC_ERROR", 1, MYSQL_TYPE_TINY, 0, false, NULL, SKIP_OPEN_TABLE},
   {NULL, 0,  MYSQL_TYPE_STRING, 0, true, NULL, 0}
 };
 
 
 int make_optimizer_trace_table_for_show(THD *thd, ST_SCHEMA_TABLE *schema_table)
 {
-  ST_FIELD_INFO *field_info;
   Name_resolution_context *context= &thd->lex->select_lex.context;
-  int i;
 
-  for (i= 0; schema_table->fields_info[i].field_name != NULL; i++)
+  for (int i= 0; schema_table->fields_info[i].field_name != NULL; i++)
   {
-    field_info= &schema_table->fields_info[i];
+    ST_FIELD_INFO *field_info= &schema_table->fields_info[i];
     Item_field *field= new Item_field(context,
                                       NullS, NullS, field_info->field_name);
     if (field)

=== modified file 'sql/sql_array.h'
--- a/sql/sql_array.h	2010-07-13 17:29:44 +0000
+++ b/sql/sql_array.h	2011-02-15 17:19:30 +0000
@@ -36,10 +36,19 @@ public:
     my_init_dynamic_array(&array, sizeof(Elem), prealloc, increment);
   }
 
+  /**
+     @note Though formally this could be declared "const" it would be
+     misleading as it returns a non-const reference to the array's data.
+  */
   Elem& at(int idx)
   {
     return *(((Elem*)array.buffer) + idx);
   }
+  /// Const variant of at(), which cannot change data
+  const Elem& at(int idx) const
+  {
+    return *(((Elem*)array.buffer) + idx);
+  }
 
   Elem *front()
   {
@@ -51,12 +60,22 @@ public:
     return ((Elem*)array.buffer) + array.elements;
   }
 
-  bool append(Elem &el)
+  bool append(const Elem &el)
   {
-    return (insert_dynamic(&array, (uchar*)&el));
+    return insert_dynamic(&array, &el);
   }
 
-  int elements()
+  Elem& pop()
+  {
+    return *static_cast<Elem *>(pop_dynamic(&array));
+  }
+
+  void del(uint idx)
+  {
+    delete_dynamic_element(&array, idx);
+  }
+
+  int elements() const
   {
     return array.elements;
   }
@@ -65,6 +84,17 @@ public:
   {
     array.elements= 0;
   }
+  /**
+    @returns whether the array guarantees that it contains free cells already
+    successfully allocated from memory.
+    @param required  caller requires this amount of free allocated cells
+    Caller can use this to make sure that its @c append() calls will succeed,
+    not fail due to out-of-memory.
+  */
+  bool guaranteed_room(uint required) const
+  {
+    return (array.max_element - array.elements) >= required;
+  }
 
   ~Dynamic_array()
   {

=== modified file 'sql/sql_parse.cc'
--- a/sql/sql_parse.cc	2010-12-19 14:24:03 +0000
+++ b/sql/sql_parse.cc	2011-02-15 17:19:30 +0000
@@ -397,15 +397,23 @@ void init_update_queries(void)
   sql_command_flags[SQLCOM_INSTALL_PLUGIN]=    CF_CHANGES_DATA;
   sql_command_flags[SQLCOM_UNINSTALL_PLUGIN]=  CF_CHANGES_DATA;
 
+  // (1): without it, a procedure's substatements would not be traced.
+  sql_command_flags[SQLCOM_CALL]=      CF_REEXECUTION_FRAGILE |
+                                       CF_CAN_GENERATE_ROW_EVENTS |
+                                       CF_OPTIMIZER_TRACE; // (1)
   /*
-    The following is used to preserver CF_ROW_COUNT during the
-    a CALL or EXECUTE statement, so the value generated by the
-    last called (or executed) statement is preserved.
-    See mysql_execute_command() for how CF_ROW_COUNT is used.
+    (1): without it, the executed statement would not be traced. Execution of
+    SQLCOM_EXECUTE calls mysql_execute_command() on executed command, which
+    will check whether that executed command can actually be traced.
   */
-  sql_command_flags[SQLCOM_CALL]=      CF_REEXECUTION_FRAGILE |
-                                       CF_CAN_GENERATE_ROW_EVENTS;
-  sql_command_flags[SQLCOM_EXECUTE]=   CF_CAN_GENERATE_ROW_EVENTS;
+  sql_command_flags[SQLCOM_EXECUTE]=   CF_CAN_GENERATE_ROW_EVENTS |
+                                       CF_OPTIMIZER_TRACE; // (1)
+  /*
+    (1): without it, the prepared statement would not be traced.
+    check_prepared_statement() will check whether prepared command can
+    actually be traced.
+  */
+  sql_command_flags[SQLCOM_PREPARE]=   CF_OPTIMIZER_TRACE; // (1)
 
   /*
     The following admin table operations are allowed
@@ -5137,7 +5145,7 @@ bool check_some_routine_access(THD *thd,
 }
 
 
-/*
+/**
   Check if the given table has any of the asked privileges
 
   @param thd		 Thread handler

=== modified file 'sql/sys_vars.cc'
--- a/sql/sys_vars.cc	2011-01-14 13:42:35 +0000
+++ b/sql/sys_vars.cc	2011-02-15 20:53:19 +0000
@@ -1437,7 +1437,7 @@ static Sys_var_flagset Sys_optimizer_tra
        " and val is one of {on, off, default}",
        SESSION_VAR(optimizer_trace_features), CMD_LINE(REQUIRED_ARG),
        Opt_trace_context::feature_names,
-       DEFAULT(Opt_trace_context::FEATURES_DEFAULT));
+       DEFAULT(Opt_trace_context::default_features));
 
 /** Delete all old optimizer traces */
 static bool optimizer_trace_update(sys_var *self, THD *thd,

=== modified file 'unittest/gunit/CMakeLists.txt'
--- a/unittest/gunit/CMakeLists.txt	2010-09-18 16:25:43 +0000
+++ b/unittest/gunit/CMakeLists.txt	2011-01-11 08:15:46 +0000
@@ -207,7 +207,7 @@ IF (CMAKE_CXX_COMPILER_ID STREQUAL "SunP
 ENDIF()
 
 # Add tests (link them with sql library) 
-SET(TESTS sql_list mdl mdl_mytap my_regex thread_utils opt_trace)
+SET(TESTS sql_list mdl mdl_mytap my_regex thread_utils opt_trace opt_notrace)
 FOREACH(test ${TESTS})
   ADD_EXECUTABLE(${test}-t ${test}-t.cc)
   TARGET_LINK_LIBRARIES(${test}-t gunit sqlgunitlib strings dbug regex)

=== added file 'unittest/gunit/opt_notrace-t.cc'
--- a/unittest/gunit/opt_notrace-t.cc	1970-01-01 00:00:00 +0000
+++ b/unittest/gunit/opt_notrace-t.cc	2011-01-11 08:15:46 +0000
@@ -0,0 +1,21 @@
+#include "my_config.h"
+#include <gtest/gtest.h>
+
+#if defined(OPTIMIZER_TRACE)
+#undef OPTIMIZER_TRACE
+#endif
+#define OPTIMIZER_TRACE_UNITTEST
+
+#include <opt_trace.h>
+
+namespace {
+
+TEST(Foo, Bar)
+{
+  // Fill in more here, to verify that implementations are in sync!!!
+  Opt_trace_context trace;
+  Opt_trace_object oto(&trace);
+  Opt_trace_array  ota(&trace);
+}
+
+}

=== modified file 'unittest/gunit/opt_trace-t.cc'
--- a/unittest/gunit/opt_trace-t.cc	2010-12-30 16:17:40 +0000
+++ b/unittest/gunit/opt_trace-t.cc	2011-02-15 20:53:19 +0000
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -11,7 +11,7 @@
 
    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
-   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA  */
 
 /**
    @file
@@ -25,12 +25,22 @@
 
 #ifdef OPTIMIZER_TRACE
 
-/* Some minimal working implementations to have a working trace... */
 CHARSET_INFO *system_charset_info= &my_charset_utf8_general_ci;
-/* ... end here. */
 
-const ulonglong all_features= Opt_trace_context::FEATURES_DEFAULT;
+namespace {
 
+const ulonglong all_features= Opt_trace_context::default_features;
+
+/**
+   @note It is a macro, for proper reporting of line numbers in case of
+   assertion failure. SCOPED_TRACE will report line number at the
+   macro expansion site.
+*/
+#define check_json_compliance(str, length)              \
+  {                                                     \
+    SCOPED_TRACE("");                                   \
+    do_check_json_compliance(str, length);              \
+  }
 
 /**
    Checks compliance of a trace with JSON syntax rules.
@@ -41,13 +51,14 @@ const ulonglong all_features= Opt_trace_
    @param  str     pointer to trace
    @param  length  trace's length
 */
-void check_json_compliance(const char *str, size_t length)
+void do_check_json_compliance(const char *str, size_t length)
 {
-#if 0
+  return;
   /*
     Read from stdin, eliminate comments, parse as JSON. If invalid, an exception
     is thrown by Python, uncaught, which produces a non-zero error code.
   */
+#ifndef __WIN__
   const char python_cmd[]=
     "python -c \""
     "import json, re, sys;"
@@ -56,22 +67,31 @@ void check_json_compliance(const char *s
     "json.loads(s, 'utf-8')\"";
   // Send the trace to this new process' stdin:
   FILE *fd= popen(python_cmd, "w");
-  ASSERT_NE((void*)NULL, fd);
-  ASSERT_EQ((size_t)1, fwrite(str, length, 1, fd));
+  ASSERT_TRUE(NULL != fd);
+  ASSERT_EQ(1U, fwrite(str, length, 1, fd));
   int rc= pclose(fd);
   rc= WEXITSTATUS(rc);
-  ASSERT_EQ(0, rc);
+  EXPECT_EQ(0, rc);
 #endif
 }
 
+class TraceContentTest : public ::testing::Test
+{
+public:
+  Opt_trace_context trace;
+};
+
+
+TEST_F(TraceContentTest, ConstructAndDestruct)
+{
+}
+
 
 /** Test empty trace */
-TEST(Trace_content_test, empty)
+TEST_F(TraceContentTest, Empty)
 {
-  /* Create a trace */
-  Opt_trace_context trace;
-  ASSERT_EQ(false, trace.start(true, false, false, -1, 1, ULONG_MAX,
-                               all_features));
+  ASSERT_FALSE(trace.start(YES_FOR_THIS, false, false, -1, 1, ULONG_MAX,
+                           all_features));
   /*
     Add at least an object to it. A really empty trace ("") is not
     JSON-compliant, at least Python's JSON module raises an exception.
@@ -83,26 +103,30 @@ TEST(Trace_content_test, empty)
   trace.end();
   /* And verify trace's content */
   Opt_trace_iterator it(&trace);
-  ASSERT_EQ(true, it != Opt_trace_iterator::end);
-  const Opt_trace_info info= *it;
+  /*
+    ASSERT here, because a failing EXPECT_FALSE would continue into
+    it.get_value() and segfault.
+  */
+  ASSERT_FALSE(it.at_end());
+  Opt_trace_info info;
+  it.get_value(&info);
   const char expected[]= "{\n}";
-  ASSERT_STREQ(expected, info.trace_ptr);
-  ASSERT_EQ(sizeof(expected) - 1, info.trace_length);
+  EXPECT_STREQ(expected, info.trace_ptr);
+  EXPECT_EQ(sizeof(expected) - 1, info.trace_length);
   check_json_compliance(info.trace_ptr, info.trace_length);
-  ASSERT_EQ((size_t)0, info.missing_bytes);
-  ASSERT_EQ(false, info.malloc_error);
+  EXPECT_EQ(0U, info.missing_bytes);
+  EXPECT_FALSE(info.malloc_error);
   /* Should be no more traces */
-  it++;
-  ASSERT_EQ(false, it != Opt_trace_iterator::end);
+  it.next();
+  ASSERT_TRUE(it.at_end());
 }
 
 
 /** Test normal usage */
-TEST(Trace_content_test, normal_usage)
+TEST_F(TraceContentTest, NormalUsage)
 {
-  Opt_trace_context trace;
-  ASSERT_EQ(false, trace.start(true, true, false, -1, 1, ULONG_MAX,
-                               all_features));
+  ASSERT_FALSE(trace.start(YES_FOR_THIS, true, false, -1, 1, ULONG_MAX,
+                           all_features));
   {
     Opt_trace_object oto(&trace);
     {
@@ -124,8 +148,9 @@ TEST(Trace_content_test, normal_usage)
   }
   trace.end();
   Opt_trace_iterator it(&trace);
-  ASSERT_EQ(true, it != Opt_trace_iterator::end);
-  const Opt_trace_info info= *it;
+  ASSERT_FALSE(it.at_end());
+  Opt_trace_info info;
+  it.get_value(&info);
   const char expected[]=
     "{\n"
     "  \"one array\": [\n"
@@ -145,25 +170,24 @@ TEST(Trace_content_test, normal_usage)
     "    4\n"
     "  ] /* another array */\n"
     "}";
-  ASSERT_STREQ(expected, info.trace_ptr);
-  ASSERT_EQ(sizeof(expected) - 1, info.trace_length);
+  EXPECT_STREQ(expected, info.trace_ptr);
+  EXPECT_EQ(sizeof(expected) - 1, info.trace_length);
   check_json_compliance(info.trace_ptr, info.trace_length);
-  ASSERT_EQ((size_t)0, info.missing_bytes);
-  ASSERT_EQ(false, info.malloc_error);
-  it++;
-  ASSERT_EQ(false, it != Opt_trace_iterator::end);
+  EXPECT_EQ(0U, info.missing_bytes);
+  EXPECT_EQ(false, info.malloc_error);
+  it.next();
+  ASSERT_TRUE(it.at_end());
 }
 
 
 /**
-   Test Opt_trace_context::get_tail(). Same as Trace_content_test.normal_usage
+   Test Opt_trace_context::get_tail(). Same as TraceContentTest.NormalUsage
    but with a get_tail() in the middle.
 */
-TEST(Trace_content_test, tail)
+TEST_F(TraceContentTest, Tail)
 {
-  Opt_trace_context trace;
-  ASSERT_EQ(false, trace.start(true, true, false, -1, 1, ULONG_MAX,
-                               all_features));
+  ASSERT_FALSE(trace.start(YES_FOR_THIS, true, false, -1, 1, ULONG_MAX,
+                           all_features));
   {
     Opt_trace_object oto(&trace);
     {
@@ -186,8 +210,9 @@ TEST(Trace_content_test, tail)
   const char *tail= trace.get_tail(40);
   trace.end();
   Opt_trace_iterator it(&trace);
-  ASSERT_EQ(true, it != Opt_trace_iterator::end);
-  const Opt_trace_info info= *it;
+  ASSERT_FALSE(it.at_end());
+  Opt_trace_info info;
+  it.get_value(&info);
   const char expected[]=
     "{\n"
     "  \"one array\": [\n"
@@ -207,24 +232,23 @@ TEST(Trace_content_test, tail)
     "    4\n"
     "  ] /* another array */\n"
     "}";
-  ASSERT_STREQ(expected, info.trace_ptr);
-  ASSERT_EQ(sizeof(expected) - 1, info.trace_length);
-  ASSERT_EQ((void *)0, info.query_ptr);
-  ASSERT_EQ((size_t)0, info.query_length);
-  ASSERT_EQ((size_t)0, info.missing_bytes);
-  ASSERT_EQ(false, info.malloc_error);
-  ASSERT_EQ(0, strcmp(expected + sizeof(expected) - 1 - 40, tail));
-  it++;
-  ASSERT_EQ(false, it != Opt_trace_iterator::end);
+  EXPECT_STREQ(expected, info.trace_ptr);
+  EXPECT_EQ(sizeof(expected) - 1, info.trace_length);
+  EXPECT_EQ((void *)0, info.query_ptr);
+  EXPECT_EQ(0U, info.query_length);
+  EXPECT_EQ(0U, info.missing_bytes);
+  EXPECT_EQ(false, info.malloc_error);
+  EXPECT_EQ(0, strcmp(expected + sizeof(expected) - 1 - 40, tail));
+  it.next();
+  ASSERT_TRUE(it.at_end());
 }
 
 
 /** Test reaction to malformed JSON (object with value without key) */
-TEST(Trace_content_test, buggy_object)
+TEST_F(TraceContentTest, BuggyObject)
 {
-  Opt_trace_context trace;
-  ASSERT_EQ(false, trace.start(true, true, false, -1, 1, ULONG_MAX,
-                               all_features));
+  ASSERT_FALSE(trace.start(YES_FOR_THIS, true, false, -1, 1, ULONG_MAX,
+                           all_features));
   {
     Opt_trace_object oto(&trace);
     {
@@ -247,14 +271,15 @@ TEST(Trace_content_test, buggy_object)
   }
   trace.end();
   Opt_trace_iterator it(&trace);
-  ASSERT_EQ(true, it != Opt_trace_iterator::end);
-  const Opt_trace_info info= *it;
+  ASSERT_FALSE(it.at_end());
+  Opt_trace_info info;
+  it.get_value(&info);
   const char expected[]=
     "{\n"
     "  \"one array\": [\n"
     "    200.4,\n"
-    "    {\n"
-    "      ** invalid JSON (missing key) ** \"?\": \"one value\"\n"
+    "    {** invalid JSON (missing key) ** \n"
+    "      \"?\": \"one value\"\n"
     "    },\n"
     "    \"one string element\",\n"
     "    true\n"
@@ -267,20 +292,19 @@ TEST(Trace_content_test, buggy_object)
     "    4\n"
     "  ] /* another array */\n"
     "}";
-  ASSERT_STREQ(expected, info.trace_ptr);
-  ASSERT_EQ(sizeof(expected) - 1, info.trace_length);
-  ASSERT_EQ((size_t)0, info.missing_bytes);
-  ASSERT_EQ(false, info.malloc_error);
-  it++;
-  ASSERT_EQ(false, it != Opt_trace_iterator::end);
+  EXPECT_STREQ(expected, info.trace_ptr);
+  EXPECT_EQ(sizeof(expected) - 1, info.trace_length);
+  EXPECT_EQ(0U, info.missing_bytes);
+  EXPECT_EQ(false, info.malloc_error);
+  it.next();
+  ASSERT_TRUE(it.at_end());
 }
 
 
 /** Test reaction to malformed JSON (array with value with key) */
-TEST(Trace_content_test, buggy_array)
+TEST_F(TraceContentTest, BuggyArray)
 {
-  Opt_trace_context trace;
-  ASSERT_EQ(false, trace.start(true, true, false, -1, 1, ULONG_MAX,
+  ASSERT_EQ(false, trace.start(YES_FOR_THIS, true, false, -1, 1, ULONG_MAX,
                                all_features));
   {
     Opt_trace_object oto(&trace);
@@ -298,12 +322,13 @@ TEST(Trace_content_test, buggy_array)
   }
   trace.end();
   Opt_trace_iterator it(&trace);
-  ASSERT_EQ(true, it != Opt_trace_iterator::end);
-  const Opt_trace_info info= *it;
+  ASSERT_FALSE(it.at_end());
+  Opt_trace_info info;
+  it.get_value(&info);
   const char expected[]=
     "{\n"
-    "  \"one array\": [\n"
-    "    ** invalid JSON (unexpected key \"superfluous key\") ** 200.4\n"
+    "  \"one array\": [** invalid JSON (unexpected key \"superfluous key\") ** \n"
+    "    200.4\n"
     "  ] /* one array */,\n"
     "  \"yet another key\": -1000,\n"
     "  \"another array\": [\n"
@@ -313,21 +338,20 @@ TEST(Trace_content_test, buggy_array)
     "    4\n"
     "  ] /* another array */\n"
     "}";
-  ASSERT_STREQ(expected, info.trace_ptr);
-  ASSERT_EQ(sizeof(expected) - 1, info.trace_length);
-  ASSERT_EQ((size_t)0, info.missing_bytes);
-  ASSERT_EQ(false, info.malloc_error);
-  it++;
-  ASSERT_EQ(false, it != Opt_trace_iterator::end);
+  EXPECT_STREQ(expected, info.trace_ptr);
+  EXPECT_EQ(sizeof(expected) - 1, info.trace_length);
+  EXPECT_EQ(0U, info.missing_bytes);
+  EXPECT_EQ(false, info.malloc_error);
+  it.next();
+  ASSERT_TRUE(it.at_end());
 }
 
 
 /** Test Opt_trace_disable_I_S */
-TEST(Trace_content_test, disable_I_S)
+TEST_F(TraceContentTest, DisableIS)
 {
-  Opt_trace_context trace;
-  ASSERT_EQ(false, trace.start(true, true, false, -1, 1, ULONG_MAX,
-                               all_features));
+  ASSERT_FALSE(trace.start(YES_FOR_THIS, true, false, -1, 1, ULONG_MAX,
+                           all_features));
   {
     Opt_trace_object oto(&trace);
     {
@@ -357,8 +381,9 @@ TEST(Trace_content_test, disable_I_S)
   }
   trace.end();
   Opt_trace_iterator it(&trace);
-  ASSERT_EQ(true, it != Opt_trace_iterator::end);
-  const Opt_trace_info info= *it;
+  ASSERT_FALSE(it.at_end());
+  Opt_trace_info info;
+  it.get_value(&info);
   const char expected[]=
     "{\n"
     "  \"one array\": [\n"
@@ -378,121 +403,123 @@ TEST(Trace_content_test, disable_I_S)
     "    4\n"
     "  ] /* another array */\n"
     "}";
-  ASSERT_STREQ(expected, info.trace_ptr);
-  ASSERT_EQ(sizeof(expected) - 1, info.trace_length);
+  EXPECT_STREQ(expected, info.trace_ptr);
+  EXPECT_EQ(sizeof(expected) - 1, info.trace_length);
   check_json_compliance(info.trace_ptr, info.trace_length);
-  ASSERT_EQ((size_t)0, info.missing_bytes);
-  ASSERT_EQ(false, info.malloc_error);
-  it++;
-  ASSERT_EQ(false, it != Opt_trace_iterator::end);
+  EXPECT_EQ(0U, info.missing_bytes);
+  EXPECT_EQ(false, info.malloc_error);
+  it.next();
+  ASSERT_TRUE(it.at_end());
 }
 
 /** Helper for Trace_settings_test.offset */
-static void make_one_trace(Opt_trace_context &trace, const char *name,
-                           long offset, long limit)
+void make_one_trace(Opt_trace_context *trace, const char *name,
+                    long offset, long limit)
 {
-  ASSERT_EQ(false, trace.start(true, true, false, offset, limit, ULONG_MAX,
-                               all_features));
+  ASSERT_FALSE(trace->start(YES_FOR_THIS, true, false, offset, limit,
+                            ULONG_MAX, all_features));
   {
-    Opt_trace_object oto(&trace);
+    Opt_trace_object oto(trace);
     oto.add(name, 0LL);
   }
-  trace.end();
+  trace->end();
 }
 
 
 /**
    Helper for Trace_settings_test.offset
 
-   @param  names  a NULL-terminated array of "names"
+   @param  trace  The trace context.
+   @param  names  A NULL-terminated array of "names".
 
    Checks that the list of traces is as expected.
    This macro checks that the first trace contains names[0], that the second
    trace contains names[1], etc. That the number of traces is the same as
    the number of elements in "names".
 
-   @note it is a macro, for proper reporting of line numbers in case of
-   assertion failure (if it were a function, the line number in the function
-   would be reported, which is not useful).
+   @note It is a macro, for proper reporting of line numbers in case of
+   assertion failure. SCOPED_TRACE will report line number at the
+   macro expansion site.
 */
-#define check(trace, names) do                                    \
-  {                                                               \
-    Opt_trace_iterator it(&trace);                                  \
-    Opt_trace_info info;                                            \
-    for (const char **name= names ; *name != NULL ; name++)       \
-    {                                                             \
-      ASSERT_EQ(true, it != Opt_trace_iterator::end);                 \
-      info= *it;                                                    \
-      const size_t name_len= strlen(*name);                         \
-      ASSERT_EQ(name_len + 11, info.trace_length);                   \
-      ASSERT_EQ(0, strncmp(info.trace_ptr + 5, *name, name_len));         \
-      ASSERT_EQ((size_t)0, info.missing_bytes);                     \
-      ASSERT_EQ(false, info.malloc_error);                          \
-      it++;                                                         \
-    }                                                             \
-    ASSERT_EQ(false, it != Opt_trace_iterator::end);               \
-  } while (0)
+#define check(trace, names) { SCOPED_TRACE(""); do_check(&trace, names); }
+
+void do_check(Opt_trace_context *trace, const char**names)
+{
+  Opt_trace_iterator it(trace);
+  Opt_trace_info info;
+  for (const char **name= names ; *name != NULL ; name++)
+  {
+    ASSERT_FALSE(it.at_end());
+    it.get_value(&info);
+    const size_t name_len= strlen(*name);
+    EXPECT_EQ(name_len + 11, info.trace_length);
+    EXPECT_EQ(0, strncmp(info.trace_ptr + 5, *name, name_len));
+    EXPECT_EQ(0U, info.missing_bytes);
+    EXPECT_EQ(false, info.malloc_error);
+    it.next();
+  }
+  ASSERT_TRUE(it.at_end());
+}
 
 
 /** Test offset/limit variables */
-TEST(Trace_settings_test, offset)
+TEST(TraceSettingsTest, Offset)
 {
   Opt_trace_context trace;
-  make_one_trace(trace, "100", -1 /* offset */, 1 /* limit */);
+  make_one_trace(&trace, "100", -1 /* offset */, 1 /* limit */);
   const char *expected_traces0[]= {"100", NULL};
   check(trace, expected_traces0);
-  make_one_trace(trace, "101", -1, 1);
+  make_one_trace(&trace, "101", -1, 1);
   /* 101 should have overwritten 100 */
   const char *expected_traces1[]= {"101", NULL};
   check(trace, expected_traces1);
-  make_one_trace(trace, "102", -1, 1);
+  make_one_trace(&trace, "102", -1, 1);
   const char *expected_traces2[]= {"102", NULL};
   check(trace, expected_traces2);
-  check(trace, expected_traces2);
   trace.reset();
   const char *expected_traces_empty[]= {NULL};
   check(trace, expected_traces_empty);
-  make_one_trace(trace, "103", -3, 2);
-  make_one_trace(trace, "104", -3, 2);
-  make_one_trace(trace, "105", -3, 2);
-  make_one_trace(trace, "106", -3, 2);
-  make_one_trace(trace, "107", -3, 2);
-  make_one_trace(trace, "108", -3, 2);
-  make_one_trace(trace, "109", -3, 2);
+  make_one_trace(&trace, "103", -3, 2);
+  make_one_trace(&trace, "104", -3, 2);
+  make_one_trace(&trace, "105", -3, 2);
+  make_one_trace(&trace, "106", -3, 2);
+  make_one_trace(&trace, "107", -3, 2);
+  make_one_trace(&trace, "108", -3, 2);
+  make_one_trace(&trace, "109", -3, 2);
   const char *expected_traces3[]= {"107", "108", NULL};
   check(trace, expected_traces3);
   trace.reset();
   check(trace, expected_traces_empty);
-  make_one_trace(trace, "110", 3, 2);
-  make_one_trace(trace, "111", 3, 2);
-  make_one_trace(trace, "112", 3, 2);
-  make_one_trace(trace, "113", 3, 2);
-  make_one_trace(trace, "114", 3, 2);
-  make_one_trace(trace, "115", 3, 2);
-  make_one_trace(trace, "116", 3, 2);
+  make_one_trace(&trace, "110", 3, 2);
+  make_one_trace(&trace, "111", 3, 2);
+  make_one_trace(&trace, "112", 3, 2);
+  make_one_trace(&trace, "113", 3, 2);
+  make_one_trace(&trace, "114", 3, 2);
+  make_one_trace(&trace, "115", 3, 2);
+  make_one_trace(&trace, "116", 3, 2);
   const char *expected_traces10[]= {"113", "114", NULL};
   check(trace, expected_traces10);
   trace.reset();
   check(trace, expected_traces_empty);
-  make_one_trace(trace, "117", 0, 1);
-  make_one_trace(trace, "118", 0, 1);
-  make_one_trace(trace, "119", 0, 1);
+  make_one_trace(&trace, "117", 0, 1);
+  make_one_trace(&trace, "118", 0, 1);
+  make_one_trace(&trace, "119", 0, 1);
   const char *expected_traces17[]= {"117", NULL};
   check(trace, expected_traces17);
   trace.reset();
-  make_one_trace(trace, "120", 0, 0);
-  make_one_trace(trace, "121", 0, 0);
-  make_one_trace(trace, "122", 0, 0);
+  make_one_trace(&trace, "120", 0, 0);
+  make_one_trace(&trace, "121", 0, 0);
+  make_one_trace(&trace, "122", 0, 0);
   const char *expected_traces20[]= {NULL};
   check(trace, expected_traces20);
 }
 
 
 /** Test truncation by max_mem_size */
-TEST(Trace_settings_test, max_mem_size)
+TEST(TraceSettingsTest, MaxMemSize)
 {
   Opt_trace_context trace;
-  ASSERT_EQ(false, trace.start(true, false, false, -1, 1,
+  ASSERT_EQ(false, trace.start(YES_FOR_THIS, false, false, -1, 1,
                                1000 /* max_mem_size */, all_features));
   /* make a "long" trace */
   {
@@ -505,8 +532,9 @@ TEST(Trace_settings_test, max_mem_size)
   }
   trace.end();
   Opt_trace_iterator it(&trace);
-  ASSERT_EQ(true, it != Opt_trace_iterator::end);
-  const Opt_trace_info info= *it;
+  ASSERT_FALSE(it.at_end());
+  Opt_trace_info info;
+  it.get_value(&info);
   const char expected[]=
     "{\n"
     "  \"one array\": [\n"
@@ -517,21 +545,21 @@ TEST(Trace_settings_test, max_mem_size)
     Without truncation the trace would take:
     2+17+3+1+20*100 = 2023
   */
-  ASSERT_EQ((size_t)996, info.trace_length);
-  ASSERT_EQ((size_t)1027, info.missing_bytes); // 996+1027=2023
-  ASSERT_EQ(false, info.malloc_error);
-  ASSERT_EQ(0, strncmp(expected, info.trace_ptr, sizeof(expected) - 1));
-  it++;
-  ASSERT_EQ(false, it != Opt_trace_iterator::end);
+  EXPECT_EQ(996U, info.trace_length);
+  EXPECT_EQ(1027U, info.missing_bytes); // 996+1027=2023
+  EXPECT_EQ(false, info.malloc_error);
+  EXPECT_EQ(0, strncmp(expected, info.trace_ptr, sizeof(expected) - 1));
+  it.next();
+  ASSERT_TRUE(it.at_end());
 }
 
 
 
 /** Test how truncation by max_mem_size affects next traces */
-TEST(Trace_settings_test, max_mem_size2)
+TEST(TraceSettingsTest, MaxMemSize2)
 {
   Opt_trace_context trace;
-  ASSERT_EQ(false, trace.start(true, false, false, -2, 2,
+  ASSERT_EQ(false, trace.start(YES_FOR_THIS, false, false, -2, 2,
                                21 /* max_mem_size */, all_features));
   /* make a "long" trace */
   {
@@ -540,97 +568,100 @@ TEST(Trace_settings_test, max_mem_size2)
   }
   trace.end();
   /* A second similar trace */
-  ASSERT_EQ(false, trace.start(true, false, false, -2, 2, 21, all_features));
+  ASSERT_EQ(false, trace.start(YES_FOR_THIS, false, false, -2, 2, 21,
+                               all_features));
   {
     Opt_trace_object oto(&trace);
     oto.add_alnum("some key2", "make it long");
   }
   trace.end();
   Opt_trace_iterator it(&trace);
-  ASSERT_EQ(true, it != Opt_trace_iterator::end);
-  Opt_trace_info info= *it;
-  ASSERT_EQ((size_t)17, info.trace_length);
-  ASSERT_EQ((size_t)16, info.missing_bytes);
-  ASSERT_EQ(false, info.malloc_error);
-  it++;
-  info= *it;
+  ASSERT_FALSE(it.at_end());
+  Opt_trace_info info;
+  it.get_value(&info);
+  EXPECT_EQ(17U, info.trace_length);
+  EXPECT_EQ(16U, info.missing_bytes);
+  EXPECT_EQ(false, info.malloc_error);
+  it.next();
+  ASSERT_FALSE(it.at_end());
+  it.get_value(&info);
   /* 2nd trace completely empty as first trace left no room */
-  ASSERT_EQ((size_t)0, info.trace_length);
-  ASSERT_EQ((size_t)33, info.missing_bytes);
-  ASSERT_EQ(false, info.malloc_error);
-  it++;
-  ASSERT_EQ(false, it != Opt_trace_iterator::end);
+  EXPECT_EQ(0U, info.trace_length);
+  EXPECT_EQ(33U, info.missing_bytes);
+  EXPECT_EQ(false, info.malloc_error);
+  it.next();
+  ASSERT_TRUE(it.at_end());
   /*
     3rd trace; the first one should automatically be purged, thus the 3rd
     should have a bit of room.
   */
-  ASSERT_EQ(false, trace.start(true, false, false, -2, 2, 21, all_features));
+  ASSERT_EQ(false, trace.start(YES_FOR_THIS, false, false, -2, 2, 21,
+                               all_features));
   {
     Opt_trace_object oto(&trace);
     oto.add_alnum("some key3", "make it long");
   }
   trace.end();
   Opt_trace_iterator it2(&trace);
-  ASSERT_EQ(true, it2 != Opt_trace_iterator::end);
-  info= *it2;
-  ASSERT_EQ((size_t)0, info.trace_length);
-  ASSERT_EQ((size_t)33, info.missing_bytes);
-  ASSERT_EQ(false, info.malloc_error);
-  it2++;
-  info= *it2;
+  ASSERT_FALSE(it2.at_end());
+  it2.get_value(&info);
+  EXPECT_EQ(0U, info.trace_length);
+  EXPECT_EQ(33U, info.missing_bytes);
+  EXPECT_EQ(false, info.malloc_error);
+  it2.next();
+  it2.get_value(&info);
   /*
     3rd one had room. A bit less than first, because just reading the second
     with the iterator has reallocated the second from 0 to 8 bytes...
   */
-  ASSERT_EQ((size_t)14, info.trace_length);
-  ASSERT_EQ((size_t)19, info.missing_bytes);
-  ASSERT_EQ(false, info.malloc_error);
-  it2++;
-  ASSERT_EQ(false, it2 != Opt_trace_iterator::end);
+  EXPECT_EQ(14U, info.trace_length);
+  EXPECT_EQ(19U, info.missing_bytes);
+  EXPECT_EQ(false, info.malloc_error);
+  it2.next();
+  ASSERT_TRUE(it2.at_end());
 }
 
 
 /** Test reaction to out-of-memory condition */
 #ifndef DBUG_OFF
-TEST(Trace_content_test, out_of_memory)
+TEST_F(TraceContentTest, OutOfMemory)
 {
-  Opt_trace_context trace;
-  ASSERT_EQ(false, trace.start(true, false, false, -1, 1, ULONG_MAX,
+  ASSERT_EQ(false, trace.start(YES_FOR_THIS, false, false, -1, 1, ULONG_MAX,
                                all_features));
   {
     Opt_trace_object oto(&trace);
     {
       Opt_trace_array ota(&trace, "one array");
-      Opt_trace_stmt::simulate_oom= true;
+      Opt_trace_context::simulate_oom_in_buffers= true;
       ota.add(200.4);
-      Opt_trace_stmt::simulate_oom= false;
+      Opt_trace_context::simulate_oom_in_buffers= false;
     }
   }
   trace.end();
   Opt_trace_iterator it(&trace);
-  ASSERT_EQ(true, it != Opt_trace_iterator::end);
-  const Opt_trace_info info= *it;
+  ASSERT_FALSE(it.at_end());
+  Opt_trace_info info;
+  it.get_value(&info);
   const char expected[]=
     "{\n"
     "  \"one array\": [\n"
     "\n"                    // 200.4 missing
     "  ]\n"
     "}";
-  ASSERT_STREQ(expected, info.trace_ptr);
-  ASSERT_EQ(sizeof(expected) - 1, info.trace_length);
-  ASSERT_EQ((size_t)0, info.missing_bytes);
-  ASSERT_EQ(true, info.malloc_error);   // malloc error reported
-  it++;
-  ASSERT_EQ(false, it != Opt_trace_iterator::end);
+  EXPECT_STREQ(expected, info.trace_ptr);
+  EXPECT_EQ(sizeof(expected) - 1, info.trace_length);
+  EXPECT_EQ(0U, info.missing_bytes);
+  EXPECT_EQ(true, info.malloc_error);   // malloc error reported
+  it.next();
+  ASSERT_TRUE(it.at_end());
 }
 #endif // !DBUG_OFF
 
 
 /** Test filtering by feature */
-TEST(Trace_content_test, filtering_by_feature)
+TEST_F(TraceContentTest, FilteringByFeature)
 {
-  Opt_trace_context trace;
-  ASSERT_EQ(false, trace.start(true, false, false, -1, 1, ULONG_MAX,
+  ASSERT_EQ(false, trace.start(YES_FOR_THIS, false, false, -1, 1, ULONG_MAX,
                                Opt_trace_context::MISC));
   {
     Opt_trace_object oto(&trace);
@@ -660,8 +691,9 @@ TEST(Trace_content_test, filtering_by_fe
   }
   trace.end();
   Opt_trace_iterator it(&trace);
-  ASSERT_EQ(true, it != Opt_trace_iterator::end);
-  const Opt_trace_info info= *it;
+  ASSERT_FALSE(it.at_end());
+  Opt_trace_info info;
+  it.get_value(&info);
   const char expected[]=
     "{\n"
     "  \"one array\": [\n"
@@ -678,29 +710,28 @@ TEST(Trace_content_test, filtering_by_fe
     "    4\n"
     "  ]\n"
     "}";
-  ASSERT_STREQ(expected, info.trace_ptr);
-  ASSERT_EQ(sizeof(expected) - 1, info.trace_length);
+  EXPECT_STREQ(expected, info.trace_ptr);
+  EXPECT_EQ(sizeof(expected) - 1, info.trace_length);
   check_json_compliance(info.trace_ptr, info.trace_length);
-  ASSERT_EQ((size_t)0, info.missing_bytes);
-  ASSERT_EQ(false, info.malloc_error);
-  it++;
-  ASSERT_EQ(false, it != Opt_trace_iterator::end);
+  EXPECT_EQ(0U, info.missing_bytes);
+  EXPECT_EQ(false, info.malloc_error);
+  it.next();
+  ASSERT_TRUE(it.at_end());
 }
 
 
 /** Test escaping of characters */
-TEST(Trace_content_test, escaping)
+TEST_F(TraceContentTest, Escaping)
 {
-  Opt_trace_context trace;
-  ASSERT_EQ(false, trace.start(true, true, false, -1, 1, ULONG_MAX,
+  ASSERT_EQ(false, trace.start(YES_FOR_THIS, true, false, -1, 1, ULONG_MAX,
                                all_features));
   // All ASCII 0-127 chars are valid UTF8 encodings
   char all_chars[130];
   for (uint c= 0; c < sizeof(all_chars) - 2 ; c++)
     all_chars[c]= c;
   // Now a character with a two-byte code in utf8: ä
-  all_chars[128]= 0xc3;
-  all_chars[129]= 0xa4;
+  all_chars[128]= static_cast<char>(0xc3);
+  all_chars[129]= static_cast<char>(0xa4);
   // all_chars is used both as query...
   trace.set_query(all_chars, sizeof(all_chars), system_charset_info);
   {
@@ -710,32 +741,32 @@ TEST(Trace_content_test, escaping)
   }
   trace.end();
   Opt_trace_iterator it(&trace);
-  ASSERT_EQ(true, it != Opt_trace_iterator::end);
-  const Opt_trace_info info= *it;
+  ASSERT_FALSE(it.at_end());
+  Opt_trace_info info;
+  it.get_value(&info);
   // we get the trace escaped, JSON-compliant:
   const char expected[]=
     "{\n"
     "  \"somekey\": \"\\u0000\\u0001\\u0002\\u0003\\u0004\\u0005\\u0006\\u0007\\u0008\\t\\n\\u000b\\u000c\\r\\u000e\\u000f\\u0010\\u0011\\u0012\\u0013\\u0014\\u0015\\u0016\\u0017\\u0018\\u0019\\u001a\\u001b\\u001c\\u001d\\u001e\\u001f !\\\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~ä\"\n"
     "}";
-  ASSERT_STREQ(expected, info.trace_ptr);
-  ASSERT_EQ(sizeof(expected) - 1, info.trace_length);
+  EXPECT_STREQ(expected, info.trace_ptr);
+  EXPECT_EQ(sizeof(expected) - 1, info.trace_length);
   check_json_compliance(info.trace_ptr, info.trace_length);
-  ASSERT_EQ((size_t)0, info.missing_bytes);
-  ASSERT_EQ(false, info.malloc_error);
-  ASSERT_EQ(sizeof(all_chars), info.query_length);
+  EXPECT_EQ(0U, info.missing_bytes);
+  EXPECT_EQ(false, info.malloc_error);
+  EXPECT_EQ(sizeof(all_chars), info.query_length);
   // we get the query unescaped, verbatim, not 0-terminated:
-  ASSERT_EQ(0, memcmp(all_chars, info.query_ptr, sizeof(all_chars)));
-  ASSERT_EQ(system_charset_info, info.query_charset);
-  it++;
-  ASSERT_EQ(false, it != Opt_trace_iterator::end);
+  EXPECT_EQ(0, memcmp(all_chars, info.query_ptr, sizeof(all_chars)));
+  EXPECT_EQ(system_charset_info, info.query_charset);
+  it.next();
+  ASSERT_TRUE(it.at_end());
 }
 
 
 /** Test how the system handles non-UTF8 characters, a violation of its API */
-TEST(Trace_content_test, non_utf8)
+TEST_F(TraceContentTest, NonUtf8)
 {
-  Opt_trace_context trace;
-  ASSERT_EQ(false, trace.start(true, true, false, -1, 1, ULONG_MAX,
+  ASSERT_EQ(false, trace.start(YES_FOR_THIS, true, false, -1, 1, ULONG_MAX,
                                all_features));
   /*
     A string which starts with invalid utf8 (the four first bytes are éèÄà in
@@ -772,23 +803,91 @@ TEST(Trace_content_test, non_utf8)
   }
   trace.end();
   Opt_trace_iterator it(&trace);
-  ASSERT_EQ(true, it != Opt_trace_iterator::end);
-  const Opt_trace_info info= *it;
+  ASSERT_FALSE(it.at_end());
+  Opt_trace_info info;
+  it.get_value(&info);
   // This is UTF8 and thus JSON-compliant; ABC is present
   const char expected[]=
     "{\n"
     "  \"somekey\": \"????ABC\"\n"
     "}";
-  ASSERT_STREQ(expected, info.trace_ptr);
-  ASSERT_EQ(sizeof(expected) - 1, info.trace_length);
+  EXPECT_STREQ(expected, info.trace_ptr);
+  EXPECT_EQ(sizeof(expected) - 1, info.trace_length);
   check_json_compliance(info.trace_ptr, info.trace_length);
-  ASSERT_EQ((size_t)0, info.missing_bytes);
-  ASSERT_EQ(false, info.malloc_error);
-  ASSERT_EQ(sizeof(all_chars), info.query_length);
+  EXPECT_EQ(0U, info.missing_bytes);
+  EXPECT_EQ(false, info.malloc_error);
+  EXPECT_EQ(sizeof(all_chars), info.query_length);
   // we get the query unescaped, verbatim, not 0-terminated:
-  ASSERT_EQ(0, memcmp(all_chars, info.query_ptr, sizeof(all_chars)));
-  it++;
-  ASSERT_EQ(false, it != Opt_trace_iterator::end);
+  EXPECT_EQ(0, memcmp(all_chars, info.query_ptr, sizeof(all_chars)));
+  it.next();
+  ASSERT_TRUE(it.at_end());
 }
 
+
+void open_object(Opt_trace_context *trace)
+{
+  static int count= 100;
+  if (count == 0)
+    return;
+  count--;
+  Opt_trace_object oto(trace, "abc");
+  open_object(trace);
+}
+
+/**
+   Test indentation by many blanks.
+   By creating a 100-level deep structure, we force an indentation which
+   enters the while() block in Opt_trace_stmt::next_line().
+*/
+TEST_F(TraceContentTest, Indent)
+{
+  ASSERT_EQ(false, trace.start(YES_FOR_THIS, false, false, -1, 1, ULONG_MAX,
+                               all_features));
+  {
+    Opt_trace_object oto(&trace);
+    open_object(&trace);
+  }
+  trace.end();
+  Opt_trace_iterator it(&trace);
+  ASSERT_FALSE(it.at_end());
+  Opt_trace_info info;
+  it.get_value(&info);
+  /*
+    Formula for the expected size.
+    Before the Nth call to open_object(), indentation inside the innermost
+    empty object is noted I(N); so the relationship between the size before
+    Nth call and the size after Nth call is:
+    S(N+1) = S(N)
+             + I(N)   (indentation before added '"abc": {\n' )
+             + 9      (length of added '"abc": {\n' )
+             + I(N)   (indentation before added '}\n' )
+             + 2      (length of added '}\n' )
+    and the indentation is increased by two as we are one level deeper:
+    I(N+1) = I(N) + 2
+    With S(1) = 3 (length of '{\n}') and I(1) = 2.
+    So I(N) = 2 * N and
+    S(N+1) - S(N) = 11 + 4 * N
+    So S(N) = 3 + 11 * (N - 1) + 2 * N * (N - 1).
+    For 100 calls, the final size is S(101) = 21303.
+    Each call adds 10 non-space characters, so there should be
+    21303
+    - 10 * 100 (added non-space characters)
+    - 3 (non-space characters of the initial object before the first function call)
+    = 20300 spaces.
+  */
+  EXPECT_EQ(21303U, info.trace_length);
+  uint spaces= 0;
+  for (uint i= 0; i < info.trace_length; i++)
+    if (info.trace_ptr[i] == ' ')
+      spaces++;
+  EXPECT_EQ(20300U, spaces);
+  check_json_compliance(info.trace_ptr, info.trace_length);
+  EXPECT_EQ(0U, info.missing_bytes);
+  EXPECT_EQ(false, info.malloc_error);
+  it.next();
+  ASSERT_TRUE(it.at_end());
+}
+
+}  // namespace
+
 #endif // OPTIMIZER_TRACE

No bundle (reason: revision is a merge (you can force generation of a bundle with env var BZR_FORCE_BUNDLE=1)).
Thread
bzr commit into mysql-next-mr-bugfixing branch (guilhem.bichot:3265) Guilhem Bichot15 Feb