MySQL Lists are EOL. Please join:

List: Commits — « Previous Message | Next Message »
From:Matthias Leich Date:February 3 2009 7:06pm
Subject:bzr commit into mysql-6.0-bugteam branch (Matthias.Leich:3005)
View as plain text  
#At file:///work2/6.0/mysql-6.0-bugteam-push/

 3005 Matthias Leich	2009-02-03 [merge]
      Merge of last changes into GCA tree, no conflicts
removed:
  sql/backup/debug.h
added:
  mysql-test/include/blackhole.inc
  mysql-test/r/join_optimizer.result
  mysql-test/r/myisam_keycache_coverage.result
  mysql-test/suite/backup/r/backup_datatypes.result
  mysql-test/suite/backup/r/backup_myisam.result
  mysql-test/suite/backup/r/backup_myisam_coverage.result
  mysql-test/suite/backup/r/backup_stream_errors.result
  mysql-test/suite/backup/t/backup_datatypes.test
  mysql-test/suite/backup/t/backup_myisam.test
  mysql-test/suite/backup/t/backup_myisam_coverage.test
  mysql-test/suite/backup/t/backup_stream_errors.test
  mysql-test/suite/backup_engines/r/backup_partitioning.result
  mysql-test/suite/backup_engines/t/backup_partitioning.test
  mysql-test/t/join_optimizer.test
  mysql-test/t/myisam_keycache_coverage.test
renamed:
  mysql-test/suite/backup/r/backup_myisam1.result => mysql-test/suite/backup/r/backup_myisam_extlocking.result
  mysql-test/suite/backup/r/backup_myisam2.result => mysql-test/suite/backup/r/backup_myisam_sync.result
  mysql-test/suite/backup/t/backup_myisam1-master.opt => mysql-test/suite/backup/t/backup_myisam_extlocking-master.opt
  mysql-test/suite/backup/t/backup_myisam1.test => mysql-test/suite/backup/t/backup_myisam_extlocking.test
  mysql-test/suite/backup/t/backup_myisam2.test => mysql-test/suite/backup/t/backup_myisam_sync.test
modified:
  client/mysql.cc
  include/m_string.h
  mysql-test/include/mrr_tests.inc
  mysql-test/lib/mtr_report.pm
  mysql-test/r/ctype_ldml.result
  mysql-test/r/innodb_mrr.result
  mysql-test/r/join_cache.result
  mysql-test/r/join_nested_jcl6.result
  mysql-test/r/maria_mrr.result
  mysql-test/r/myisam_mrr.result
  mysql-test/r/null_key.result
  mysql-test/r/order_by.result
  mysql-test/r/subselect.result
  mysql-test/r/subselect3_jcl6.result
  mysql-test/r/subselect_no_mat.result
  mysql-test/r/subselect_no_opts.result
  mysql-test/r/subselect_no_semijoin.result
  mysql-test/r/subselect_sj.result
  mysql-test/r/subselect_sj2_jcl6.result
  mysql-test/r/subselect_sj_jcl6.result
  mysql-test/r/view.result
  mysql-test/std_data/client-cert.pem
  mysql-test/std_data/server-cert.pem
  mysql-test/suite/backup/r/backup_errors.result
  mysql-test/suite/backup/r/backup_logs.result
  mysql-test/suite/backup/r/backup_logs_purge.result
  mysql-test/suite/backup/r/backup_vp_nontx.result
  mysql-test/suite/backup/t/backup.test
  mysql-test/suite/backup/t/backup_errors.test
  mysql-test/suite/backup/t/backup_logs.test
  mysql-test/suite/backup/t/backup_logs_purge.test
  mysql-test/suite/backup/t/backup_vp_nontx.test
  mysql-test/suite/backup/t/disabled.def
  mysql-test/suite/federated/federated.inc
  mysql-test/suite/federated/federated.result
  mysql-test/suite/federated/federated_archive.result
  mysql-test/suite/federated/federated_bug_13118.result
  mysql-test/suite/federated/federated_bug_25714.result
  mysql-test/suite/federated/federated_bug_25714.test
  mysql-test/suite/federated/federated_cleanup.inc
  mysql-test/suite/federated/federated_innodb.result
  mysql-test/suite/federated/federated_server.result
  mysql-test/suite/parts/r/partition_special_innodb.result
  mysql-test/suite/parts/r/partition_special_myisam.result
  mysql-test/suite/parts/r/rpl_partition.result
  mysql-test/suite/rpl/r/rpl_backup.result
  mysql-test/suite/rpl/r/rpl_trigger.result
  mysql-test/suite/rpl/t/rpl_backup.test
  mysql-test/suite/rpl/t/rpl_trigger.test
  mysql-test/suite/rpl_ndb/r/rpl_ndb_circular_2ch.result
  mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_2ch.cnf
  mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_2ch.test
  mysql-test/suite/sys_vars/t/rpl_max_binlog_size_func.test
  mysql-test/t/ctype_ldml.test
  mysql-test/t/disabled.def
  mysql-test/t/innodb_mrr.test
  mysql-test/t/join_cache.test
  mysql-test/t/maria_mrr.test
  mysql-test/t/myisam_mrr.test
  mysql-test/t/subselect_sj.test
  mysql-test/t/system_mysql_db_fix30020.test
  mysql-test/t/variables.test
  mysys/charset.c
  mysys/mf_keycache.c
  mysys/my_delete.c
  sql/backup/Doxyfile
  sql/backup/api_types.h
  sql/backup/backup_aux.h
  sql/backup/backup_engine.h
  sql/backup/backup_info.cc
  sql/backup/backup_info.h
  sql/backup/backup_kernel.h
  sql/backup/backup_test.cc
  sql/backup/be_default.cc
  sql/backup/be_default.h
  sql/backup/be_logical.h
  sql/backup/be_native.h
  sql/backup/be_nodata.cc
  sql/backup/be_nodata.h
  sql/backup/be_snapshot.cc
  sql/backup/be_snapshot.h
  sql/backup/be_thread.cc
  sql/backup/be_thread.h
  sql/backup/buffer_iterator.cc
  sql/backup/buffer_iterator.h
  sql/backup/data_backup.cc
  sql/backup/error.h
  sql/backup/image_info.cc
  sql/backup/image_info.h
  sql/backup/kernel.cc
  sql/backup/logger.cc
  sql/backup/logger.h
  sql/backup/restore_info.h
  sql/backup/stream.cc
  sql/backup/stream.h
  sql/backup/stream_v1.c
  sql/backup/stream_v1.h
  sql/backup/stream_v1_transport.c
  sql/handler.cc
  sql/handler.h
  sql/mysql_priv.h
  sql/mysqld.cc
  sql/opt_range.cc
  sql/opt_range.h
  sql/set_var.cc
  sql/share/errmsg.txt
  sql/si_objects.cc
  sql/si_objects.h
  sql/sql_class.cc
  sql/sql_join_cache.cc
  sql/sql_lex.cc
  sql/sql_lex.h
  sql/sql_parse.cc
  sql/sql_repl.cc
  sql/sql_repl.h
  sql/sql_select.cc
  sql/sql_select.h
  storage/falcon/ha_falcon.cpp
  storage/maria/ha_maria.h
  storage/maria/ma_key.c
  storage/myisam/ha_myisam.cc
  storage/myisam/ha_myisam.h
  storage/myisam/mi_close.c
  storage/myisam/mi_key.c
  storage/myisam/mi_open.c
  storage/myisam/myisam_backup_engine.cc
  mysql-test/suite/backup/r/backup_myisam_extlocking.result
  mysql-test/suite/backup/r/backup_myisam_sync.result
  mysql-test/suite/backup/t/backup_myisam_extlocking.test
  mysql-test/suite/backup/t/backup_myisam_sync.test

=== modified file 'client/mysql.cc'
--- a/client/mysql.cc	2009-01-30 14:13:39 +0000
+++ b/client/mysql.cc	2009-02-03 09:16:53 +0000
@@ -1135,6 +1135,7 @@ int main(int argc,char *argv[])
   {
     put_error(NULL);
     free_defaults(defaults_argv);
+    batch_readline_end(status.line_buff);
     my_end(0);
     exit(1);
   }
@@ -1204,7 +1205,9 @@ int main(int argc,char *argv[])
 					    MYF(MY_WME))))
       {
 	fprintf(stderr, "Couldn't allocate memory for temp histfile!\n");
-	exit(1);
+        status.exit_status= 1;
+        mysql_end(1);
+        exit(1); /* purecov: deadcode */
       }
       sprintf(histfile_tmp, "%s.TMP", histfile);
     }
@@ -1238,11 +1241,11 @@ sig_handler mysql_end(int sig)
     if (!write_history(histfile_tmp))
       my_rename(histfile_tmp, histfile, MYF(MY_WME));
   }
-  batch_readline_end(status.line_buff);
   completion_hash_free(&ht);
   free_root(&hash_mem_root,MYF(0));
-
 #endif
+  batch_readline_end(status.line_buff);
+
   if (sig >= 0)
     put_info(sig ? "Aborted" : "Bye", INFO_RESULT);
   glob_buffer.free();

=== modified file 'include/m_string.h'
--- a/include/m_string.h	2008-12-13 19:55:44 +0000
+++ b/include/m_string.h	2009-01-29 21:17:59 +0000
@@ -306,6 +306,14 @@ typedef struct st_mysql_lex_string LEX_S
 #define USTRING_WITH_LEN(X) ((uchar*) X), ((size_t) (sizeof(X) - 1))
 #define C_STRING_WITH_LEN(X) ((char *) (X)), ((size_t) (sizeof(X) - 1))
 
+/* A variant with const */
+struct st_mysql_const_lex_string
+{
+  const char *str;
+  size_t length;
+};
+typedef struct st_mysql_const_lex_string LEX_CSTRING;
+
 /* A variant with const and unsigned */
 struct st_mysql_const_unsigned_lex_string
 {

=== added file 'mysql-test/include/blackhole.inc'
--- a/mysql-test/include/blackhole.inc	1970-01-01 00:00:00 +0000
+++ b/mysql-test/include/blackhole.inc	2008-12-23 15:56:18 +0000
@@ -0,0 +1,5 @@
+disable_query_log;
+--require r/true.require
+select (support = 'YES' or support = 'DEFAULT') as `TRUE` from information_schema.engines where engine = 'blackhole';
+enable_query_log;
+

=== modified file 'mysql-test/include/mrr_tests.inc'
--- a/mysql-test/include/mrr_tests.inc	2007-03-09 21:08:24 +0000
+++ b/mysql-test/include/mrr_tests.inc	2009-01-25 16:59:07 +0000
@@ -97,4 +97,19 @@ explain
 select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
 
 select * from t4 ignore index(idx1) where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+drop table t1, t2, t3, t4;
 
+#
+# Check how ICP works with NULLs and partially-covered indexes
+#
+create table t1 (a int, b int not null,unique key (a,b),index(b));
+insert ignore into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(null,7),(9,9),(8,8),(7,7),(null,9),(null,9),(6,6);
+create table t2 like t1;
+insert into t2 select * from t1;
+alter table t1 modify b blob not null, add c int not null, drop key a, add unique key (a,b(20),c), drop key b, add key (b(10));
+
+select * from t1 where a is null;
+select * from t1 where (a is null or a > 0 and a < 3) and b > 7 limit 3;
+
+select * from t1 where a is null and b=9 or a is null and b=7 limit 3;
+drop table t1, t2;

=== modified file 'mysql-test/lib/mtr_report.pm'
--- a/mysql-test/lib/mtr_report.pm	2009-02-02 11:13:26 +0000
+++ b/mysql-test/lib/mtr_report.pm	2009-02-03 09:16:53 +0000
@@ -260,7 +260,6 @@ sub mtr_report_stats ($) {
   }
 
   print "\n";
-
   # Print a list of check_testcases that failed(if any)
   if ( $::opt_check_testcases )
   {

=== modified file 'mysql-test/r/ctype_ldml.result'
--- a/mysql-test/r/ctype_ldml.result	2008-12-09 08:41:43 +0000
+++ b/mysql-test/r/ctype_ldml.result	2009-02-02 20:50:45 +0000
@@ -71,6 +71,25 @@ select * from t1 where c1='b';
 c1
 a
 drop table t1;
+CREATE TABLE t1 (
+col1 varchar(100) character set utf8 collate utf8_test_ci
+);
+INSERT INTO t1 (col1) VALUES ('abcd'),('efgh'),('ijkl');
+ALTER TABLE t1 ADD FULLTEXT INDEX (col1);
+SELECT * FROM t1 where match (col1) against ('abcd');
+col1
+abcd
+SELECT * FROM t1 where match (col1) against ('abcd' IN BOOLEAN MODE);
+col1
+abcd
+ALTER TABLE t1 ADD (col2 varchar(100) character set latin1);
+UPDATE t1 SET col2=col1;
+SELECT * FROM t1 WHERE col1=col2 ORDER BY col1;
+col1	col2
+abcd	abcd
+efgh	efgh
+ijkl	ijkl
+DROP TABLE t1;
 Vietnamese experimental collation
 show collation like 'ucs2_vn_ci';
 Collation	Charset	Id	Default	Compiled	Sortlen

=== modified file 'mysql-test/r/innodb_mrr.result'
--- a/mysql-test/r/innodb_mrr.result	2008-12-29 03:42:30 +0000
+++ b/mysql-test/r/innodb_mrr.result	2009-01-25 16:59:07 +0000
@@ -270,8 +270,28 @@ bb-1	NULL	cc-2	NULL-4
 bb-1	NULL	cc-2	NULL-3
 bb-1	NULL	cc-2	NULL-2
 bb-1	NULL	cc-2	NULL-1
-set storage_engine= @save_storage_engine;
 drop table t1, t2, t3, t4;
+create table t1 (a int, b int not null,unique key (a,b),index(b));
+insert ignore into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(null,7),(9,9),(8,8),(7,7),(null,9),(null,9),(6,6);
+create table t2 like t1;
+insert into t2 select * from t1;
+alter table t1 modify b blob not null, add c int not null, drop key a, add unique key (a,b(20),c), drop key b, add key (b(10));
+select * from t1 where a is null;
+a	b	c
+NULL	7	0
+NULL	9	0
+NULL	9	0
+select * from t1 where (a is null or a > 0 and a < 3) and b > 7 limit 3;
+a	b	c
+NULL	9	0
+NULL	9	0
+select * from t1 where a is null and b=9 or a is null and b=7 limit 3;
+a	b	c
+NULL	7	0
+NULL	9	0
+NULL	9	0
+drop table t1, t2;
+set storage_engine= @save_storage_engine;
 set @read_rnd_buffer_size_save= @@read_rnd_buffer_size;
 set read_rnd_buffer_size=64;
 create table t1(a int);

=== modified file 'mysql-test/r/join_cache.result'
--- a/mysql-test/r/join_cache.result	2009-01-08 05:47:10 +0000
+++ b/mysql-test/r/join_cache.result	2009-01-14 10:29:36 +0000
@@ -867,7 +867,7 @@ CountryLanguage.Percentage > 50;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	SIMPLE	CountryLanguage	ALL	PRIMARY,Percentage	NULL	NULL	NULL	984	Using where
 1	SIMPLE	Country	eq_ref	PRIMARY	PRIMARY	3	world.CountryLanguage.Country	1	Using where; Using join buffer
-1	SIMPLE	City	ref	Country	Country	3	world.Country.Code	18	Using where; Using join buffer
+1	SIMPLE	City	ref	Country	Country	3	world.Country.Code	18	Using index condition(BKA); Using where; Using join buffer
 SELECT City.Name, Country.Name, CountryLanguage.Language
 FROM City,Country,CountryLanguage
 WHERE City.Country=Country.Code AND
@@ -1174,7 +1174,7 @@ CountryLanguage.Percentage > 50;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	SIMPLE	CountryLanguage	ALL	PRIMARY,Percentage	NULL	NULL	NULL	984	Using where
 1	SIMPLE	Country	eq_ref	PRIMARY	PRIMARY	3	world.CountryLanguage.Country	1	Using where; Using join buffer
-1	SIMPLE	City	ref	Country	Country	3	world.Country.Code	18	Using where; Using join buffer
+1	SIMPLE	City	ref	Country	Country	3	world.Country.Code	18	Using index condition(BKA); Using where; Using join buffer
 SELECT City.Name, Country.Name, CountryLanguage.Language
 FROM City,Country,CountryLanguage
 WHERE City.Country=Country.Code AND
@@ -1481,7 +1481,7 @@ CountryLanguage.Percentage > 50;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	SIMPLE	CountryLanguage	ALL	PRIMARY,Percentage	NULL	NULL	NULL	984	Using where
 1	SIMPLE	Country	eq_ref	PRIMARY	PRIMARY	3	world.CountryLanguage.Country	1	Using where; Using join buffer
-1	SIMPLE	City	ref	Country	Country	3	world.Country.Code	18	Using where; Using join buffer
+1	SIMPLE	City	ref	Country	Country	3	world.Country.Code	18	Using index condition(BKA); Using where; Using join buffer
 SELECT City.Name, Country.Name, CountryLanguage.Language
 FROM City,Country,CountryLanguage
 WHERE City.Country=Country.Code AND
@@ -1788,7 +1788,7 @@ CountryLanguage.Percentage > 50;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	SIMPLE	CountryLanguage	ALL	PRIMARY,Percentage	NULL	NULL	NULL	984	Using where
 1	SIMPLE	Country	eq_ref	PRIMARY	PRIMARY	3	world.CountryLanguage.Country	1	Using where; Using join buffer
-1	SIMPLE	City	ref	Country	Country	3	world.Country.Code	18	Using where; Using join buffer
+1	SIMPLE	City	ref	Country	Country	3	world.Country.Code	18	Using index condition(BKA); Using where; Using join buffer
 SELECT City.Name, Country.Name, CountryLanguage.Language
 FROM City,Country,CountryLanguage
 WHERE City.Country=Country.Code AND
@@ -2099,7 +2099,7 @@ CountryLanguage.Percentage > 50;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	SIMPLE	CountryLanguage	ALL	PRIMARY,Percentage	NULL	NULL	NULL	984	Using where
 1	SIMPLE	Country	eq_ref	PRIMARY	PRIMARY	3	world.CountryLanguage.Country	1	Using where; Using join buffer
-1	SIMPLE	City	ref	Country	Country	3	world.Country.Code	18	Using where; Using join buffer
+1	SIMPLE	City	ref	Country	Country	3	world.Country.Code	18	Using index condition(BKA); Using where; Using join buffer
 SELECT City.Name, Country.Name, CountryLanguage.Language
 FROM City,Country,CountryLanguage
 WHERE City.Country=Country.Code AND
@@ -2313,7 +2313,7 @@ CountryLanguage.Percentage > 50;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	SIMPLE	CountryLanguage	ALL	PRIMARY,Percentage	NULL	NULL	NULL	984	Using where
 1	SIMPLE	Country	eq_ref	PRIMARY	PRIMARY	3	world.CountryLanguage.Country	1	Using where; Using join buffer
-1	SIMPLE	City	ref	Country	Country	3	world.Country.Code	18	Using where; Using join buffer
+1	SIMPLE	City	ref	Country	Country	3	world.Country.Code	18	Using index condition(BKA); Using where; Using join buffer
 SELECT City.Name, Country.Name, CountryLanguage.Language
 FROM City,Country,CountryLanguage
 WHERE City.Country=Country.Code AND
@@ -2527,7 +2527,7 @@ CountryLanguage.Percentage > 50;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	SIMPLE	CountryLanguage	ALL	PRIMARY,Percentage	NULL	NULL	NULL	984	Using where
 1	SIMPLE	Country	eq_ref	PRIMARY	PRIMARY	3	world.CountryLanguage.Country	1	Using where; Using join buffer
-1	SIMPLE	City	ref	Country	Country	3	world.Country.Code	18	Using where; Using join buffer
+1	SIMPLE	City	ref	Country	Country	3	world.Country.Code	18	Using index condition(BKA); Using where; Using join buffer
 SELECT City.Name, Country.Name, CountryLanguage.Language
 FROM City,Country,CountryLanguage
 WHERE City.Country=Country.Code AND
@@ -2741,7 +2741,7 @@ CountryLanguage.Percentage > 50;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	SIMPLE	CountryLanguage	ALL	PRIMARY,Percentage	NULL	NULL	NULL	984	Using where
 1	SIMPLE	Country	eq_ref	PRIMARY	PRIMARY	3	world.CountryLanguage.Country	1	Using where; Using join buffer
-1	SIMPLE	City	ref	Country	Country	3	world.Country.Code	18	Using where; Using join buffer
+1	SIMPLE	City	ref	Country	Country	3	world.Country.Code	18	Using index condition(BKA); Using where; Using join buffer
 SELECT City.Name, Country.Name, CountryLanguage.Language
 FROM City,Country,CountryLanguage
 WHERE City.Country=Country.Code AND
@@ -3619,3 +3619,171 @@ COUNT(*)
 set join_buffer_size=default;
 set join_cache_level=default;
 DROP TABLE t1,t2,t3;
+#
+# Bug #42020: join buffer is used  for outer join with fields of 
+#             several outer tables in join buffer
+#
+CREATE TABLE t1 (
+a bigint NOT NULL,
+PRIMARY KEY (a) 
+);
+INSERT INTO t1 VALUES
+(2), (1);
+CREATE TABLE t2 (
+a bigint NOT NULL,
+b bigint NOT NULL,
+PRIMARY KEY (a,b)
+);
+INSERT INTO t2 VALUES
+(2,30), (2,40), (2,50), (2,60), (2,70), (2,80),
+(1,10), (1, 20), (1,30), (1,40), (1,50);
+CREATE TABLE t3 (
+pk bigint NOT NULL AUTO_INCREMENT,
+a bigint NOT NULL,
+b bigint NOT NULL,
+val bigint DEFAULT '0',
+PRIMARY KEY (pk),
+KEY idx (a,b)
+);
+INSERT INTO t3(a,b) VALUES
+(2,30), (2,40), (2,50), (2,60), (2,70), (2,80),
+(4,30), (4,40), (4,50), (4,60), (4,70), (4,80),
+(5,30), (5,40), (5,50), (5,60), (5,70), (5,80),
+(7,30), (7,40), (7,50), (7,60), (7,70), (7,80);
+SELECT t1.a, t2.a, t3.a, t2.b, t3.b, t3.val 
+FROM (t1,t2) LEFT JOIN t3 ON (t1.a=t3.a AND t2.b=t3.b) 
+WHERE t1.a=t2.a;
+a	a	a	b	b	val
+1	1	NULL	10	NULL	NULL
+1	1	NULL	20	NULL	NULL
+1	1	NULL	30	NULL	NULL
+1	1	NULL	40	NULL	NULL
+1	1	NULL	50	NULL	NULL
+2	2	2	30	30	0
+2	2	2	40	40	0
+2	2	2	50	50	0
+2	2	2	60	60	0
+2	2	2	70	70	0
+2	2	2	80	80	0
+set join_cache_level=6;
+set join_buffer_size=256;
+EXPLAIN
+SELECT t1.a, t2.a, t3.a, t2.b, t3.b, t3.val 
+FROM (t1,t2) LEFT JOIN t3 ON (t1.a=t3.a AND t2.b=t3.b) 
+WHERE t1.a=t2.a;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t1	index	PRIMARY	PRIMARY	8	NULL	2	Using index
+1	SIMPLE	t2	ref	PRIMARY	PRIMARY	8	test.t1.a	1	Using index
+1	SIMPLE	t3	ref	idx	idx	16	test.t1.a,test.t2.b	2	Using join buffer
+SELECT t1.a, t2.a, t3.a, t2.b, t3.b, t3.val 
+FROM (t1,t2) LEFT JOIN t3 ON (t1.a=t3.a AND t2.b=t3.b) 
+WHERE t1.a=t2.a;
+a	a	a	b	b	val
+2	2	2	30	30	0
+1	1	NULL	10	NULL	NULL
+1	1	NULL	20	NULL	NULL
+1	1	NULL	30	NULL	NULL
+1	1	NULL	40	NULL	NULL
+1	1	NULL	50	NULL	NULL
+2	2	2	40	40	0
+2	2	2	50	50	0
+2	2	2	60	60	0
+2	2	2	70	70	0
+2	2	2	80	80	0
+DROP INDEX idx ON t3;
+set join_cache_level=4;
+EXPLAIN
+SELECT t1.a, t2.a, t3.a, t2.b, t3.b, t3.val 
+FROM (t1,t2) LEFT JOIN t3 ON (t1.a=t3.a AND t2.b=t3.b) 
+WHERE t1.a=t2.a;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t1	index	PRIMARY	PRIMARY	8	NULL	2	Using index
+1	SIMPLE	t2	ref	PRIMARY	PRIMARY	8	test.t1.a	1	Using index
+1	SIMPLE	t3	ALL	NULL	NULL	NULL	NULL	24	Using where; Using join buffer
+SELECT t1.a, t2.a, t3.a, t2.b, t3.b, t3.val 
+FROM (t1,t2) LEFT JOIN t3 ON (t1.a=t3.a AND t2.b=t3.b) 
+WHERE t1.a=t2.a;
+a	a	a	b	b	val
+2	2	2	30	30	0
+2	2	2	40	40	0
+2	2	2	50	50	0
+2	2	2	60	60	0
+1	1	NULL	10	NULL	NULL
+1	1	NULL	20	NULL	NULL
+1	1	NULL	30	NULL	NULL
+1	1	NULL	40	NULL	NULL
+1	1	NULL	50	NULL	NULL
+2	2	2	70	70	0
+2	2	2	80	80	0
+set join_buffer_size=default;
+set join_cache_level=default;
+DROP TABLE t1,t2,t3;
+create table t1(f1 int, f2 int);
+insert into t1 values (1,1),(2,2),(3,3);
+create table t2(f1 int not null, f2 int not null, f3 char(200), key(f1,f2));
+insert into t2 values (1,1, 'qwerty'),(1,2, 'qwerty'),(1,3, 'qwerty');
+insert into t2 values (2,1, 'qwerty'),(2,2, 'qwerty'),(2,3, 'qwerty'),
+(2,4, 'qwerty'),(2,5, 'qwerty');
+insert into t2 values (3,1, 'qwerty'),(3,4, 'qwerty');
+insert into t2 values (4,1, 'qwerty'),(4,2, 'qwerty'),(4,3, 'qwerty'),
+(4,4, 'qwerty');
+insert into t2 values (1,1, 'qwerty'),(1,2, 'qwerty'),(1,3, 'qwerty');
+insert into t2 values (2,1, 'qwerty'),(2,2, 'qwerty'),(2,3, 'qwerty'),
+(2,4, 'qwerty'),(2,5, 'qwerty');
+insert into t2 values (3,1, 'qwerty'),(3,4, 'qwerty');
+insert into t2 values (4,1, 'qwerty'),(4,2, 'qwerty'),(4,3, 'qwerty'),
+(4,4, 'qwerty');
+set join_cache_level=5;
+select t2.f1, t2.f2, t2.f3 from t1,t2
+where t1.f1=t2.f1 and t2.f2 between t1.f1 and t1.f2 and t2.f2 + 1 >= t1.f1 + 1;
+f1	f2	f3
+1	1	qwerty
+2	2	qwerty
+1	1	qwerty
+2	2	qwerty
+explain select t2.f1, t2.f2, t2.f3 from t1,t2
+where t1.f1=t2.f1 and t2.f2 between t1.f1 and t2.f2;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t1	ALL	NULL	NULL	NULL	NULL	3	
+1	SIMPLE	t2	ref	f1	f1	4	test.t1.f1	3	Using index condition(BKA); Using join buffer
+set join_cache_level=6;
+select t2.f1, t2.f2, t2.f3 from t1,t2
+where t1.f1=t2.f1 and t2.f2 between t1.f1 and t1.f2 and t2.f2 + 1 >= t1.f1 + 1;
+f1	f2	f3
+1	1	qwerty
+2	2	qwerty
+1	1	qwerty
+2	2	qwerty
+explain select t2.f1, t2.f2, t2.f3 from t1,t2
+where t1.f1=t2.f1 and t2.f2 between t1.f1 and t2.f2;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t1	ALL	NULL	NULL	NULL	NULL	3	
+1	SIMPLE	t2	ref	f1	f1	4	test.t1.f1	3	Using index condition(BKA); Using join buffer
+set join_cache_level=7;
+select t2.f1, t2.f2, t2.f3 from t1,t2
+where t1.f1=t2.f1 and t2.f2 between t1.f1 and t1.f2 and t2.f2 + 1 >= t1.f1 + 1;
+f1	f2	f3
+1	1	qwerty
+2	2	qwerty
+1	1	qwerty
+2	2	qwerty
+explain select t2.f1, t2.f2, t2.f3 from t1,t2
+where t1.f1=t2.f1 and t2.f2 between t1.f1 and t2.f2;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t1	ALL	NULL	NULL	NULL	NULL	3	
+1	SIMPLE	t2	ref	f1	f1	4	test.t1.f1	3	Using index condition(BKA); Using join buffer
+set join_cache_level=8;
+select t2.f1, t2.f2, t2.f3 from t1,t2
+where t1.f1=t2.f1 and t2.f2 between t1.f1 and t1.f2 and t2.f2 + 1 >= t1.f1 + 1;
+f1	f2	f3
+1	1	qwerty
+2	2	qwerty
+1	1	qwerty
+2	2	qwerty
+explain select t2.f1, t2.f2, t2.f3 from t1,t2
+where t1.f1=t2.f1 and t2.f2 between t1.f1 and t2.f2;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t1	ALL	NULL	NULL	NULL	NULL	3	
+1	SIMPLE	t2	ref	f1	f1	4	test.t1.f1	3	Using index condition(BKA); Using join buffer
+drop table t1,t2;
+set join_cache_level=default;

=== modified file 'mysql-test/r/join_nested_jcl6.result'
--- a/mysql-test/r/join_nested_jcl6.result	2008-10-09 00:31:34 +0000
+++ b/mysql-test/r/join_nested_jcl6.result	2009-01-14 10:29:36 +0000
@@ -1451,12 +1451,12 @@ id	select_type	table	type	possible_keys
 1	SIMPLE	t2	ALL	NULL	NULL	NULL	NULL	X	
 1	SIMPLE	t3	ref	a	a	5	test.t2.b	X	Using join buffer
 1	SIMPLE	t5	ref	a	a	5	test.t3.b	X	Using join buffer
-1	SIMPLE	t4	ref	a	a	5	test.t3.b	X	Using where; Using join buffer
+1	SIMPLE	t4	ref	a	a	5	test.t3.b	X	Using index condition(BKA); Using join buffer
 explain select * from (t4 join t6 on t6.a=t4.b) right join t3 on t4.a=t3.b
 join t2 left join (t5 join t7 on t7.a=t5.b) on t5.a=t2.b where t3.a<=>t2.b;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	SIMPLE	t2	ALL	NULL	NULL	NULL	NULL	X	
-1	SIMPLE	t3	ref	a	a	5	test.t2.b	X	Using where; Using join buffer
+1	SIMPLE	t3	ref	a	a	5	test.t2.b	X	Using index condition(BKA); Using join buffer
 1	SIMPLE	t4	ref	a	a	5	test.t3.b	X	Using join buffer
 1	SIMPLE	t6	ref	a	a	5	test.t4.b	X	Using join buffer
 1	SIMPLE	t5	ref	a	a	5	test.t2.b	X	Using join buffer

=== added file 'mysql-test/r/join_optimizer.result'
--- a/mysql-test/r/join_optimizer.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/r/join_optimizer.result	2009-01-26 19:42:59 +0000
@@ -0,0 +1,37 @@
+drop table if exists t0,t1,t2,t3;
+#
+# BUG#38049 incorrect rows estimations with references from preceding table
+#
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (a varchar(32));
+insert into t1 values ('owner'),('requester'),('admincc'),('cc');
+CREATE TABLE t2 (
+id int(11) NOT NULL,
+type varchar(32) default NULL,
+PRIMARY KEY  (id)
+);
+insert into t2 values (1,'owner'), (2,'admincc');
+CREATE TABLE t3 (
+id int(11) NOT NULL,
+domain varchar(32) default NULL,
+type varchar(32) default NULL,
+PRIMARY KEY  (id)
+);
+set @domain='system';
+set @pk=0;
+INSERT INTO t3 select @pk:=@pk+1, 'system', t1.a from t1;
+INSERT INTO t3 select @pk:=@pk+1, 'queue', t1.a from t1, t0 where t0.a<3;
+INSERT INTO t3 select @pk:=@pk+1, 'ticket', t1.a from t1, t0 A, t0 B, t0 C;
+CREATE INDEX groups_d ON t3(domain);
+CREATE INDEX groups_t ON t3(type);
+CREATE INDEX groups_td ON t3(type, domain);
+CREATE INDEX groups_dt ON t3(domain, type);
+For table g this must use ref(groups_dt) and #rows should be around 15 and not 335:
+explain 
+SELECT STRAIGHT_JOIN g.id FROM t2 a, t3 g USE INDEX(groups_dt) 
+WHERE g.domain = 'queue' AND g.type = a.type;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	a	ALL	NULL	NULL	NULL	NULL	2	
+1	SIMPLE	g	ref	groups_dt	groups_dt	70	const,test.a.type	13	Using index condition
+drop table t0,t1,t2,t3;

=== modified file 'mysql-test/r/maria_mrr.result'
--- a/mysql-test/r/maria_mrr.result	2008-09-05 06:10:12 +0000
+++ b/mysql-test/r/maria_mrr.result	2009-01-25 16:59:07 +0000
@@ -275,6 +275,26 @@ bb-1	NULL	cc-2	NULL-4
 bb-1	NULL	cc-2	NULL-3
 bb-1	NULL	cc-2	NULL-2
 bb-1	NULL	cc-2	NULL-1
+drop table t1, t2, t3, t4;
+create table t1 (a int, b int not null,unique key (a,b),index(b));
+insert ignore into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(null,7),(9,9),(8,8),(7,7),(null,9),(null,9),(6,6);
+create table t2 like t1;
+insert into t2 select * from t1;
+alter table t1 modify b blob not null, add c int not null, drop key a, add unique key (a,b(20),c), drop key b, add key (b(10));
+select * from t1 where a is null;
+a	b	c
+NULL	7	0
+NULL	9	0
+NULL	9	0
+select * from t1 where (a is null or a > 0 and a < 3) and b > 7 limit 3;
+a	b	c
+NULL	9	0
+NULL	9	0
+select * from t1 where a is null and b=9 or a is null and b=7 limit 3;
+a	b	c
+NULL	7	0
+NULL	9	0
+NULL	9	0
+drop table t1, t2;
 set storage_engine= @save_storage_engine;
 set @@read_rnd_buffer_size= @read_rnd_buffer_size_save;
-drop table t1, t2, t3, t4;

=== added file 'mysql-test/r/myisam_keycache_coverage.result'
--- a/mysql-test/r/myisam_keycache_coverage.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/r/myisam_keycache_coverage.result	2009-01-21 15:00:23 +0000
@@ -0,0 +1,52 @@
+#
+# MyISAM keycache coverage tests.
+#
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (c1 VARCHAR(5), c2 int);
+CREATE INDEX i1 ON t1 (c1, c2);
+INSERT INTO t1 VALUES ('A',1);
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+#
+# Positive tests.
+#
+SELECT COUNT(*) FROM t1 WHERE c2 < 5;
+COUNT(*)
+8
+LOAD INDEX INTO CACHE t1;
+Table	Op	Msg_type	Msg_text
+test.t1	preload_keys	status	OK
+UPDATE t1 SET c2=2;
+#
+# Close table and clear cache.
+#
+FLUSH TABLE t1;
+#
+# Inject error key_cache_read_block_error
+#
+SET debug='d,key_cache_read_block_error';
+SELECT COUNT(*) FROM t1 WHERE c2 < 5;
+ERROR HY000: Incorrect key file for table 't1.MYI'; try to repair it
+FLUSH TABLE t1;
+#
+# Inject error key_cache_insert_block_error
+#
+SET debug='d,key_cache_insert_block_error';
+LOAD INDEX INTO CACHE t1;
+Table	Op	Msg_type	Msg_text
+test.t1	preload_keys	error	Failed to read from index file (errno: 5)
+test.t1	preload_keys	status	Operation failed
+FLUSH TABLE t1;
+#
+# Inject error key_cache_write_block_error
+#
+SET debug='d,key_cache_write_block_error';
+UPDATE t1 SET c2=1;
+ERROR HY000: Incorrect key file for table 't1.MYI'; try to repair it
+FLUSH TABLE t1;
+#
+# Cleanup
+#
+SET debug='';
+DROP TABLE t1;

=== modified file 'mysql-test/r/myisam_mrr.result'
--- a/mysql-test/r/myisam_mrr.result	2008-12-08 21:15:06 +0000
+++ b/mysql-test/r/myisam_mrr.result	2009-01-26 11:21:27 +0000
@@ -273,8 +273,28 @@ bb-1	NULL	cc-2	NULL-4
 bb-1	NULL	cc-2	NULL-3
 bb-1	NULL	cc-2	NULL-2
 bb-1	NULL	cc-2	NULL-1
-set @@read_rnd_buffer_size= @read_rnd_buffer_size_save;
 drop table t1, t2, t3, t4;
+create table t1 (a int, b int not null,unique key (a,b),index(b));
+insert ignore into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(null,7),(9,9),(8,8),(7,7),(null,9),(null,9),(6,6);
+create table t2 like t1;
+insert into t2 select * from t1;
+alter table t1 modify b blob not null, add c int not null, drop key a, add unique key (a,b(20),c), drop key b, add key (b(10));
+select * from t1 where a is null;
+a	b	c
+NULL	7	0
+NULL	9	0
+NULL	9	0
+select * from t1 where (a is null or a > 0 and a < 3) and b > 7 limit 3;
+a	b	c
+NULL	9	0
+NULL	9	0
+select * from t1 where a is null and b=9 or a is null and b=7 limit 3;
+a	b	c
+NULL	7	0
+NULL	9	0
+NULL	9	0
+drop table t1, t2;
+set @@read_rnd_buffer_size= @read_rnd_buffer_size_save;
 CREATE TABLE t1 (
 ID int(10) unsigned NOT NULL AUTO_INCREMENT,
 col1 int(10) unsigned DEFAULT NULL,
@@ -332,3 +352,40 @@ id	select_type	table	type	possible_keys
 Warnings:
 Note	1003	select min(`test`.`t1`.`pk`) AS `MIN(t1.pk)` from `test`.`t1` where 0
 DROP TABLE t1, t2;
+#
+# BUG#42048 Discrepancy between MyISAM and Maria's ICP implementation
+#
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (a int, b char(20), filler char(200), key(a,b(10)));
+insert into t1 select A.a + 10*(B.a + 10*C.a), 'bbb','filler' from t0 A, t0 B, t0 C;
+update t1 set b=repeat(char(65+a), 20) where a < 25;
+This must show range + using index condition:
+explain select * from t1 where a < 10 and b = repeat(char(65+a), 20);
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t1	range	a	a	5	NULL	19	Using index condition; Using where
+select * from t1 where a < 10 and b = repeat(char(65+a), 20);
+a	b	filler
+0	AAAAAAAAAAAAAAAAAAAA	filler
+1	BBBBBBBBBBBBBBBBBBBB	filler
+2	CCCCCCCCCCCCCCCCCCCC	filler
+3	DDDDDDDDDDDDDDDDDDDD	filler
+4	EEEEEEEEEEEEEEEEEEEE	filler
+5	FFFFFFFFFFFFFFFFFFFF	filler
+6	GGGGGGGGGGGGGGGGGGGG	filler
+7	HHHHHHHHHHHHHHHHHHHH	filler
+8	IIIIIIIIIIIIIIIIIIII	filler
+9	JJJJJJJJJJJJJJJJJJJJ	filler
+drop table t0,t1;
+#
+# BUG#41136: ORDER BY + range access: EXPLAIN shows "Using MRR" while MRR is actually not used
+#
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (a int, b int, key(a));
+insert into t1 select A.a + 10 *(B.a + 10*C.a), A.a + 10 *(B.a + 10*C.a) from t0 A, t0 B, t0 C;
+This mustn't show "Using MRR":
+explain select * from t1 where a < 20  order by a;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t1	range	a	a	5	NULL	20	Using index condition
+drop table t0, t1;

=== modified file 'mysql-test/r/null_key.result'
--- a/mysql-test/r/null_key.result	2008-03-14 22:21:29 +0000
+++ b/mysql-test/r/null_key.result	2009-01-25 16:59:07 +0000
@@ -76,13 +76,13 @@ insert into t2 select * from t1;
 alter table t1 modify b blob not null, add c int not null, drop key a, add unique key (a,b(20),c), drop key b, add key (b(10));
 explain select * from t1 where a is null and b = 2;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
-1	SIMPLE	t1	ref	a,b	a	5	const	3	Using where
+1	SIMPLE	t1	ref	a,b	a	5	const	3	Using index condition; Using where
 explain select * from t1 where a is null and b = 2 and c=0;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
-1	SIMPLE	t1	ref	a,b	a	5	const	3	Using where
+1	SIMPLE	t1	ref	a,b	a	5	const	3	Using index condition; Using where
 explain select * from t1 where a is null and b = 7 and c=0;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
-1	SIMPLE	t1	ref	a,b	a	5	const	3	Using where
+1	SIMPLE	t1	ref	a,b	a	5	const	3	Using index condition; Using where
 explain select * from t1 where a=2 and b = 2;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	SIMPLE	t1	ref	a,b	a	5	const	1	Using where
@@ -91,25 +91,25 @@ id	select_type	table	type	possible_keys
 1	SIMPLE	t1	ALL	NULL	NULL	NULL	NULL	12	Using where
 explain select * from t1 where (a is null or a > 0 and a < 3) and b < 5 and c=0 limit 3;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
-1	SIMPLE	t1	range	a,b	a	5	NULL	5	Using where
+1	SIMPLE	t1	range	a,b	a	5	NULL	5	Using index condition; Using where
 explain select * from t1 where (a is null or a = 7) and b=7 and c=0;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
-1	SIMPLE	t1	ref_or_null	a,b	a	5	const	4	Using where
+1	SIMPLE	t1	ref_or_null	a,b	a	5	const	4	Using index condition; Using where
 explain select * from t1 where (a is null and b>a) or a is null and b=7 limit 2;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
-1	SIMPLE	t1	ref	a,b	a	5	const	3	Using where
+1	SIMPLE	t1	ref	a,b	a	5	const	3	Using index condition; Using where
 explain select * from t1 where a is null and b=9 or a is null and b=7 limit 3;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
-1	SIMPLE	t1	ref	a,b	a	5	const	3	Using where
+1	SIMPLE	t1	ref	a,b	a	5	const	3	Using index condition; Using where
 explain select * from t1 where a > 1 and a < 3 limit 1;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
-1	SIMPLE	t1	range	a	a	5	NULL	1	Using where
+1	SIMPLE	t1	range	a	a	5	NULL	1	Using index condition
 explain select * from t1 where a is null and b=7 or a > 1 and a < 3 limit 1;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
-1	SIMPLE	t1	range	a,b	a	5	NULL	4	Using where
+1	SIMPLE	t1	range	a,b	a	5	NULL	4	Using index condition; Using where
 explain select * from t1 where a > 8 and a < 9;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
-1	SIMPLE	t1	range	a	a	5	NULL	1	Using where
+1	SIMPLE	t1	range	a	a	5	NULL	1	Using index condition
 explain select * from t1 where b like "6%";
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	SIMPLE	t1	range	b	b	12	NULL	1	Using where

=== modified file 'mysql-test/r/order_by.result'
--- a/mysql-test/r/order_by.result	2008-12-24 10:48:24 +0000
+++ b/mysql-test/r/order_by.result	2009-01-29 21:17:59 +0000
@@ -609,7 +609,7 @@ FieldKey	LongVal	StringVal
 1	2	1
 EXPLAIN SELECT * FROM t1 ignore index (FieldKey, LongField) WHERE FieldKey > '2' ORDER BY LongVal;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
-1	SIMPLE	t1	range	StringField	StringField	38	NULL	4	Using where; Using filesort
+1	SIMPLE	t1	range	StringField	StringField	38	NULL	4	Using index condition; Using filesort
 SELECT * FROM t1 WHERE FieldKey > '2' ORDER BY LongVal;
 FieldKey	LongVal	StringVal
 3	1	2
@@ -1466,7 +1466,7 @@ SELECT d FROM t3 AS t1, t2 AS t2
 WHERE t2.b=14 AND t2.a=t1.a AND 5.1<t2.c AND t1.b='DE'
 ORDER BY t2.c LIMIT 1;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
-1	SIMPLE	t2	range	a,b,c	c	5	NULL	420	Using where; Using MRR
+1	SIMPLE	t2	range	a,b,c	c	5	NULL	420	Using where
 1	SIMPLE	t1	ref	a	a	39	test.t2.a,const	10	Using where; Using index
 SELECT d FROM t3 AS t1, t2 AS t2 
 WHERE t2.b=14 AND t2.a=t1.a AND 5.1<t2.c AND t1.b='DE'

=== modified file 'mysql-test/r/subselect.result'
--- a/mysql-test/r/subselect.result	2009-01-31 15:53:35 +0000
+++ b/mysql-test/r/subselect.result	2009-02-03 09:16:53 +0000
@@ -2965,7 +2965,7 @@ ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	PRIMARY	t1	system	PRIMARY	NULL	NULL	NULL	1	
 1	PRIMARY	r	const	PRIMARY	PRIMARY	4	const	1	
-2	DEPENDENT SUBQUERY	t2	range	b	b	40	NULL	2	Using index condition; Using MRR
+2	DEPENDENT SUBQUERY	t2	range	b	b	40	NULL	2	Using index condition
 SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r 
 ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899' 
 ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10;

=== modified file 'mysql-test/r/subselect3_jcl6.result'
--- a/mysql-test/r/subselect3_jcl6.result	2009-01-28 18:53:58 +0000
+++ b/mysql-test/r/subselect3_jcl6.result	2009-02-03 09:16:53 +0000
@@ -728,7 +728,7 @@ WHERE t3.name='xxx' AND t2.id=t3.id);
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	PRIMARY	t1	ALL	NULL	NULL	NULL	NULL	4	Using where
 2	DEPENDENT SUBQUERY	t2	eq_ref	PRIMARY	PRIMARY	4	func	1	Using where; Using index; Full scan on NULL key
-2	DEPENDENT SUBQUERY	t3	eq_ref	PRIMARY	PRIMARY	4	func	1	Using where; Full scan on NULL key; Using join buffer
+2	DEPENDENT SUBQUERY	t3	eq_ref	PRIMARY	PRIMARY	4	func	1	Using index condition(BKA); Using where; Full scan on NULL key; Using join buffer
 SELECT * FROM t1
 WHERE t1.id NOT IN (SELECT t2.id FROM t2,t3 
 WHERE t3.name='xxx' AND t2.id=t3.id);

=== modified file 'mysql-test/r/subselect_no_mat.result'
--- a/mysql-test/r/subselect_no_mat.result	2009-01-31 15:53:35 +0000
+++ b/mysql-test/r/subselect_no_mat.result	2009-02-03 09:16:53 +0000
@@ -2969,7 +2969,7 @@ ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	PRIMARY	t1	system	PRIMARY	NULL	NULL	NULL	1	
 1	PRIMARY	r	const	PRIMARY	PRIMARY	4	const	1	
-2	DEPENDENT SUBQUERY	t2	range	b	b	40	NULL	2	Using index condition; Using MRR
+2	DEPENDENT SUBQUERY	t2	range	b	b	40	NULL	2	Using index condition
 SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r 
 ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899' 
 ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10;

=== modified file 'mysql-test/r/subselect_no_opts.result'
--- a/mysql-test/r/subselect_no_opts.result	2009-01-31 15:53:35 +0000
+++ b/mysql-test/r/subselect_no_opts.result	2009-02-03 09:16:53 +0000
@@ -2969,7 +2969,7 @@ ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	PRIMARY	t1	system	PRIMARY	NULL	NULL	NULL	1	
 1	PRIMARY	r	const	PRIMARY	PRIMARY	4	const	1	
-2	DEPENDENT SUBQUERY	t2	range	b	b	40	NULL	2	Using index condition; Using MRR
+2	DEPENDENT SUBQUERY	t2	range	b	b	40	NULL	2	Using index condition
 SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r 
 ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899' 
 ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10;

=== modified file 'mysql-test/r/subselect_no_semijoin.result'
--- a/mysql-test/r/subselect_no_semijoin.result	2009-01-31 15:53:35 +0000
+++ b/mysql-test/r/subselect_no_semijoin.result	2009-02-03 09:16:53 +0000
@@ -2969,7 +2969,7 @@ ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	PRIMARY	t1	system	PRIMARY	NULL	NULL	NULL	1	
 1	PRIMARY	r	const	PRIMARY	PRIMARY	4	const	1	
-2	DEPENDENT SUBQUERY	t2	range	b	b	40	NULL	2	Using index condition; Using MRR
+2	DEPENDENT SUBQUERY	t2	range	b	b	40	NULL	2	Using index condition
 SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r 
 ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899' 
 ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10;

=== modified file 'mysql-test/r/subselect_sj.result'
--- a/mysql-test/r/subselect_sj.result	2008-12-22 19:03:25 +0000
+++ b/mysql-test/r/subselect_sj.result	2009-01-28 19:24:54 +0000
@@ -236,7 +236,7 @@ select @@optimizer_switch;
 @@optimizer_switch
 no_materialization,no_loosescan
 set optimizer_switch='';
-drop table t0, t1;
+drop table t0, t1, t2;
 drop table t10, t11, t12;
 
 Bug#37899: Wrongly checked optimization prerequisite caused failed
@@ -261,3 +261,69 @@ pk	varchar_nokey
 2	j
 3	aew
 drop table t1;
+#
+# BUG#41842: Semi-join materialization strategy crashes when the upper query has HAVING
+#
+CREATE TABLE t1 (
+pk int(11) NOT NULL AUTO_INCREMENT,
+int_nokey int(11) NOT NULL,
+time_key time NOT NULL,
+datetime_key datetime NOT NULL,
+datetime_nokey datetime NOT NULL,
+varchar_key varchar(1) NOT NULL,
+varchar_nokey varchar(1) NOT NULL,
+PRIMARY KEY (pk),
+KEY time_key (time_key),
+KEY datetime_key (datetime_key),
+KEY varchar_key (varchar_key)
+);
+INSERT INTO t1 VALUES 
+(1,0, '00:16:10','2008-09-03 14:25:40','2008-09-03 14:25:40','h','h'),
+(2,7, '00:00:00','2001-01-13 00:00:00','2001-01-13 00:00:00','',''),
+(3,0, '00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','x','x'),
+(4,2, '16:29:24','2000-10-16 01:39:08','2000-10-16 01:39:08','w','w'),
+(5,1, '09:23:32','0000-00-00 00:00:00','0000-00-00 00:00:00','p','p'),
+(6,3, '00:00:00','2007-12-02 00:00:00','2007-12-02 00:00:00','o','o'),
+(7,3, '00:00:00','2008-09-11 00:00:00','2008-09-11 00:00:00','',''),
+(8,0, '13:59:04','0000-00-00 00:00:00','0000-00-00 00:00:00','s','s'),
+(9,7, '09:01:06','0000-00-00 00:00:00','0000-00-00 00:00:00','d','d'),
+(10,5,'00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','n','n'),
+(11,0,'21:06:46','0000-00-00 00:00:00','0000-00-00 00:00:00','o','o'),
+(12,2,'00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','',''),
+(13,6,'14:45:34','2003-07-28 02:34:08','2003-07-28 02:34:08','w','w'),
+(14,1,'15:04:12','0000-00-00 00:00:00','0000-00-00 00:00:00','o','o'),
+(15,0,'00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','x','x'),
+(16,0,'15:55:23','2004-03-17 00:32:27','2004-03-17 00:32:27','p','p'),
+(17,1,'16:30:00','2004-12-27 19:20:00','2004-12-27 19:20:00','d','d'),
+(18,0,'00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','h','h'),
+(19,0,'14:13:26','2008-11-09 05:53:48','2008-11-09 05:53:48','o','o'),
+(20,0,'00:00:00','2009-10-11 06:58:04','2009-10-11 06:58:04','k','k');
+CREATE TABLE t2 (
+pk int(11) NOT NULL AUTO_INCREMENT,
+int_nokey int(11) NOT NULL,
+time_key time NOT NULL,
+datetime_key datetime NOT NULL,
+datetime_nokey datetime NOT NULL,
+varchar_key varchar(1) NOT NULL,
+varchar_nokey varchar(1) NOT NULL,
+PRIMARY KEY (pk),
+KEY time_key (time_key),
+KEY datetime_key (datetime_key),
+KEY varchar_key (varchar_key)
+);
+INSERT INTO t2 VALUES 
+(10,0,'19:39:13','0000-00-00 00:00:00','0000-00-00 00:00:00','g','g'),
+(11,8,'03:43:53','0000-00-00 00:00:00','0000-00-00 00:00:00','b','b');
+SELECT OUTR.datetime_nokey AS X FROM t1 AS OUTR 
+WHERE 
+OUTR.varchar_nokey IN (SELECT 
+INNR . varchar_nokey AS Y 
+FROM t2 AS INNR 
+WHERE
+INNR . datetime_key >= INNR . time_key OR 
+INNR . pk = INNR . int_nokey  
+) 
+AND OUTR . varchar_nokey <= 'w' 
+HAVING X > '2012-12-12';
+X
+drop table t1, t2;

=== modified file 'mysql-test/r/subselect_sj2_jcl6.result'
--- a/mysql-test/r/subselect_sj2_jcl6.result	2008-12-22 19:03:25 +0000
+++ b/mysql-test/r/subselect_sj2_jcl6.result	2009-01-14 10:29:36 +0000
@@ -306,7 +306,7 @@ t2.Population > 100000);
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	PRIMARY	t1	range	Population,Country	Population	4	NULL	1	Using index condition; Using MRR
 1	PRIMARY	t3	eq_ref	PRIMARY,Percentage	PRIMARY	33	test.t1.Country,const	1	Using index condition; Using where; Using join buffer
-1	PRIMARY	t2	eq_ref	PRIMARY,Population	PRIMARY	3	test.t3.Country	1	Using where; Using join buffer
+1	PRIMARY	t2	eq_ref	PRIMARY,Population	PRIMARY	3	test.t3.Country	1	Using index condition(BKA); Using where; Using join buffer
 DROP TABLE t1,t2,t3;
 CREATE TABLE t1 (
 Code char(3) NOT NULL DEFAULT '',

=== modified file 'mysql-test/r/subselect_sj_jcl6.result'
--- a/mysql-test/r/subselect_sj_jcl6.result	2008-12-22 19:03:25 +0000
+++ b/mysql-test/r/subselect_sj_jcl6.result	2009-01-28 19:24:54 +0000
@@ -240,7 +240,7 @@ select @@optimizer_switch;
 @@optimizer_switch
 no_materialization,no_loosescan
 set optimizer_switch='';
-drop table t0, t1;
+drop table t0, t1, t2;
 drop table t10, t11, t12;
 
 Bug#37899: Wrongly checked optimization prerequisite caused failed
@@ -265,6 +265,72 @@ pk	varchar_nokey
 2	j
 3	aew
 drop table t1;
+#
+# BUG#41842: Semi-join materialization strategy crashes when the upper query has HAVING
+#
+CREATE TABLE t1 (
+pk int(11) NOT NULL AUTO_INCREMENT,
+int_nokey int(11) NOT NULL,
+time_key time NOT NULL,
+datetime_key datetime NOT NULL,
+datetime_nokey datetime NOT NULL,
+varchar_key varchar(1) NOT NULL,
+varchar_nokey varchar(1) NOT NULL,
+PRIMARY KEY (pk),
+KEY time_key (time_key),
+KEY datetime_key (datetime_key),
+KEY varchar_key (varchar_key)
+);
+INSERT INTO t1 VALUES 
+(1,0, '00:16:10','2008-09-03 14:25:40','2008-09-03 14:25:40','h','h'),
+(2,7, '00:00:00','2001-01-13 00:00:00','2001-01-13 00:00:00','',''),
+(3,0, '00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','x','x'),
+(4,2, '16:29:24','2000-10-16 01:39:08','2000-10-16 01:39:08','w','w'),
+(5,1, '09:23:32','0000-00-00 00:00:00','0000-00-00 00:00:00','p','p'),
+(6,3, '00:00:00','2007-12-02 00:00:00','2007-12-02 00:00:00','o','o'),
+(7,3, '00:00:00','2008-09-11 00:00:00','2008-09-11 00:00:00','',''),
+(8,0, '13:59:04','0000-00-00 00:00:00','0000-00-00 00:00:00','s','s'),
+(9,7, '09:01:06','0000-00-00 00:00:00','0000-00-00 00:00:00','d','d'),
+(10,5,'00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','n','n'),
+(11,0,'21:06:46','0000-00-00 00:00:00','0000-00-00 00:00:00','o','o'),
+(12,2,'00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','',''),
+(13,6,'14:45:34','2003-07-28 02:34:08','2003-07-28 02:34:08','w','w'),
+(14,1,'15:04:12','0000-00-00 00:00:00','0000-00-00 00:00:00','o','o'),
+(15,0,'00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','x','x'),
+(16,0,'15:55:23','2004-03-17 00:32:27','2004-03-17 00:32:27','p','p'),
+(17,1,'16:30:00','2004-12-27 19:20:00','2004-12-27 19:20:00','d','d'),
+(18,0,'00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','h','h'),
+(19,0,'14:13:26','2008-11-09 05:53:48','2008-11-09 05:53:48','o','o'),
+(20,0,'00:00:00','2009-10-11 06:58:04','2009-10-11 06:58:04','k','k');
+CREATE TABLE t2 (
+pk int(11) NOT NULL AUTO_INCREMENT,
+int_nokey int(11) NOT NULL,
+time_key time NOT NULL,
+datetime_key datetime NOT NULL,
+datetime_nokey datetime NOT NULL,
+varchar_key varchar(1) NOT NULL,
+varchar_nokey varchar(1) NOT NULL,
+PRIMARY KEY (pk),
+KEY time_key (time_key),
+KEY datetime_key (datetime_key),
+KEY varchar_key (varchar_key)
+);
+INSERT INTO t2 VALUES 
+(10,0,'19:39:13','0000-00-00 00:00:00','0000-00-00 00:00:00','g','g'),
+(11,8,'03:43:53','0000-00-00 00:00:00','0000-00-00 00:00:00','b','b');
+SELECT OUTR.datetime_nokey AS X FROM t1 AS OUTR 
+WHERE 
+OUTR.varchar_nokey IN (SELECT 
+INNR . varchar_nokey AS Y 
+FROM t2 AS INNR 
+WHERE
+INNR . datetime_key >= INNR . time_key OR 
+INNR . pk = INNR . int_nokey  
+) 
+AND OUTR . varchar_nokey <= 'w' 
+HAVING X > '2012-12-12';
+X
+drop table t1, t2;
 set join_cache_level=default;
 show variables like 'join_cache_level';
 Variable_name	Value

=== modified file 'mysql-test/r/view.result'
--- a/mysql-test/r/view.result	2008-12-24 10:48:24 +0000
+++ b/mysql-test/r/view.result	2009-01-29 21:17:59 +0000
@@ -2343,11 +2343,11 @@ CREATE VIEW v2 AS SELECT t3.* FROM t1,t3
 EXPLAIN SELECT t1.* FROM t1 JOIN t2 WHERE t1.a=t2.a AND t1.b=t2.b AND t1.a=1;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	SIMPLE	t1	ref	a	a	5	const	1	Using index
-1	SIMPLE	t2	ref	a	a	10	const,test.t1.b	2	Using index
+1	SIMPLE	t2	ref	a	a	10	const,test.t1.b	1	Using index
 EXPLAIN SELECT * FROM v1 WHERE a=1;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	SIMPLE	t1	ref	a	a	5	const	1	Using index
-1	SIMPLE	t2	ref	a	a	10	const,test.t1.b	2	Using index
+1	SIMPLE	t2	ref	a	a	10	const,test.t1.b	1	Using index
 EXPLAIN SELECT * FROM v2 WHERE a=1;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	SIMPLE	t1	ref	a	a	5	const	1	Using index

=== modified file 'mysql-test/std_data/client-cert.pem'
--- a/mysql-test/std_data/client-cert.pem	2009-01-28 15:00:28 +0000
+++ b/mysql-test/std_data/client-cert.pem	2009-02-03 09:16:53 +0000
@@ -7,7 +7,7 @@ Certificate:
         Validity
             Not Before: Jan 28 11:04:39 2009 GMT
             Not After : Jan 28 11:04:39 2010 GMT
-        Subject: C=SE, ST=Uppsala, O=MySQL AB/emailAddress=abstract.mysql.developer@stripped
+        Subject: C=SE, ST=Uppsala, O=MySQL AB/emailAddress=abstract.mysql.developer@stripped
         Subject Public Key Info:
             Public Key Algorithm: rsaEncryption
             RSA Public Key: (512 bit)

=== modified file 'mysql-test/std_data/server-cert.pem'
--- a/mysql-test/std_data/server-cert.pem	2009-01-28 15:00:28 +0000
+++ b/mysql-test/std_data/server-cert.pem	2009-02-03 09:16:53 +0000
@@ -7,7 +7,7 @@ Certificate:
         Validity
             Not Before: Jan 28 10:55:13 2009 GMT
             Not After : Jan 28 10:55:13 2010 GMT
-        Subject: C=SE, ST=Uppsala, O=MySQL AB, CN=localhost/emailAddress=abstract.mysql.developer@stripped
+        Subject: C=SE, ST=Uppsala, O=MySQL AB, CN=localhost/emailAddress=abstract.mysql.developer@stripped
         Subject Public Key Info:
             Public Key Algorithm: rsaEncryption
             RSA Public Key: (512 bit)

=== added file 'mysql-test/suite/backup/r/backup_datatypes.result'
--- a/mysql-test/suite/backup/r/backup_datatypes.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/backup/r/backup_datatypes.result	2009-02-01 13:26:18 +0000
@@ -0,0 +1,232 @@
+
+starting the test for backup
+
+DROP DATABASE IF EXISTS `¥ü`;
+SET NAMES utf8;
+CREATE DATABASE `¥ü`;
+USE `¥ü`;
+Create table with all datatypes and load with data.
+CREATE TABLE `§Æ`(
+rint INT,
+tint TINYINT,
+sint SMALLINT,
+bint BIGINT,
+mint MEDIUMINT,
+name CHAR(100),
+city  VARCHAR(100),
+fl FLOAT(7,4),
+pers DECIMAL(8,2),
+sal DOUBLE,
+colours SET('red','blue','yellow'),
+continent ENUM('Asia', 'Europe','Africa','Antartica'),
+ts TIMESTAMP DEFAULT 0,
+dt DATETIME NOT NULL,
+dob DATE,
+time TIME,
+y YEAR
+);
+creating table with blob and text columns
+CREATE TABLE `§Æ2`(
+region TEXT,
+summary LONGTEXT,
+data BLOB,
+details MEDIUMBLOB,
+queries TINYTEXT,
+query2 TINYBLOB,
+extract LONGBLOB,
+paras MEDIUMTEXT
+);
+CREATE TABLE `§¶œ`(b1 BINARY(3), b2 VARBINARY(2),bitvalue BIT(64));
+INSERT INTO `§¶œ` VALUES(0x61,0x2130,b'1111111111111111111111111111111111111111111111111111111111111111'), (0x6120,0x4100,b'101010101010101'), (0x612020, 0x4120,b'000000001');
+SELECT HEX(b1), HEX(b2), HEX(bitvalue) FROM `§¶œ`;
+HEX(b1)	HEX(b2)	HEX(bitvalue)
+610000	2130	FFFFFFFFFFFFFFFF
+612000	4100	5555
+612020	4120	1
+INSERT INTO `§Æ` VALUES
+(785,127,7288,278829899,3777,'testing1','sweden','678.299',200.23,829899.909,
+'red','Asia','2008-06-01 16:23:30','98/12/31 11*30*45','1984-09-08','7:05','1984');
+INSERT INTO `§Æ2` VALUES
+('xxxxxxxx','Testofonline backup','aaaaaaaaaa','bbbbbbbbbbb','hhhhhhhhhhh',
+'kkkkkkkkkkkkk','mmmmmmmmmmmm','onlinebackup1');
+UPDATE `§Æ2` SET extract=repeat('z',100);
+SELECT * FROM `§Æ`;;
+rint	785
+tint	127
+sint	7288
+bint	278829899
+mint	3777
+name	testing1
+city	sweden
+fl	678.2990
+pers	200.23
+sal	829899.909
+colours	red
+continent	Asia
+ts	2008-06-01 16:23:30
+dt	1998-12-31 11:30:45
+dob	1984-09-08
+time	07:05:00
+y	1984
+SELECT * FROM `§Æ2`;
+region	summary	data	details	queries	query2	extract	paras
+xxxxxxxx	Testofonline backup	aaaaaaaaaa	bbbbbbbbbbb	hhhhhhhhhhh	kkkkkkkkkkkkk	zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz	onlinebackup1
+DESCRIBE `§Æ`;
+Field	Type	Null	Key	Default	Extra
+rint	int(11)	YES		NULL	
+tint	tinyint(4)	YES		NULL	
+sint	smallint(6)	YES		NULL	
+bint	bigint(20)	YES		NULL	
+mint	mediumint(9)	YES		NULL	
+name	char(100)	YES		NULL	
+city	varchar(100)	YES		NULL	
+fl	float(7,4)	YES		NULL	
+pers	decimal(8,2)	YES		NULL	
+sal	double	YES		NULL	
+colours	set('red','blue','yellow')	YES		NULL	
+continent	enum('Asia','Europe','Africa','Antartica')	YES		NULL	
+ts	timestamp	NO		0000-00-00 00:00:00	
+dt	datetime	NO		NULL	
+dob	date	YES		NULL	
+time	time	YES		NULL	
+y	year(4)	YES		NULL	
+DESCRIBE `§Æ2`;
+Field	Type	Null	Key	Default	Extra
+region	text	YES		NULL	
+summary	longtext	YES		NULL	
+data	blob	YES		NULL	
+details	mediumblob	YES		NULL	
+queries	tinytext	YES		NULL	
+query2	tinyblob	YES		NULL	
+extract	longblob	YES		NULL	
+paras	mediumtext	YES		NULL	
+DESCRIBE `§¶œ`;
+Field	Type	Null	Key	Default	Extra
+b1	binary(3)	YES		NULL	
+b2	varbinary(2)	YES		NULL	
+bitvalue	bit(64)	YES		NULL	
+** Backup data **
+
+BACKUP DATABASE `¥ü` to 'bup_datatype.bak';
+backup_id
+#
+** dropping  database**
+DROP DATABASE `¥ü`;
+**Restore**
+RESTORE FROM 'bup_datatype.bak' OVERWRITE;
+backup_id
+#
+SHOW DATABASES;
+Database
+information_schema
+¥ü
+mtr
+mysql
+test
+** checking the character set **
+SELECT @@character_set_client;
+@@character_set_client
+utf8
+SELECT @@character_set_results;
+@@character_set_results
+utf8
+SELECT @@character_set_connection;
+@@character_set_connection
+utf8
+USE `¥ü`;
+DESCRIBE `§Æ`;
+Field	Type	Null	Key	Default	Extra
+rint	int(11)	YES		NULL	
+tint	tinyint(4)	YES		NULL	
+sint	smallint(6)	YES		NULL	
+bint	bigint(20)	YES		NULL	
+mint	mediumint(9)	YES		NULL	
+name	char(100)	YES		NULL	
+city	varchar(100)	YES		NULL	
+fl	float(7,4)	YES		NULL	
+pers	decimal(8,2)	YES		NULL	
+sal	double	YES		NULL	
+colours	set('red','blue','yellow')	YES		NULL	
+continent	enum('Asia','Europe','Africa','Antartica')	YES		NULL	
+ts	timestamp	NO		0000-00-00 00:00:00	
+dt	datetime	NO		NULL	
+dob	date	YES		NULL	
+time	time	YES		NULL	
+y	year(4)	YES		NULL	
+DESCRIBE `§Æ2`;
+Field	Type	Null	Key	Default	Extra
+region	text	YES		NULL	
+summary	longtext	YES		NULL	
+data	blob	YES		NULL	
+details	mediumblob	YES		NULL	
+queries	tinytext	YES		NULL	
+query2	tinyblob	YES		NULL	
+extract	longblob	YES		NULL	
+paras	mediumtext	YES		NULL	
+DESCRIBE `§¶œ`;
+Field	Type	Null	Key	Default	Extra
+b1	binary(3)	YES		NULL	
+b2	varbinary(2)	YES		NULL	
+bitvalue	bit(64)	YES		NULL	
+SELECT * FROM `§Æ`;;
+rint	785
+tint	127
+sint	7288
+bint	278829899
+mint	3777
+name	testing1
+city	sweden
+fl	678.2990
+pers	200.23
+sal	829899.909
+colours	red
+continent	Asia
+ts	2008-06-01 16:23:30
+dt	1998-12-31 11:30:45
+dob	1984-09-08
+time	07:05:00
+y	1984
+SELECT * FROM `§Æ2`;
+region	summary	data	details	queries	query2	extract	paras
+xxxxxxxx	Testofonline backup	aaaaaaaaaa	bbbbbbbbbbb	hhhhhhhhhhh	kkkkkkkkkkkkk	zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz	onlinebackup1
+SELECT HEX(b1), HEX(b2),HEX(bitvalue) FROM `§¶œ`;
+HEX(b1)	HEX(b2)	HEX(bitvalue)
+610000	2130	FFFFFFFFFFFFFFFF
+612000	4100	5555
+612020	4120	1
+INSERT INTO `§¶œ` VALUES(0x7120,0x41,b'1010101010101010101010101010101010101010101010101010101010101010'), (0x5122, 0x6120,b'1000000000000000000000000000000000000000000000000000000000000000');
+SELECT HEX(b1), HEX(b2), HEX(bitvalue) FROM `§¶œ`;
+HEX(b1)	HEX(b2)	HEX(bitvalue)
+610000	2130	FFFFFFFFFFFFFFFF
+612000	4100	5555
+612020	4120	1
+712000	41	AAAAAAAAAAAAAAAA
+512200	6120	8000000000000000
+Perform restore again by changing the character set
+SET NAMES latin1;
+**Restore**
+RESTORE FROM 'bup_datatype.bak' OVERWRITE;
+backup_id
+#
+SHOW DATABASES;
+Database
+information_schema
+��
+mysql
+test
+** checking client character set **
+SELECT @@character_set_client;
+@@character_set_client
+latin1
+SELECT @@character_set_results;
+@@character_set_results
+latin1
+SELECT @@character_set_connection;
+@@character_set_connection
+latin1
+SET NAMES utf8;
+
+***  DROP `¥ü` DATABASE ****
+
+DROP DATABASE `¥ü`;

=== modified file 'mysql-test/suite/backup/r/backup_errors.result'
--- a/mysql-test/suite/backup/r/backup_errors.result	2008-12-24 10:48:24 +0000
+++ b/mysql-test/suite/backup/r/backup_errors.result	2009-02-01 13:26:18 +0000
@@ -1,46 +1,120 @@
 DROP DATABASE IF EXISTS adb;
 DROP DATABASE IF EXISTS bdb;
+# non-existent backup archive
 RESTORE FROM 'test.bak';
-ERROR HY000: File 'MYSQLTEST_VARDIR/master-data/test.bak' not found (Errcode: #)
+ERROR HY000: File 'MYSQLTEST_VARDIR/mysqld.1/data/test.bak' not found (Errcode: #)
+Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "RESTORE FROM 'test%";
+verify backup history and progress logs:
+SELECT backup_state,operation, backup_file FROM mysql.backup_history
+WHERE backup_id=@bup_id;
+backup_state	operation	backup_file
+error	restore	test.bak
+SELECT notes FROM mysql.backup_progress
+WHERE backup_id=@bup_id;
+notes
+starting
+running
+Can't read backup location 'test.bak'
+error
 CREATE DATABASE adb;
 CREATE DATABASE bdb;
 CREATE TABLE bdb.t1(a int) ENGINE=MEMORY;
+# invalid location
 BACKUP DATABASE adb TO '';
 ERROR HY000: Malformed file path ''
 SHOW WARNINGS;
 Level	Code	Message
 Error	#	Malformed file path ''
+Warning	#	Operation aborted
+Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE adb TO%";
+verify backup history and progress logs:
+SELECT backup_state,operation, backup_file FROM mysql.backup_history
+WHERE backup_id=@bup_id;
+backup_state	operation	backup_file
+error	backup	
+SELECT notes FROM mysql.backup_progress
+WHERE backup_id=@bup_id;
+notes
+starting
+running
+Malformed file path ''
+error
+# don't overwrite existing files
 BACKUP DATABASE adb TO "bdb/t1.frm";
-ERROR HY000: Can't create/write to file 'MYSQLTEST_VARDIR/master-data/bdb/t1.frm' (Errcode: #)
+ERROR HY000: Can't create/write to file 'MYSQLTEST_VARDIR/mysqld.1/data/bdb/t1.frm' (Errcode: #)
 SHOW WARNINGS;
 Level	Code	Message
-Error	#	Can't create/write to file 'MYSQLTEST_VARDIR/master-data/bdb/t1.frm' (Errcode: #)
+Error	#	Can't create/write to file 'MYSQLTEST_VARDIR/mysqld.1/data/bdb/t1.frm' (Errcode: #)
 Error	#	Can't write to backup location 'bdb/t1.frm'
+Warning	#	Operation aborted
 BACKUP DATABASE adb TO "test.bak";
 backup_id
 #
 SHOW WARNINGS;
 Level	Code	Message
+# don't overwrite existing backup image
 BACKUP DATABASE adb TO "test.bak";
-ERROR HY000: Can't create/write to file 'MYSQLTEST_VARDIR/master-data/test.bak' (Errcode: #)
+ERROR HY000: Can't create/write to file 'MYSQLTEST_VARDIR/mysqld.1/data/test.bak' (Errcode: #)
 SHOW WARNINGS;
 Level	Code	Message
-Error	#	Can't create/write to file 'MYSQLTEST_VARDIR/master-data/test.bak' (Errcode: #)
+Error	#	Can't create/write to file 'MYSQLTEST_VARDIR/mysqld.1/data/test.bak' (Errcode: #)
 Error	#	Can't write to backup location 'test.bak'
+Warning	#	Operation aborted
+Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE adb TO%";
+verify backup history and progress logs for backup_state.
+SELECT backup_state,operation, backup_file FROM mysql.backup_history
+WHERE backup_id=@bup_id;
+backup_state	operation	backup_file
+error	backup	test.bak
+SELECT notes FROM mysql.backup_progress
+WHERE backup_id=@bup_id;
+notes
+starting
+running
+Can't write to backup location 'test.bak'
+error
+# non-existent database
 DROP DATABASE IF EXISTS foo;
 DROP DATABASE IF EXISTS bar;
 BACKUP DATABASE foo TO 'test.bak';
 ERROR 42000: Unknown database 'foo'
 SHOW WARNINGS;
 Level	Code	Message
-Error	1049	Unknown database 'foo'
+Error	#	Unknown database 'foo'
+Warning	#	Operation aborted
 BACKUP DATABASE test,foo,bdb,bar TO 'test.bak';
 ERROR 42000: Unknown database 'foo,bar'
 SHOW WARNINGS;
 Level	Code	Message
 Error	#	Unknown database 'foo,bar'
+Warning	#	Operation aborted
+Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE test,foo,bdb,bar TO%";
+verify backup history and progress logs:
+SELECT backup_state,operation,backup_file FROM mysql.backup_history
+WHERE backup_id=@bup_id;
+backup_state	operation	backup_file
+error	backup	test.bak
+SELECT notes FROM mysql.backup_progress
+WHERE backup_id=@bup_id;
+notes
+starting
+running
+Unknown database 'foo,bar'
+error
+# repeated database
 BACKUP DATABASE foo,test,bar,foo TO 'test.bak';
 ERROR 42000: Not unique database: 'foo'
+SHOW WARNINGS;
+Level	Code	Message
+Error	#	Not unique database: 'foo'
 use adb;
 create table t1 (a int);
 create procedure p1() backup database test to 'test.bak';
@@ -71,36 +145,117 @@ ERROR HY000: Database 'mysql' cannot be
 SHOW WARNINGS;
 Level	Code	Message
 Error	#	Database 'mysql' cannot be included in a backup
+Warning	#	Operation aborted
+Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE mysql TO%";
+verify backup history and progress logs:
+SELECT backup_state,operation, backup_file FROM mysql.backup_history
+WHERE backup_id=@bup_id;
+backup_state	operation	backup_file
+error	backup	t.bak
+SELECT notes FROM mysql.backup_progress
+WHERE backup_id=@bup_id;
+notes
+starting
+running
+Database 'mysql' cannot be included in a backup
+error
 Backup of mysql, information_schema scenario 2
 BACKUP DATABASE information_schema TO 't.bak';
 ERROR HY000: Database 'information_schema' cannot be included in a backup
 SHOW WARNINGS;
 Level	Code	Message
 Error	#	Database 'information_schema' cannot be included in a backup
+Warning	#	Operation aborted
+Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE information_schema TO%";
+verify backup history and progress logs:
+SELECT backup_state,operation, backup_file FROM mysql.backup_history
+WHERE backup_id=@bup_id;
+backup_state	operation	backup_file
+error	backup	t.bak
+SELECT  notes FROM mysql.backup_progress
+WHERE backup_id=@bup_id;
+notes
+starting
+running
+Database 'information_schema' cannot be included in a backup
+error
 Backup of mysql, information_schema scenario 3
 BACKUP DATABASE mysql, information_schema TO 't.bak';
 ERROR HY000: Database 'mysql' cannot be included in a backup
 SHOW WARNINGS;
 Level	Code	Message
 Error	#	Database 'mysql' cannot be included in a backup
+Warning	#	Operation aborted
+Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE mysql, information_schema TO%";
+verify backup history and progress logs:
+SELECT backup_state,operation, backup_file FROM mysql.backup_history
+WHERE backup_id=@bup_id;
+backup_state	operation	backup_file
+error	backup	t.bak
+SELECT notes FROM mysql.backup_progress
+WHERE backup_id=@bup_id;
+notes
+starting
+running
+Database 'mysql' cannot be included in a backup
+error
 Backup of mysql, information_schema scenario 4
 BACKUP DATABASE mysql, test TO 't.bak';
 ERROR HY000: Database 'mysql' cannot be included in a backup
 SHOW WARNINGS;
 Level	Code	Message
 Error	#	Database 'mysql' cannot be included in a backup
+Warning	#	Operation aborted
+Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE mysql, test TO%";
+verify backup history and progress logs:
+SELECT backup_state,operation, backup_file FROM mysql.backup_history
+WHERE backup_id=@bup_id;
+backup_state	operation	backup_file
+error	backup	t.bak
+SELECT notes FROM mysql.backup_progress
+WHERE backup_id=@bup_id;
+notes
+starting
+running
+Database 'mysql' cannot be included in a backup
+error
 Backup of mysql, information_schema scenario 5
 BACKUP DATABASE information_schema, test TO 't.bak';
 ERROR HY000: Database 'information_schema' cannot be included in a backup
 SHOW WARNINGS;
 Level	Code	Message
 Error	#	Database 'information_schema' cannot be included in a backup
+Warning	#	Operation aborted
 Backup of mysql, information_schema scenario 6
 BACKUP DATABASE mysql, information_schema, test TO 't.bak';
 ERROR HY000: Database 'mysql' cannot be included in a backup
 SHOW WARNINGS;
 Level	Code	Message
 Error	#	Database 'mysql' cannot be included in a backup
+Warning	#	Operation aborted
+Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE mysql, information_schema, test TO%";
+verify backup history and progress logs:
+SELECT backup_state,operation, backup_file FROM mysql.backup_history
+WHERE backup_id=@bup_id;
+backup_state	operation	backup_file
+error	backup	t.bak
+SELECT notes FROM mysql.backup_progress
+WHERE backup_id=@bup_id;
+notes
+starting
+running
+Database 'mysql' cannot be included in a backup
+error
 Making copies of progress tables.
 CREATE TABLE IF NOT EXISTS test.ob_copy LIKE mysql.backup_history;
 CREATE TABLE IF NOT EXISTS test.obp_copy LIKE mysql.backup_progress;
@@ -168,6 +323,21 @@ trigger metadata
 SET SESSION DEBUG='+d,backup_fail_add_trigger';
 BACKUP DATABASE db1 TO 'bup_db1.bak';
 ERROR HY000: Failed to obtain meta-data for trigger `db1`.`trg`
+Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE db1 TO%";
+verify backup history and progress logs:
+SELECT backup_state,operation, backup_file FROM mysql.backup_history
+WHERE backup_id=@bup_id;
+backup_state	operation	backup_file
+error	backup	bup_db1.bak
+SELECT notes FROM mysql.backup_progress
+WHERE backup_id=@bup_id;
+notes
+starting
+running
+Failed to obtain meta-data for trigger `db1`.`trg`
+error
 SET DEBUG_SYNC= 'reset';
 DROP DATABASE db1;
 SET SESSION DEBUG='';
@@ -219,4 +389,54 @@ Show that inserted value 2 is not there
 SELECT * FROM table1;
 text
 Inserted before
+#
+# Test error handling by backup code when injecting commit blocker error.
+#
+SET SESSION DEBUG='+d,backup_grl_fail';
+BACKUP DATABASE db1 TO 'overwrite1.bak';
+ERROR HY000: Backup failed to synchronize table images.
+SET SESSION DEBUG='-d';
+
+SET SESSION DEBUG='+d,backup_grl_block_commit_fail';
+BACKUP DATABASE db1 TO 'overwrite1.bak';
+ERROR HY000: Backup failed to synchronize table images.
+SET SESSION DEBUG='-d';
+#
+# Test that BACKUP and RESTORE clears warning stack
+#
+BACKUP DATABASE db1 TO 'test.bak';
+backup_id
+#
+# Generate warning - test.bak cannot be overwritten
+BACKUP DATABASE db1 TO 'test.bak';
+ERROR HY000: Can't create/write to file 'MYSQLTEST_VARDIR/mysqld.1/data/test.bak' (Errcode: #)
+SHOW WARNINGS;
+Level	Code	Message
+Error	#	Can't create/write to file 'MYSQLTEST_VARDIR/mysqld.1/data/test.bak' (Errcode: #)
+Error	#	Can't write to backup location 'test.bak'
+Warning	#	Operation aborted
+
+# Test that there are no warnings after successful BACKUP
+BACKUP DATABASE db1 TO 'newtest.bak';
+backup_id
+#
+SHOW WARNINGS;
+Level	Code	Message
+
+# Generate warning - test.bak cannot be overwritten
+BACKUP DATABASE db1 TO 'test.bak';
+ERROR HY000: Can't create/write to file 'MYSQLTEST_VARDIR/mysqld.1/data/test.bak' (Errcode: #)
+SHOW WARNINGS;
+Level	Code	Message
+Error	#	Can't create/write to file 'MYSQLTEST_VARDIR/mysqld.1/data/test.bak' (Errcode: #)
+Error	#	Can't write to backup location 'test.bak'
+Warning	#	Operation aborted
+
+# Test that there are no warnings after successful RESTORE
+RESTORE FROM 'newtest.bak' OVERWRITE;
+backup_id
+#
+SHOW WARNINGS;
+Level	Code	Message
+
 DROP DATABASE db1;

=== modified file 'mysql-test/suite/backup/r/backup_logs.result'
--- a/mysql-test/suite/backup/r/backup_logs.result	2008-12-06 00:24:23 +0000
+++ b/mysql-test/suite/backup/r/backup_logs.result	2009-01-29 21:17:59 +0000
@@ -32,11 +32,78 @@ Now starting real tests
 
 DROP DATABASE IF EXISTS backup_logs;
 PURGE BACKUP LOGS;
+Check backup logs when log_backup_output is TABLE and FILE
+SET @@global.log_backup_output = 'TABLE,FILE';
+CREATE USER 'tom'@'localhost' IDENTIFIED BY 'abc';
+GRANT ALL ON *.* TO 'tom'@'localhost' WITH GRANT OPTION;
+SHOW GRANTS FOR 'tom'@'localhost';
+Grants for tom@localhost
+GRANT ALL PRIVILEGES ON *.* TO 'tom'@'localhost' IDENTIFIED BY PASSWORD '*0D3CED9BEC10A777AEC23CCC353A8C08A633045E' WITH GRANT OPTION
+SELECT user, host, password FROM mysql.user WHERE user='tom';
+user	host	password
+tom	localhost	*0D3CED9BEC10A777AEC23CCC353A8C08A633045E
+SELECT CURRENT_USER();
+CURRENT_USER()
+tom@localhost
 CREATE DATABASE backup_logs;
-con1: Create table and new users.
+Perform backup
+BACKUP DATABASE backup_logs TO 'backup_logs1.bak'
+Get last backup_id
+SELECT MAX(backup_id) INTO @backup_id_history FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE backup_logs TO%";
+Verify the result
+LET $are_identical= `SELECT @backup_id_history = backup_id AS are_identical`
+
+Verification of backup_id from history table and command is:
+1
+We can notice that, if result is 1 then backup_id
+from backup_history log and from backup_command is same.
+con1: Activate sync points for the backup statement.
+SET DEBUG_SYNC= 'after_backup_log_init     SIGNAL started   WAIT_FOR do_run';
+SET DEBUG_SYNC= 'after_backup_start_backup SIGNAL running   WAIT_FOR finish';
+Perform backup database operation with database alone.
+BACKUP DATABASE backup_logs TO 'backup_logs1.bak';
+con default: Wait for the backup to be started.
+SET DEBUG_SYNC= 'now WAIT_FOR started';
+Let backup step to running state.
+SET DEBUG_SYNC= 'now SIGNAL do_run WAIT_FOR running';
+con default: Let backup finish.
+SET DEBUG_SYNC= 'now SIGNAL finish';
+con1: Finish backup command
+backup_id
+#
+Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE backup_logs TO%";
+SELECT operation,num_objects, username, command FROM mysql.backup_history
+WHERE backup_id=@bup_id;
+operation	num_objects	username	command
+backup	0	tom	BACKUP DATABASE backup_logs TO 'backup_logs1.bak'
+SELECT CURRENT_USER();
+CURRENT_USER()
+root@localhost
+Perform Backup and verify the username as 'root' in backup history log
+BACKUP DATABASE backup_logs TO 'backup_logs1.bak';
+backup_id
+#
+Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE backup_logs TO%";
+SELECT operation,num_objects, username, command FROM mysql.backup_history
+WHERE backup_id=@bup_id;
+operation	num_objects	username	command
+backup	0	root	BACKUP DATABASE backup_logs TO 'backup_logs1.bak'
+
+From the above tables we can notice that num_objects shows '0' if only DB
+is included in backup image(BUG#39109)
+
+Include all objects in database(Databases, tables, procedures and
+functions, views, triggers and events) and perform backup operation.
+con1: Create tables
 CREATE TABLE backup_logs.t1 (a char(30)) ENGINE=MYISAM;
 CREATE TABLE backup_logs.t2 (a char(30)) ENGINE=INNODB;
 CREATE TABLE backup_logs.t3 (a char(30)) ENGINE=MEMORY;
+CREATE TABLE backup_logs.t4(id INT, name CHAR(20))ENGINE=BLACKHOLE;
 INSERT INTO backup_logs.t1 VALUES ("01 Test #1 - progress");
 INSERT INTO backup_logs.t1 VALUES ("02 Test #1 - progress");
 INSERT INTO backup_logs.t1 VALUES ("03 Test #1 - progress");
@@ -54,6 +121,35 @@ INSERT INTO backup_logs.t3 VALUES ("01 T
 INSERT INTO backup_logs.t3 VALUES ("02 Test #1 - progress");
 INSERT INTO backup_logs.t3 VALUES ("03 Test #1 - progress");
 INSERT INTO backup_logs.t3 VALUES ("04 Test #1 - progress");
+INSERT INTO backup_logs.t4 VALUES(1,'aa1'),(2,'aa2'),(3,'aa3');
+SELECT * FROM backup_logs.t4;
+id	name
+create all objects like views, procedures, functions, triggers
+and events.
+** create view **
+CREATE VIEW backup_logs.v1 AS SELECT * FROM backup_logs.t1;
+CREATE VIEW backup_logs.vv AS SELECT * FROM backup_logs.v1;
+** create triggers **
+CREATE TRIGGER backup_logs.trg AFTER INSERT ON backup_logs.t1 FOR EACH ROW
+BEGIN
+INSERT INTO backup_logs.t3 VALUES('Test objects count');
+END;||
+** create procedures **
+CREATE PROCEDURE backup_logs.p1()
+BEGIN
+SELECT * FROM backup_logs.t1;
+END;
+||
+** create functions **
+CREATE FUNCTION backup_logs.f1() RETURNS INTEGER
+BEGIN
+RETURN (SELECT COUNT(*) FROM backup_logs.t1);
+END;
+||
+** create event **
+CREATE EVENT backup_logs.e1 ON SCHEDULE EVERY 1 YEAR DO
+DELETE FROM objects.t4 WHERE id=10;
+
 Do backup of database
 con2: Activate sync points for the backup statement.
 SET DEBUG_SYNC= 'after_backup_log_init     SIGNAL started   WAIT_FOR do_run';
@@ -65,37 +161,47 @@ SET SESSION debug="+d,set_backup_id";
 con2: Send backup command. 
 con2: Backup id = 500.
 BACKUP DATABASE backup_logs to 'backup_logs_orig.bak';
+SET time_zone='+0:00';
+SELECT now() INTO @start_backup;
 con1: Wait for the backup to be started.
 SET DEBUG_SYNC= 'now WAIT_FOR started';
+
 con1: Display progress
-SELECT notes FROM mysql.backup_progress WHERE backup_id = 500;
-notes
-starting
+backup progress tables will always show start_time, stop_time,total_bytes
+and progress as '0' for all phases of backup operation.
+BUG#39356 Backup progress table details aren't updated properly
+SELECT total_bytes, progress, notes FROM mysql.backup_progress 
+WHERE backup_id = 500;
+total_bytes	progress	notes
+0	0	starting
 con1: Let backup step to running state.
 SET DEBUG_SYNC= 'now SIGNAL do_run WAIT_FOR phase1';
 con1: Display progress
-SELECT notes FROM mysql.backup_progress WHERE backup_id = 500;
-notes
-starting
-running
+SELECT total_bytes, progress, notes FROM mysql.backup_progress 
+WHERE backup_id = 500;
+total_bytes	progress	notes
+0	0	starting
+0	0	running
 con1: Let backup do the backup phase1.
 SET DEBUG_SYNC= 'now SIGNAL backup WAIT_FOR validated';
 con1: Display progress
-SELECT notes FROM mysql.backup_progress WHERE backup_id = 500;
-notes
-starting
-running
-validity point
+SELECT total_bytes, progress, notes FROM mysql.backup_progress 
+WHERE backup_id = 500;
+total_bytes	progress	notes
+0	0	starting
+0	0	running
+0	0	validity point
 con1: Let backup do the backup phase2.
 SET DEBUG_SYNC= 'now SIGNAL do_phase2 WAIT_FOR phase2';
 con1: Display progress
-SELECT notes FROM mysql.backup_progress WHERE backup_id = 500;
-notes
-starting
-running
-validity point
-vp time
-running
+SELECT total_bytes, progress, notes FROM mysql.backup_progress 
+WHERE backup_id = 500;
+total_bytes	progress	notes
+0	0	starting
+0	0	running
+0	0	validity point
+0	0	vp time
+0	0	running
 con1: Let backup finish.
 SET DEBUG_SYNC= 'now SIGNAL finish';
 con2: Finish backup command
@@ -104,6 +210,42 @@ backup_id
 FLUSH BACKUP LOGS;
 Turn off debugging session.
 SET SESSION debug="-d";
+SET time_zone='+0:00';
+SELECT now() INTO @stop_backup;
+We calculate the timedifference between backup start time and stop
+time. If this difference is '0', then backup start time and stop time
+are same.
+SELECT timediff(@stop_backup, @start_backup) > 5;
+timediff(@stop_backup, @start_backup) > 5
+0
+
+Now verify actual start time / stop time of backup and start time /
+stop time from backup_history table. If the both times are same, 
+the timediff will be '0'
+
+SELECT timediff(start_time, @start_backup) > 5 from mysql.backup_history
+WHERE backup_id=500;
+timediff(start_time, @start_backup) > 5
+0
+SELECT timediff(stop_time, @stop_backup) > 5 from mysql.backup_history
+WHERE backup_id=500;
+timediff(stop_time, @stop_backup) > 5
+0
+Now verify that start_time <= vp_time <= stop_time
+SELECT timediff(validity_point_time, start_time) >= 0, 
+timediff(stop_time, validity_point_time) >=0
+from mysql.backup_history WHERE backup_id=500;
+timediff(validity_point_time, start_time) >= 0	timediff(stop_time, validity_point_time) >=0
+1	1
+
+From backup_history log we will notice that "drivers" column will show
+Myisam, snapshot, default and no-data drivers
+"error_num" will be '0' as both backup and restore was successful
+"num_objects" count is always 5(as there are 5 tables in database).
+It does not list other objects from the backup image(BUG#39109)
+SHOW VARIABLES LIKE 'log_backup_output';
+Variable_name	Value
+log_backup_output	FILE,TABLE
 SELECT * FROM mysql.backup_history WHERE backup_id = 500;;
 backup_id	#
 process_id	#
@@ -112,18 +254,18 @@ binlog_file	#
 backup_state	complete
 operation	backup
 error_num	0
-num_objects	3
-total_bytes	3971
+num_objects	4
+total_bytes	5215
 validity_point_time	#
 start_time	#
 stop_time	#
 host_or_server_name	localhost
-username	root
+username	tom
 backup_file	#
 backup_file_path	#
 user_comment	
 command	BACKUP DATABASE backup_logs to 'backup_logs_orig.bak'
-drivers	MyISAM, Snapshot, Default
+drivers	MyISAM, Snapshot, Default, Nodata
 SELECT * FROM mysql.backup_progress WHERE backup_id = 500;
 backup_id	object	start_time	stop_time	total_bytes	progress	error_num	notes
 #	backup kernel	#	#	0	0	0	starting
@@ -132,33 +274,47 @@ backup_id	object	start_time	stop_time	to
 #	backup kernel	#	#	0	0	0	vp time
 #	backup kernel	#	#	0	0	0	running
 #	backup kernel	#	#	0	0	0	complete
+File sizes are not identical
+
+The actual backup file size and from backup_history logs are different
+because of bug#37980. Once this bug is fixed, both should show 
+same bytes.
 con2: Activate sync points for the backup statement.
 SET DEBUG_SYNC= 'after_backup_log_init      SIGNAL started WAIT_FOR do_run';
 SET DEBUG_SYNC= 'after_backup_start_restore SIGNAL running WAIT_FOR finish';
 con2: Send restore command.
 con2: Backup id = 501.
 RESTORE FROM 'backup_logs_orig.bak' OVERWRITE;
+SELECT now() INTO @start_restore;
 con1: Wait for the restore to be started.
 SET DEBUG_SYNC= 'now WAIT_FOR started';
 con1: Display progress
-SELECT notes FROM mysql.backup_progress WHERE backup_id = 501;
-notes
-starting
+SELECT total_bytes, progress,notes FROM mysql.backup_progress 
+WHERE backup_id = 501;
+total_bytes	progress	notes
+0	0	starting
 con1: Let restore step to running state.
 SET DEBUG_SYNC= 'now SIGNAL do_run WAIT_FOR running';
 con1: Display progress
-SELECT notes FROM mysql.backup_progress WHERE backup_id = 501;
-notes
-starting
-running
+SELECT total_bytes, progress,notes FROM mysql.backup_progress 
+WHERE backup_id = 501;
+total_bytes	progress	notes
+0	0	starting
+0	0	running
 con1: Let restore do its job and finish.
 SET DEBUG_SYNC= 'now SIGNAL finish';
 con2: Finish restore command
 backup_id
-501
+#
 FLUSH BACKUP LOGS;
 SET DEBUG_SYNC= 'now SIGNAL complete';
 SET DEBUG_SYNC= 'now WAIT_FOR complete';
+We calculate the time difference between restore start time and stop
+time. If this difference is '0', then restore start time and stop time
+are same.
+SELECT timediff(now(),@start_restore) > 5;
+timediff(now(),@start_restore) > 5
+0
 SELECT * FROM mysql.backup_history WHERE backup_id = 501;;
 backup_id	#
 process_id	#
@@ -167,18 +323,18 @@ binlog_file	#
 backup_state	complete
 operation	restore
 error_num	0
-num_objects	3
+num_objects	4
 total_bytes	1483
 validity_point_time	#
 start_time	#
 stop_time	#
 host_or_server_name	localhost
-username	root
+username	tom
 backup_file	#
 backup_file_path	#
 user_comment	
 command	RESTORE FROM 'backup_logs_orig.bak' OVERWRITE
-drivers	MyISAM, Snapshot, Default
+drivers	MyISAM, Snapshot, Default, Nodata
 SELECT * FROM mysql.backup_progress WHERE backup_id = 501;
 backup_id	object	start_time	stop_time	total_bytes	progress	error_num	notes
 #	backup kernel	#	#	0	0	0	starting

=== modified file 'mysql-test/suite/backup/r/backup_logs_purge.result'
--- a/mysql-test/suite/backup/r/backup_logs_purge.result	2008-11-17 09:57:51 +0000
+++ b/mysql-test/suite/backup/r/backup_logs_purge.result	2009-01-08 14:57:41 +0000
@@ -252,7 +252,7 @@ count(*)
 24
 SET SESSION debug="+d,set_backup_id";
 con2: Activate sync points for the backup statement.
-SET DEBUG_SYNC= 'before_backup_done SIGNAL ready WAIT_FOR proceed';
+SET DEBUG_SYNC= 'before_backup_completed SIGNAL ready WAIT_FOR proceed';
 BACKUP DATABASE backup_logs to 'backup5.bak';
 con1: Wait for the backup to be ready.
 SET DEBUG_SYNC= 'now WAIT_FOR ready';
@@ -305,7 +305,7 @@ SELECT count(*) FROM mysql.backup_progre
 count(*)
 13
 con2: Activate sync points for the backup statement.
-SET DEBUG_SYNC= 'before_restore_done SIGNAL ready WAIT_FOR proceed';
+SET DEBUG_SYNC= 'before_restore_completed SIGNAL ready WAIT_FOR proceed';
 RESTORE FROM 'backup5.bak' OVERWRITE;
 con1: Wait for the backup to be ready.
 SET DEBUG_SYNC= 'now WAIT_FOR ready';

=== added file 'mysql-test/suite/backup/r/backup_myisam.result'
--- a/mysql-test/suite/backup/r/backup_myisam.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/backup/r/backup_myisam.result	2008-12-30 12:13:31 +0000
@@ -0,0 +1,41 @@
+USE test;
+DROP DATABASE IF EXISTS mysql_db1;
+#
+# Bug#40944 - Backup: crash after myisampack
+#
+CREATE DATABASE mysql_db1;
+CREATE TABLE mysql_db1.t1 (c1 VARCHAR(5), c2 int);
+CREATE INDEX i1 ON mysql_db1.t1 (c1, c2);
+INSERT INTO mysql_db1.t1 VALUES ('A',1);
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+FLUSH TABLE mysql_db1.t1;
+BACKUP DATABASE mysql_db1 to 'bup_myisam.bak';
+backup_id
+#
+RESTORE FROM 'bup_myisam.bak' OVERWRITE;
+backup_id
+#
+SELECT COUNT(*) FROM mysql_db1.t1 WHERE c2 < 5;
+COUNT(*)
+128
+DROP TABLE mysql_db1.t1;
+DROP DATABASE mysql_db1;
+
+#
+# Bug#38045 - Backup, MyISAM and file system encoding
+#
+CREATE DATABASE mysqltest;
+USE mysqltest;
+CREATE TABLE `äöüߣå` (id SERIAL) ENGINE=MyISAM;
+BACKUP DATABASE mysqltest TO 'bup_myisam.bak';
+backup_id
+#
+DROP TABLE `äöüߣå`;
+USE test;
+DROP DATABASE mysqltest;

=== added file 'mysql-test/suite/backup/r/backup_myisam_coverage.result'
--- a/mysql-test/suite/backup/r/backup_myisam_coverage.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/backup/r/backup_myisam_coverage.result	2008-12-30 12:13:31 +0000
@@ -0,0 +1,27 @@
+USE test;
+DROP DATABASE IF EXISTS mysql_db1;
+CREATE DATABASE mysql_db1;
+CREATE TABLE mysql_db1.t1 (c1 VARCHAR(5), c2 int);
+CREATE INDEX i1 ON mysql_db1.t1 (c1, c2);
+INSERT INTO mysql_db1.t1 VALUES ('A',1);
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+FLUSH TABLE mysql_db1.t1;
+BACKUP DATABASE mysql_db1 to 'bup_myisam3.bak';
+backup_id
+#
+SET debug='d,Backup_restore_ctx_unlock_tables_alloc';
+RESTORE FROM 'bup_myisam3.bak' OVERWRITE;
+backup_id
+#
+SELECT COUNT(*) FROM mysql_db1.t1 WHERE c2 < 5;
+COUNT(*)
+128
+SET debug='';
+DROP TABLE mysql_db1.t1;
+DROP DATABASE mysql_db1;

=== renamed file 'mysql-test/suite/backup/r/backup_myisam1.result' => 'mysql-test/suite/backup/r/backup_myisam_extlocking.result'
--- a/mysql-test/suite/backup/r/backup_myisam1.result	2008-10-07 17:15:44 +0000
+++ b/mysql-test/suite/backup/r/backup_myisam_extlocking.result	2008-12-30 12:13:31 +0000
@@ -1,7 +1,9 @@
-drop database if exists mysqltest;
-create database mysqltest;
-use mysqltest;
+USE test;
+DROP DATABASE IF EXISTS mysqltest;
+CREATE DATABASE mysqltest;
+USE mysqltest;
 CREATE TABLE t1 (a int) engine=myisam;
 BACKUP DATABASE mysqltest TO 'test.ba';
 ERROR HY000: Got error -1 'online backup impossible with --external-locking' from MyISAM
+USE test;
 DROP DATABASE mysqltest;

=== renamed file 'mysql-test/suite/backup/r/backup_myisam2.result' => 'mysql-test/suite/backup/r/backup_myisam_sync.result'
--- a/mysql-test/suite/backup/r/backup_myisam2.result	2008-10-07 17:15:44 +0000
+++ b/mysql-test/suite/backup/r/backup_myisam_sync.result	2008-12-30 12:13:31 +0000
@@ -1,4 +1,5 @@
 SET DEBUG_SYNC= 'RESET';
+USE test;
 DROP DATABASE IF EXISTS mysqltest;
 CREATE DATABASE mysqltest;
 USE mysqltest;
@@ -7,7 +8,7 @@ CREATE TABLE t1 (c1 LONGTEXT) ENGINE=MyI
 connection backup: Start backup
 SET DEBUG_SYNC= 'before_backup_data_prepare SIGNAL bup_sync
                      WAIT_FOR bup_finish';
-BACKUP DATABASE mysqltest TO 'test.ba';
+BACKUP DATABASE mysqltest TO 'bup_myisam_sync.bak';
 
 connection default: Wait for BACKUP to reach its sync point
 SET DEBUG_SYNC= 'now WAIT_FOR bup_sync';
@@ -45,7 +46,7 @@ REPAIR TABLE t1 QUICK;
 Table	Op	Msg_type	Msg_text
 mysqltest.t1	repair	status	OK
 DROP DATABASE mysqltest;
-RESTORE FROM 'test.ba';
+RESTORE FROM 'bup_myisam_sync.bak';
 backup_id
 #
 SELECT LENGTH(c1) FROM t1;
@@ -58,15 +59,6 @@ mysqltest.t1	1728069308
 connection default: cleanup
 SET DEBUG_SYNC= 'RESET';
 
-#
-# Bug#38045 - Backup, MyISAM and file system encoding
-#
-CREATE TABLE `äöüߣå` (id SERIAL) ENGINE=MyISAM;
-BACKUP DATABASE mysqltest TO 'test.ba';
-backup_id
-#
-DROP TABLE `äöüߣå`;
-
 # final cleanup
 USE test;
 DROP DATABASE mysqltest;

=== added file 'mysql-test/suite/backup/r/backup_stream_errors.result'
--- a/mysql-test/suite/backup/r/backup_stream_errors.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/backup/r/backup_stream_errors.result	2009-01-20 11:36:32 +0000
@@ -0,0 +1,26 @@
+# Preparations
+DROP DATABASE IF EXISTS db;
+CREATE DATABASE db;
+USE db;
+# Create procedures whose definitions will be shorter than MIN_WRITE_SIZE 
+# (approx 700 bytes).
+CREATE PROCEDURE p1() SET @foo='12345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678';
+CREATE PROCEDURE p2() SET @foo='12345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678';
+CREATE PROCEDURE p3() SET @foo='12345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678';
+CREATE PROCEDURE p4() SET @foo='12345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678';
+CREATE PROCEDURE p5() SET @foo='12345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678';
+CREATE PROCEDURE p6() SET @foo='12345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678';
+# Now create procedure whose definition is longer than 1024 (MIN_WRITE_SIZE)
+# but such that in total we have no more than 8k of metadata. Use string of
+# length 1.5k.
+CREATE PROCEDURE q1() SET @foo='123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678';
+# Backup the database - the image should be corrupted without bug#39375 fix.
+BACKUP DATABASE db TO 'db.bkp';
+backup_id
+#
+# Try to restore - should fail if image is corrupted.
+RESTORE FROM 'db.bkp' OVERWRITE;
+backup_id
+#
+# Cleanup
+DROP DATABASE db;

=== modified file 'mysql-test/suite/backup/r/backup_vp_nontx.result'
--- a/mysql-test/suite/backup/r/backup_vp_nontx.result	2009-01-29 10:19:15 +0000
+++ b/mysql-test/suite/backup/r/backup_vp_nontx.result	2009-01-29 21:17:59 +0000
@@ -65,6 +65,7 @@ con_ntx1: Backup has now released CB. Pe
 SET DEBUG_SYNC= 'now WAIT_FOR commit_unblocked';
 INSERT INTO bup_vp.t1 VALUES ("ntx1: Should NOT be in backup");
 SET DEBUG_SYNC= 'now SIGNAL finish_bup';
+con_ntx2: Reap insert
 
 con_ntx1: Reap backup
 backup_id

=== modified file 'mysql-test/suite/backup/t/backup.test'
--- a/mysql-test/suite/backup/t/backup.test	2008-12-24 10:48:24 +0000
+++ b/mysql-test/suite/backup/t/backup.test	2009-02-01 13:26:18 +0000
@@ -236,7 +236,7 @@ FROM mysql.backup_history
 WHERE backup_id = @bid; 
 
 DROP DATABASE db1;
---remove_file $MYSQLTEST_VARDIR/master-data/db1.bkp
+--remove_file $MYSQLD_DATADIR/db1.bkp
 
 
 #

=== added file 'mysql-test/suite/backup/t/backup_datatypes.test'
--- a/mysql-test/suite/backup/t/backup_datatypes.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/backup/t/backup_datatypes.test	2009-02-01 13:26:18 +0000
@@ -0,0 +1,144 @@
+####################################################################
+# Author: Hema
+# Date: 2008-03-11
+# Purpose: To test the backup/restore of all datatypes using accented letters.
+# We will ensure that backup stores identifiers properly in utf8 format and
+# is retrieved during restore without changing the client character set.
+#########################################################################
+
+--source include/not_embedded.inc
+--source include/have_debug.inc
+
+connect (backup,localhost,root,,);
+connect (breakpoints,localhost,root,,);
+
+##############################################################
+--echo
+--echo starting the test for backup
+--echo
+##############################################################
+let $MYSQLD_DATADIR= `select @@datadir`;
+
+--error 0,1
+--remove_file $MYSQLD_DATADIR/bup_datatype.bak
+
+#Create Database and tables with different datatypes for this test.
+
+--disable_warnings
+DROP DATABASE IF EXISTS `¥ü`;
+--enable_warnings
+
+#SET NAMES latin1;
+SET NAMES utf8;
+CREATE DATABASE `¥ü`;
+USE `¥ü`;
+
+--echo Create table with all datatypes and load with data.
+
+CREATE TABLE `§Æ`(
+rint INT,
+tint TINYINT,
+sint SMALLINT,
+bint BIGINT,
+mint MEDIUMINT,
+name CHAR(100),
+city  VARCHAR(100),
+fl FLOAT(7,4),
+pers DECIMAL(8,2),
+sal DOUBLE,
+colours SET('red','blue','yellow'),
+continent ENUM('Asia', 'Europe','Africa','Antartica'),
+ts TIMESTAMP DEFAULT 0,
+dt DATETIME NOT NULL,
+dob DATE,
+time TIME,
+y YEAR
+);
+
+--echo creating table with blob and text columns
+
+CREATE TABLE `§Æ2`(
+region TEXT,
+summary LONGTEXT,
+data BLOB,
+details MEDIUMBLOB,
+queries TINYTEXT,
+query2 TINYBLOB,
+extract LONGBLOB,
+paras MEDIUMTEXT
+);
+
+CREATE TABLE `§¶œ`(b1 BINARY(3), b2 VARBINARY(2),bitvalue BIT(64));
+INSERT INTO `§¶œ` VALUES(0x61,0x2130,b'1111111111111111111111111111111111111111111111111111111111111111'), (0x6120,0x4100,b'101010101010101'), (0x612020, 0x4120,b'000000001');
+SELECT HEX(b1), HEX(b2), HEX(bitvalue) FROM `§¶œ`;
+
+INSERT INTO `§Æ` VALUES
+(785,127,7288,278829899,3777,'testing1','sweden','678.299',200.23,829899.909,
+'red','Asia','2008-06-01 16:23:30','98/12/31 11*30*45','1984-09-08','7:05','1984');
+
+INSERT INTO `§Æ2` VALUES
+('xxxxxxxx','Testofonline backup','aaaaaaaaaa','bbbbbbbbbbb','hhhhhhhhhhh',
+'kkkkkkkkkkkkk','mmmmmmmmmmmm','onlinebackup1');
+
+# Bug #37212  Restore crashes if table has longblob of size 1MB
+UPDATE `§Æ2` SET extract = REPEAT('a', 1048576);
+SELECT * FROM `§Æ`;
+SELECT * FROM `§Æ2`;
+
+DESCRIBE `§Æ`;
+DESCRIBE `§Æ2`;
+DESCRIBE `§¶œ`;
+
+--echo ** Backup data **
+--echo
+--replace_column 1 #
+BACKUP DATABASE `¥ü` to 'bup_datatype.bak';
+
+--echo ** dropping  database**
+DROP DATABASE `¥ü`;
+
+--echo **Restore**
+--replace_column 1 #
+RESTORE FROM 'bup_datatype.bak' OVERWRITE;
+
+SHOW DATABASES;
+--echo ** checking the character set **
+SELECT @@character_set_client;
+SELECT @@character_set_results;
+SELECT @@character_set_connection;
+USE `¥ü`;
+#show data and table columns
+DESCRIBE `§Æ`;
+DESCRIBE `§Æ2`;
+DESCRIBE `§¶œ`;
+
+--query_vertical SELECT * FROM `§Æ`;
+SELECT * FROM `§Æ2`;
+SELECT HEX(b1), HEX(b2),HEX(bitvalue) FROM `§¶œ`;
+
+INSERT INTO `§¶œ` VALUES(0x7120,0x41,b'1010101010101010101010101010101010101010101010101010101010101010'), (0x5122, 0x6120,b'1000000000000000000000000000000000000000000000000000000000000000');
+SELECT HEX(b1), HEX(b2), HEX(bitvalue) FROM `§¶œ`;
+
+--echo Perform restore again by changing the character set
+SET NAMES latin1;
+
+--echo **Restore**
+--replace_column 1 #
+RESTORE FROM 'bup_datatype.bak' OVERWRITE;
+
+SHOW DATABASES;
+
+--echo ** checking client character set **
+SELECT @@character_set_client;
+SELECT @@character_set_results;
+SELECT @@character_set_connection;
+SET NAMES utf8;
+# Test cleanup section
+--echo
+--echo ***  DROP `¥ü` DATABASE ****
+--echo
+
+DROP DATABASE `¥ü`;
+
+--remove_file $MYSQLD_DATADIR/bup_datatype.bak
+

=== modified file 'mysql-test/suite/backup/t/backup_errors.test'
--- a/mysql-test/suite/backup/t/backup_errors.test	2008-12-24 10:48:24 +0000
+++ b/mysql-test/suite/backup/t/backup_errors.test	2009-02-01 13:26:18 +0000
@@ -3,9 +3,7 @@
 --source include/have_debug_sync.inc
 
 # Check that BACKUP/RESTORE commands correctly report errors 
-#
-# TODO: When we know how to do that, check that the backup progress table
-# contains appropriate rows when errors have been detected.
+# and are updated in backup history and progress logs
 
 disable_query_log;
 call mtr.add_suppression("Backup:");
@@ -24,23 +22,43 @@ let $MYSQLD_DATADIR= `select @@datadir`;
 --remove_file $MYSQLD_DATADIR/test.bak
 --enable_warnings
 
-# non-existent backup archive
+--echo # non-existent backup archive
 --replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
 --replace_regex /Errcode: [0-9]+/Errcode: #/
 --error 29
 RESTORE FROM 'test.bak';
 
+--echo Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "RESTORE FROM 'test%";
+
+--echo verify backup history and progress logs:
+SELECT backup_state,operation, backup_file FROM mysql.backup_history
+     WHERE backup_id=@bup_id;
+SELECT notes FROM mysql.backup_progress
+     WHERE backup_id=@bup_id;
+
 CREATE DATABASE adb;
 CREATE DATABASE bdb;
 CREATE TABLE bdb.t1(a int) ENGINE=MEMORY;
 
-# invalid location
+--echo # invalid location
 --error ER_BAD_PATH
 BACKUP DATABASE adb TO '';
 --replace_column 2 #
 SHOW WARNINGS;
 
-# don't overwrite existing files
+--echo Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE adb TO%";
+
+--echo verify backup history and progress logs:
+SELECT backup_state,operation, backup_file FROM mysql.backup_history
+     WHERE backup_id=@bup_id;
+SELECT notes FROM mysql.backup_progress
+     WHERE backup_id=@bup_id;
+
+--echo # don't overwrite existing files
 --replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
 --replace_regex /Errcode: [0-9]+/Errcode: #/
 --error 1
@@ -52,10 +70,10 @@ SHOW WARNINGS;
 
 --replace_column 1 #
 BACKUP DATABASE adb TO "test.bak";
---replace_column 2 #
+# There should be no warnings after a successful backup
 SHOW WARNINGS;
 
-# don't overwrite existing backup image
+--echo # don't overwrite existing backup image
 --replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
 --replace_regex /Errcode: [0-9]+/Errcode: #/
 --error 1
@@ -65,9 +83,19 @@ BACKUP DATABASE adb TO "test.bak";
 --replace_regex /Errcode: [0-9]+/Errcode: #/
 SHOW WARNINGS;
 
---remove_file $MYSQLD_DATADIR/test.bak
+--echo Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE adb TO%";
+
+--echo verify backup history and progress logs for backup_state.
+SELECT backup_state,operation, backup_file FROM mysql.backup_history
+     WHERE backup_id=@bup_id;
+SELECT notes FROM mysql.backup_progress
+     WHERE backup_id=@bup_id;
 
-# non-existent database
+--remove_file $MYSQLD_DATADIR/test.bak
+
+--echo # non-existent database
 --disable_warnings
 DROP DATABASE IF EXISTS foo;
 DROP DATABASE IF EXISTS bar;
@@ -75,15 +103,33 @@ DROP DATABASE IF EXISTS bar;
 
 -- error ER_BAD_DB_ERROR
 BACKUP DATABASE foo TO 'test.bak';
+--replace_column 2 #
 SHOW WARNINGS;
+
 -- error ER_BAD_DB_ERROR
 BACKUP DATABASE test,foo,bdb,bar TO 'test.bak';
 --replace_column 2 #
 SHOW WARNINGS;
 
-# repeated database
+--echo Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE test,foo,bdb,bar TO%";
+
+--echo verify backup history and progress logs:
+SELECT backup_state,operation,backup_file FROM mysql.backup_history
+     WHERE backup_id=@bup_id;
+SELECT notes FROM mysql.backup_progress
+     WHERE backup_id=@bup_id;
+
+#
+# Note: the following error is detected on parser level. Thus no 
+# backup_id is assigned and nothing is written to backup logs.
+#
+--echo # repeated database
 -- error ER_NONUNIQ_DB
 BACKUP DATABASE foo,test,bar,foo TO 'test.bak';
+--replace_column 2 #
+SHOW WARNINGS;
 
 # Test that BACKUP/RESTORE statements are disable inside stored routines,
 # triggers and events.
@@ -151,6 +197,16 @@ BACKUP DATABASE mysql TO 't.bak';
 --replace_column 2 #
 SHOW WARNINGS;
 
+--echo Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE mysql TO%";
+
+--echo verify backup history and progress logs:
+SELECT backup_state,operation, backup_file FROM mysql.backup_history
+     WHERE backup_id=@bup_id;
+SELECT notes FROM mysql.backup_progress
+     WHERE backup_id=@bup_id;
+
 --error 0, 1
 --remove_file $MYSQLD_DATADIR/t.bak
 
@@ -161,6 +217,16 @@ BACKUP DATABASE information_schema TO 't
 --replace_column 2 #
 SHOW WARNINGS;
 
+--echo Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE information_schema TO%";
+
+--echo verify backup history and progress logs:
+SELECT backup_state,operation, backup_file FROM mysql.backup_history
+     WHERE backup_id=@bup_id;
+SELECT  notes FROM mysql.backup_progress
+     WHERE backup_id=@bup_id;
+
 --error 0, 1
 --remove_file $MYSQLD_DATADIR/t.bak
 
@@ -171,6 +237,16 @@ BACKUP DATABASE mysql, information_schem
 --replace_column 2 #
 SHOW WARNINGS;
 
+--echo Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE mysql, information_schema TO%";
+
+--echo verify backup history and progress logs:
+SELECT backup_state,operation, backup_file FROM mysql.backup_history
+     WHERE backup_id=@bup_id;
+SELECT notes FROM mysql.backup_progress
+     WHERE backup_id=@bup_id;
+
 --error 0, 1
 --remove_file $MYSQLD_DATADIR/t.bak
 
@@ -181,6 +257,16 @@ BACKUP DATABASE mysql, test TO 't.bak';
 --replace_column 2 #
 SHOW WARNINGS;
 
+--echo Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE mysql, test TO%";
+
+--echo verify backup history and progress logs:
+SELECT backup_state,operation, backup_file FROM mysql.backup_history
+     WHERE backup_id=@bup_id;
+SELECT notes FROM mysql.backup_progress
+     WHERE backup_id=@bup_id;
+
 --error 0, 1
 --remove_file $MYSQLD_DATADIR/t.bak
 
@@ -201,6 +287,16 @@ BACKUP DATABASE mysql, information_schem
 --replace_column 2 #
 SHOW WARNINGS;
 
+--echo Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE mysql, information_schema, test TO%";
+
+--echo verify backup history and progress logs:
+SELECT backup_state,operation, backup_file FROM mysql.backup_history
+     WHERE backup_id=@bup_id;
+SELECT notes FROM mysql.backup_progress
+     WHERE backup_id=@bup_id;
+
 --error 0, 1
 --remove_file $MYSQLD_DATADIR/t.bak
 #
@@ -247,6 +343,7 @@ DROP TABLE mysql.backup_progress;
 --echo Backup the database;
 --error ER_BACKUP_PROGRESS_TABLES
 BACKUP DATABASE test_ob_error TO 'ob_err.bak';
+
 --error 0,1
 --remove_file $MYSQLD_DATADIR/ob_err.bak
 --replace_column 2 #
@@ -259,7 +356,6 @@ DROP TABLE test.obp_copy;
 
 DROP DATABASE test_ob_error;
 
-
 --echo
 --echo Bug#38624
 --echo Test that backup fails with error if database files are removed 
@@ -332,6 +428,16 @@ SET SESSION DEBUG='+d,backup_fail_add_tr
 --error ER_BACKUP_GET_META_TRIGGER
 BACKUP DATABASE db1 TO 'bup_db1.bak';
 
+--echo Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE db1 TO%";
+
+--echo verify backup history and progress logs:
+SELECT backup_state,operation, backup_file FROM mysql.backup_history
+     WHERE backup_id=@bup_id;
+SELECT notes FROM mysql.backup_progress
+     WHERE backup_id=@bup_id;
+
 SET DEBUG_SYNC= 'reset';
 DROP DATABASE db1;
 
@@ -339,8 +445,7 @@ SET SESSION DEBUG='';
 --echo
 --echo Done testing for Bug#38624
 
-
---echo 
+--echo
 --echo Testing RESTORE ... OVERWRITE functionality
 --echo See bug#34579
 --echo
@@ -353,39 +458,101 @@ USE db1;
 CREATE TABLE table1 (text VARCHAR(20));
 INSERT INTO table1 VALUES ('Inserted before');
 
---echo 
+--echo
 --echo Backup database
 --replace_column 1 #
 BACKUP DATABASE db1 TO 'overwrite.bak';
 
---echo 
+--echo
 --echo Insert more data and display
 INSERT INTO table1 VALUES ('Inserted after');
 SELECT * FROM table1;
 
---echo 
+--echo
 --echo Restore without OVERWRITE flag; will fail
 --error ER_RESTORE_DB_EXISTS
 RESTORE FROM 'overwrite.bak';
 
---echo 
+--echo
 --echo Restore with OVERWRITE flag; will succeed
 --replace_column 1 #
 RESTORE FROM 'overwrite.bak' OVERWRITE;
 
---echo 
+--echo
 --echo Show that inserted value 2 is not there
 SELECT * FROM table1;
 
 DROP DATABASE db1;
 
---echo 
+--echo
 --echo Restore after deleting db; will succeed
 --replace_column 1 #
 RESTORE FROM 'overwrite.bak';
-
---echo 
+--echo
 --echo Show that inserted value 2 is not there
 SELECT * FROM table1;
 
+--echo #
+--echo # Test error handling by backup code when injecting commit blocker error.
+--echo #
+
+SET SESSION DEBUG='+d,backup_grl_fail';
+--error ER_BACKUP_SYNCHRONIZE
+BACKUP DATABASE db1 TO 'overwrite1.bak';
+SET SESSION DEBUG='-d';
+
+--echo
+
+SET SESSION DEBUG='+d,backup_grl_block_commit_fail';
+--error ER_BACKUP_SYNCHRONIZE
+BACKUP DATABASE db1 TO 'overwrite1.bak';
+SET SESSION DEBUG='-d';
+
+--echo #
+--echo # Test that BACKUP and RESTORE clears warning stack
+--echo #
+
+--replace_column 1 #
+BACKUP DATABASE db1 TO 'test.bak';
+
+--echo # Generate warning - test.bak cannot be overwritten
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+--replace_regex /Errcode: [0-9]+/Errcode: #/
+--error 1
+BACKUP DATABASE db1 TO 'test.bak';
+--replace_column 2 #
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+--replace_regex /Errcode: [0-9]+/Errcode: #/
+SHOW WARNINGS;
+--echo
+
+--echo # Test that there are no warnings after successful BACKUP
+--replace_column 1 #
+BACKUP DATABASE db1 TO 'newtest.bak';
+--replace_column 2 #
+SHOW WARNINGS;
+--echo
+
+--echo # Generate warning - test.bak cannot be overwritten
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+--replace_regex /Errcode: [0-9]+/Errcode: #/
+--error 1
+BACKUP DATABASE db1 TO 'test.bak';
+--replace_column 2 #
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+--replace_regex /Errcode: [0-9]+/Errcode: #/
+SHOW WARNINGS;
+--echo
+
+--echo # Test that there are no warnings after successful RESTORE
+--replace_column 1 #
+RESTORE FROM 'newtest.bak' OVERWRITE;
+--replace_column 2 #
+SHOW WARNINGS;
+
+--remove_file $MYSQLD_DATADIR/test.bak
+--remove_file $MYSQLD_DATADIR/newtest.bak
+
+--echo
 DROP DATABASE db1;
+

=== modified file 'mysql-test/suite/backup/t/backup_logs.test'
--- a/mysql-test/suite/backup/t/backup_logs.test	2008-12-06 00:24:23 +0000
+++ b/mysql-test/suite/backup/t/backup_logs.test	2009-02-01 13:26:18 +0000
@@ -1,5 +1,5 @@
 #
-# This test includes tests for ensuring the backup progress tables
+# This test includes tests for ensuring the backup progress and history tables
 # are updated.
 #
 
@@ -8,6 +8,7 @@
 --source include/have_innodb.inc
 --source include/not_embedded.inc
 --source include/have_debug.inc
+--source include/blackhole.inc
 
 SET DEBUG_SYNC= 'RESET';
 
@@ -51,22 +52,120 @@ remove_file $MYSQLD_BACKUPDIR/backup_log
 
 PURGE BACKUP LOGS;
 
-connect (con1,localhost,root,,);
-connect (con2,localhost,root,,);
+--echo Check backup logs when log_backup_output is TABLE and FILE
+SET @@global.log_backup_output = 'TABLE,FILE';
+
+#Create users and assign privileges
+CREATE USER 'tom'@'localhost' IDENTIFIED BY 'abc';
+GRANT ALL ON *.* TO 'tom'@'localhost' WITH GRANT OPTION;
+
+#Checking the grant privileges
+SHOW GRANTS FOR 'tom'@'localhost';
+SELECT user, host, password FROM mysql.user WHERE user='tom';
+
+# Verify different username entry in backup_history logs.
+
+connect (con1,localhost,tom,abc);
+connect (con2,localhost,tom,abc);
+connect (con3,localhost,root,);
+
+connection con1;
+SELECT CURRENT_USER();
+CREATE DATABASE backup_logs;
+
+# Test 1: Verifying backupid
+
+--echo Perform backup
+--echo BACKUP DATABASE backup_logs TO 'backup_logs1.bak'
+
+Let $backup_id=`BACKUP DATABASE backup_logs TO 'backup_logs1.bak'`;
+
+--echo Get last backup_id
+SELECT MAX(backup_id) INTO @backup_id_history FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE backup_logs TO%";
+
+--echo Verify the result
+--echo LET $result=`SELECT @backup_id_history = backup_id AS are_identical`
+LET $result=`SELECT @backup_id_history = $backup_id AS are_identical`;
+
+--echo
+--echo Verification of backup_id from history table and command is:
+--echo $result
+
+--echo We can notice that, if result is 1 then backup_id
+--echo from backup_history log and from backup_command is same.
+--remove_file $MYSQLD_DATADIR/backup_logs1.bak
 
 #
-# Test 1 - Check output of backup.
+# Test 2 - Check output of backup and verify different columns in
+#          backup history and progress logs.
 #
 
+--echo con1: Activate sync points for the backup statement.
+SET DEBUG_SYNC= 'after_backup_log_init     SIGNAL started   WAIT_FOR do_run';
+SET DEBUG_SYNC= 'after_backup_start_backup SIGNAL running   WAIT_FOR finish';
+
+--echo Perform backup database operation with database alone.
+send BACKUP DATABASE backup_logs TO 'backup_logs1.bak';
+
+connection default;
+
+--echo con default: Wait for the backup to be started.
+SET DEBUG_SYNC= 'now WAIT_FOR started';
+--echo Let backup step to running state.
+SET DEBUG_SYNC= 'now SIGNAL do_run WAIT_FOR running';
+
+--echo con default: Let backup finish.
+SET DEBUG_SYNC= 'now SIGNAL finish';
+
 connection con1;
+--echo con1: Finish backup command
+--replace_column 1 #
+reap;
 
-CREATE DATABASE backup_logs;
+--echo Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE backup_logs TO%";
+SELECT operation,num_objects, username, command FROM mysql.backup_history
+     WHERE backup_id=@bup_id;
+--remove_file $MYSQLD_DATADIR/backup_logs1.bak
+
+connection con3;
+SELECT CURRENT_USER();
+--echo Perform Backup and verify the username as 'root' in backup history log
+--replace_column 1 #
+BACKUP DATABASE backup_logs TO 'backup_logs1.bak';
+
+--echo Get last backup_id
+SELECT MAX(backup_id) INTO @bup_id FROM mysql.backup_history
+WHERE command LIKE "BACKUP DATABASE backup_logs TO%";
+SELECT operation,num_objects, username, command FROM mysql.backup_history
+     WHERE backup_id=@bup_id;
+--remove_file $MYSQLD_DATADIR/backup_logs1.bak
+
+--echo
+--echo From the above tables we can notice that num_objects shows '0' if only DB
+--echo is included in backup image(BUG#39109)
+--echo
+
+# BUG#39109: Mysql Online Backup table doesn't show correct num_object count
+# Once BUG#39109 is fixed, all the objects should be listed in the num_object
+# count of backup_history log
 
---echo con1: Create table and new users.
+--echo Include all objects in database(Databases, tables, procedures and
+--echo functions, views, triggers and events) and perform backup operation.
+
+connection con1;
+--echo con1: Create tables
+
+# Tables are created using Myisam(Native driver), Innodb(Consistent Snapshot
+# driver), Memory(Default driver) and Blackhole(no-data driver). Ensure that
+# drivers column indicates all types of drivers for backup database operation.
 
 CREATE TABLE backup_logs.t1 (a char(30)) ENGINE=MYISAM;
 CREATE TABLE backup_logs.t2 (a char(30)) ENGINE=INNODB;
 CREATE TABLE backup_logs.t3 (a char(30)) ENGINE=MEMORY;
+CREATE TABLE backup_logs.t4(id INT, name CHAR(20))ENGINE=BLACKHOLE;
 
 INSERT INTO backup_logs.t1 VALUES ("01 Test #1 - progress"); 
 INSERT INTO backup_logs.t1 VALUES ("02 Test #1 - progress"); 
@@ -88,6 +187,43 @@ INSERT INTO backup_logs.t3 VALUES ("02 T
 INSERT INTO backup_logs.t3 VALUES ("03 Test #1 - progress"); 
 INSERT INTO backup_logs.t3 VALUES ("04 Test #1 - progress"); 
 
+INSERT INTO backup_logs.t4 VALUES(1,'aa1'),(2,'aa2'),(3,'aa3');
+SELECT * FROM backup_logs.t4;
+
+--echo create all objects like views, procedures, functions, triggers
+--echo and events.
+
+--echo ** create view **
+CREATE VIEW backup_logs.v1 AS SELECT * FROM backup_logs.t1;
+CREATE VIEW backup_logs.vv AS SELECT * FROM backup_logs.v1;
+
+--echo ** create triggers **
+delimiter ||;
+CREATE TRIGGER backup_logs.trg AFTER INSERT ON backup_logs.t1 FOR EACH ROW
+BEGIN
+ INSERT INTO backup_logs.t3 VALUES('Test objects count');
+END;||
+
+--echo ** create procedures **
+CREATE PROCEDURE backup_logs.p1()
+BEGIN
+  SELECT * FROM backup_logs.t1;
+END;
+||
+
+--echo ** create functions **
+CREATE FUNCTION backup_logs.f1() RETURNS INTEGER
+BEGIN
+RETURN (SELECT COUNT(*) FROM backup_logs.t1);
+END;
+||
+delimiter ;||
+
+--echo ** create event **
+CREATE EVENT backup_logs.e1 ON SCHEDULE EVERY 1 YEAR DO
+ DELETE FROM backup_logs.t4 WHERE id=10;
+
+--echo
 --echo Do backup of database
 
 connection con2;
@@ -107,29 +243,47 @@ send BACKUP DATABASE backup_logs to 'bac
 
 connection con1;
 
+# Record the time when BACKUP has started.
+# Set the time_zone to +0:00 to have same server timezone as of
+# backup history table.
+
+SET time_zone='+0:00';
+SELECT now() INTO @start_backup;
+
 --echo con1: Wait for the backup to be started.
 SET DEBUG_SYNC= 'now WAIT_FOR started';
 
+--echo
 --echo con1: Display progress
-SELECT notes FROM mysql.backup_progress WHERE backup_id = 500;
+--echo backup progress tables will always show start_time, stop_time,total_bytes
+--echo and progress as '0' for all phases of backup operation.
+--echo BUG#39356 Backup progress table details aren't updated properly
+# Correct backup/restore times and progress should be indicated in the backup
+# logs once this bug is fixed
+
+SELECT total_bytes, progress, notes FROM mysql.backup_progress 
+       WHERE backup_id = 500;
 
 --echo con1: Let backup step to running state.
 SET DEBUG_SYNC= 'now SIGNAL do_run WAIT_FOR phase1';
 
 --echo con1: Display progress
-SELECT notes FROM mysql.backup_progress WHERE backup_id = 500;
+SELECT total_bytes, progress, notes FROM mysql.backup_progress 
+       WHERE backup_id = 500;
 
 --echo con1: Let backup do the backup phase1.
 SET DEBUG_SYNC= 'now SIGNAL backup WAIT_FOR validated';
 
 --echo con1: Display progress
-SELECT notes FROM mysql.backup_progress WHERE backup_id = 500;
+SELECT total_bytes, progress, notes FROM mysql.backup_progress 
+       WHERE backup_id = 500;
 
 --echo con1: Let backup do the backup phase2.
 SET DEBUG_SYNC= 'now SIGNAL do_phase2 WAIT_FOR phase2';
 
 --echo con1: Display progress
-SELECT notes FROM mysql.backup_progress WHERE backup_id = 500;
+SELECT total_bytes, progress, notes FROM mysql.backup_progress 
+       WHERE backup_id = 500;
 
 --echo con1: Let backup finish.
 SET DEBUG_SYNC= 'now SIGNAL finish';
@@ -145,12 +299,79 @@ SET SESSION debug="-d";
 
 connection con1;
 
+SET time_zone='+0:00';
+SELECT now() INTO @stop_backup;
+
+--echo We calculate the timedifference between backup start time and stop
+--echo time. If this difference is '0', then backup start time and stop time
+--echo are same.
+
+SELECT timediff(@stop_backup, @start_backup) > 5;
+
+--echo
+--echo Now verify actual start time / stop time of backup and start time /
+--echo stop time from backup_history table. If the both times are same, 
+--echo the timediff will be '0'
+--echo
+
+SELECT timediff(start_time, @start_backup) > 5 from mysql.backup_history
+WHERE backup_id=500;
+
+SELECT timediff(stop_time, @stop_backup) > 5 from mysql.backup_history
+WHERE backup_id=500;
+
+--echo Now verify that start_time <= vp_time <= stop_time
+
+SELECT timediff(validity_point_time, start_time) >= 0, 
+timediff(stop_time, validity_point_time) >=0
+from mysql.backup_history WHERE backup_id=500;
+
+--echo
+--echo From backup_history log we will notice that "drivers" column will show
+--echo Myisam, snapshot, default and no-data drivers
+--echo "error_num" will be '0' as both backup and restore was successful
+--echo "num_objects" count is always 5(as there are 5 tables in database).
+--echo It does not list other objects from the backup image(BUG#39109)
+
 #Show results
+
+SHOW VARIABLES LIKE 'log_backup_output';
 --replace_column 1 # 2 # 3 # 4 # 10 # 11 # 12 # 15 # 16 #
 --query_vertical SELECT * FROM mysql.backup_history WHERE backup_id = 500;
 --replace_column 1 # 3 # 4 #
 SELECT * FROM mysql.backup_progress WHERE backup_id = 500;
 
+# Check the backup file size and compare it from backup_history
+# logs. Note that total_bytes in backup_progress is always
+# '0' and backup_history shows different file size because of bug#37980
+
+LET FILE_SIZE_HISTORY = `SELECT total_bytes FROM mysql.backup_history WHERE      backup_id = 500`;
+
+perl;
+my $filename = "var/master-data/backup_logs_orig.bak";
+my $filesize = -s $filename;
+
+if ($filesize ne $ENV{FILE_SIZE_HISTORY})
+{
+print "File sizes are not identical\n";
+}
+else
+{
+print "File sizes are identical\n";
+}
+EOF
+
+--echo
+--echo The actual backup file size and from backup_history logs are different
+--echo because of bug#37980. Once this bug is fixed, both should show 
+--echo same bytes.
+
+--file_exists $MYSQLD_DATADIR/backup_history.log
+--file_exists $MYSQLD_DATADIR/backup_progress.log
+
+#cat_file $MYSQLD_DATADIR/backup_history.log;
+#cat_file $MYSQLD_DATADIR/backup_progress.log;
+
 connection con2;
 
 --echo con2: Activate sync points for the backup statement.
@@ -163,23 +384,29 @@ send RESTORE FROM 'backup_logs_orig.bak'
 
 connection con1;
 
+# Record the time when RESTORE has started.
+SELECT now() INTO @start_restore;
+
 --echo con1: Wait for the restore to be started.
 SET DEBUG_SYNC= 'now WAIT_FOR started';
 
 --echo con1: Display progress
-SELECT notes FROM mysql.backup_progress WHERE backup_id = 501;
+SELECT total_bytes, progress,notes FROM mysql.backup_progress 
+       WHERE backup_id = 501;
 
 --echo con1: Let restore step to running state.
 SET DEBUG_SYNC= 'now SIGNAL do_run WAIT_FOR running';
 
 --echo con1: Display progress
-SELECT notes FROM mysql.backup_progress WHERE backup_id = 501;
+SELECT total_bytes, progress,notes FROM mysql.backup_progress 
+       WHERE backup_id = 501;
 
 --echo con1: Let restore do its job and finish.
 SET DEBUG_SYNC= 'now SIGNAL finish';
 
 connection con2;
 --echo con2: Finish restore command
+--replace_column 1 #
 reap;
 
 FLUSH BACKUP LOGS;
@@ -190,12 +417,27 @@ connection con1;
 
 SET DEBUG_SYNC= 'now WAIT_FOR complete';
 
+--echo We calculate the time difference between restore start time and stop
+--echo time. If this difference is '0', then restore start time and stop time
+--echo are same.
+
+SELECT timediff(now(),@start_restore) > 5;
+
 #Show results
 --replace_column 1 # 2 # 3 # 4 # 10 # 11 # 12 # 15 # 16 #
 --query_vertical SELECT * FROM mysql.backup_history WHERE backup_id = 501;
+
+# Correct backup/restore times and progress should be indicated in the backup
+# logs once this bug 39356 is fixed
+
 --replace_column 1 # 3 # 4 #
 SELECT * FROM mysql.backup_progress WHERE backup_id = 501;
 
+--file_exists $MYSQLD_DATADIR/backup_history.log
+--file_exists $MYSQLD_DATADIR/backup_progress.log
+#cat_file $MYSQLD_DATADIR/backup_history.log;
+#cat_file $MYSQLD_DATADIR/backup_progress.log;
+
 SET DEBUG_SYNC= 'RESET';
 
 #

=== modified file 'mysql-test/suite/backup/t/backup_logs_purge.test'
--- a/mysql-test/suite/backup/t/backup_logs_purge.test	2008-12-06 00:24:23 +0000
+++ b/mysql-test/suite/backup/t/backup_logs_purge.test	2009-01-29 21:17:59 +0000
@@ -276,7 +276,7 @@ SET SESSION debug="+d,set_backup_id";
 # it writes the history log and last complete log entry.
 #
 --echo con2: Activate sync points for the backup statement.
-SET DEBUG_SYNC= 'before_backup_done SIGNAL ready WAIT_FOR proceed';
+SET DEBUG_SYNC= 'before_backup_completed SIGNAL ready WAIT_FOR proceed';
 SEND BACKUP DATABASE backup_logs to 'backup5.bak';
 
 connection con1;
@@ -330,7 +330,7 @@ connection con2;
 # it writes the history log and last complete log entry.
 #
 --echo con2: Activate sync points for the backup statement.
-SET DEBUG_SYNC= 'before_restore_done SIGNAL ready WAIT_FOR proceed';
+SET DEBUG_SYNC= 'before_restore_completed SIGNAL ready WAIT_FOR proceed';
 SEND RESTORE FROM 'backup5.bak' OVERWRITE;
 
 connection con1;

=== added file 'mysql-test/suite/backup/t/backup_myisam.test'
--- a/mysql-test/suite/backup/t/backup_myisam.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/backup/t/backup_myisam.test	2009-01-30 13:28:43 +0000
@@ -0,0 +1,63 @@
+#
+# Tests with MyISAM native driver.
+#
+
+--source include/not_embedded.inc
+
+#
+# Precautionary cleanup
+#
+let $MYSQLD_DATADIR= `select @@datadir`;
+--disable_warnings
+USE test;
+DROP DATABASE IF EXISTS mysql_db1;
+--error 0,1
+--remove_file $MYSQLD_DATADIR/bup_myisam.bak
+--enable_warnings
+
+--echo #
+--echo # Bug#40944 - Backup: crash after myisampack
+--echo #
+CREATE DATABASE mysql_db1;
+CREATE TABLE mysql_db1.t1 (c1 VARCHAR(5), c2 int);
+CREATE INDEX i1 ON mysql_db1.t1 (c1, c2);
+INSERT INTO mysql_db1.t1 VALUES ('A',1);
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+FLUSH TABLE mysql_db1.t1;
+#
+--exec $MYISAMPACK -s $MYSQLD_DATADIR/mysql_db1/t1
+--exec $MYISAMCHK -srq $MYSQLD_DATADIR/mysql_db1/t1
+#
+--replace_column 1 #
+BACKUP DATABASE mysql_db1 to 'bup_myisam.bak';
+--replace_column 1 #
+RESTORE FROM 'bup_myisam.bak' OVERWRITE;
+#
+SELECT COUNT(*) FROM mysql_db1.t1 WHERE c2 < 5;
+#
+# Cleanup
+#
+DROP TABLE mysql_db1.t1;
+DROP DATABASE mysql_db1;
+--remove_file $MYSQLD_DATADIR/bup_myisam.bak
+
+--echo
+--echo #
+--echo # Bug#38045 - Backup, MyISAM and file system encoding
+--echo #
+CREATE DATABASE mysqltest;
+USE mysqltest;
+CREATE TABLE `äöüߣå` (id SERIAL) ENGINE=MyISAM;
+--replace_column 1 #
+BACKUP DATABASE mysqltest TO 'bup_myisam.bak';
+DROP TABLE `äöüߣå`;
+USE test;
+DROP DATABASE mysqltest;
+--remove_file $MYSQLD_DATADIR/bup_myisam.bak
+

=== added file 'mysql-test/suite/backup/t/backup_myisam_coverage.test'
--- a/mysql-test/suite/backup/t/backup_myisam_coverage.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/backup/t/backup_myisam_coverage.test	2009-02-01 13:26:18 +0000
@@ -0,0 +1,54 @@
+#
+# Tests with MyISAM native driver, coverage testing.
+#
+
+--source include/have_debug.inc
+--source include/not_embedded.inc
+
+#
+# Precautionary cleanup
+#
+let $MYSQLD_DATADIR= `select @@datadir`;
+--disable_warnings
+USE test;
+DROP DATABASE IF EXISTS mysql_db1;
+--error 0,1
+--remove_file $MYSQLD_DATADIR/bup_myisam3.bak
+--enable_warnings
+
+#
+# Bug#40944 - Backup: crash after myisampack
+#
+CREATE DATABASE mysql_db1;
+CREATE TABLE mysql_db1.t1 (c1 VARCHAR(5), c2 int);
+CREATE INDEX i1 ON mysql_db1.t1 (c1, c2);
+INSERT INTO mysql_db1.t1 VALUES ('A',1);
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+INSERT INTO mysql_db1.t1 SELECT * FROM mysql_db1.t1;
+FLUSH TABLE mysql_db1.t1;
+#
+--exec $MYISAMPACK -s $MYSQLD_DATADIR/mysql_db1/t1
+--exec $MYISAMCHK -srq $MYSQLD_DATADIR/mysql_db1/t1
+#
+--replace_column 1 #
+BACKUP DATABASE mysql_db1 to 'bup_myisam3.bak';
+#
+# Inject error at Backup_restore_ctx_unlock_tables_alloc
+SET debug='d,Backup_restore_ctx_unlock_tables_alloc';
+--replace_column 1 #
+RESTORE FROM 'bup_myisam3.bak' OVERWRITE;
+#
+SELECT COUNT(*) FROM mysql_db1.t1 WHERE c2 < 5;
+#
+# Cleanup
+#
+SET debug='';
+DROP TABLE mysql_db1.t1;
+DROP DATABASE mysql_db1;
+--remove_file $MYSQLD_DATADIR/bup_myisam3.bak
+

=== renamed file 'mysql-test/suite/backup/t/backup_myisam1-master.opt' => 'mysql-test/suite/backup/t/backup_myisam_extlocking-master.opt'
=== renamed file 'mysql-test/suite/backup/t/backup_myisam1.test' => 'mysql-test/suite/backup/t/backup_myisam_extlocking.test'
--- a/mysql-test/suite/backup/t/backup_myisam1.test	2008-12-24 10:48:24 +0000
+++ b/mysql-test/suite/backup/t/backup_myisam_extlocking.test	2009-01-29 21:17:59 +0000
@@ -12,14 +12,15 @@ enable_query_log;
 # Cleanup from former test cases
 #
 --disable_warnings
-drop database if exists mysqltest;
+USE test;
+DROP DATABASE IF EXISTS mysqltest;
 --enable_warnings
 let $MYSQLD_DATADIR= `SELECT @@datadir`;
 --error 0,1
 remove_file $MYSQLD_DATADIR/test.ba;
 
-create database mysqltest;
-use mysqltest;
+CREATE DATABASE mysqltest;
+USE mysqltest;
 CREATE TABLE t1 (a int) engine=myisam;
 
 --replace_column 1 #
@@ -29,6 +30,7 @@ BACKUP DATABASE mysqltest TO 'test.ba';
 #
 # Cleanup from this test case
 #
+USE test;
 DROP DATABASE mysqltest;
 # Note: The backup file should not exist as BACKUP command failed.
 --error 1

=== renamed file 'mysql-test/suite/backup/t/backup_myisam2.test' => 'mysql-test/suite/backup/t/backup_myisam_sync.test'
--- a/mysql-test/suite/backup/t/backup_myisam2.test	2008-10-29 08:45:14 +0000
+++ b/mysql-test/suite/backup/t/backup_myisam_sync.test	2009-01-29 21:17:59 +0000
@@ -8,10 +8,11 @@
 #
 --disable_warnings
 SET DEBUG_SYNC= 'RESET';
+USE test;
 DROP DATABASE IF EXISTS mysqltest;
 let $MYSQLD_DATADIR= `SELECT @@datadir`;
 --error 0,1
-remove_file $MYSQLD_DATADIR/test.ba;
+remove_file $MYSQLD_DATADIR/bup_myisam_sync.bak;
 --enable_warnings
 
 #
@@ -39,7 +40,7 @@ CREATE TABLE t1 (c1 LONGTEXT) ENGINE=MyI
     # to be emitted by another connection.
     SET DEBUG_SYNC= 'before_backup_data_prepare SIGNAL bup_sync
                      WAIT_FOR bup_finish';
-    send BACKUP DATABASE mysqltest TO 'test.ba';
+    send BACKUP DATABASE mysqltest TO 'bup_myisam_sync.bak';
 
 --echo
 --echo connection default: Wait for BACKUP to reach its sync point
@@ -69,7 +70,7 @@ SET DEBUG_SYNC= 'now SIGNAL bup_finish';
     DROP DATABASE mysqltest;
 
     --replace_column 1 #
-    RESTORE FROM 'test.ba';
+    RESTORE FROM 'bup_myisam_sync.bak';
 
     SELECT LENGTH(c1) FROM t1;
     CHECKSUM TABLE t1;
@@ -79,21 +80,9 @@ SET DEBUG_SYNC= 'now SIGNAL bup_finish';
 --echo
 --echo connection default: cleanup
 connection default;
-remove_file $MYSQLD_DATADIR/test.ba;
+remove_file $MYSQLD_DATADIR/bup_myisam_sync.bak;
 SET DEBUG_SYNC= 'RESET';
 
-
---echo
---echo #
---echo # Bug#38045 - Backup, MyISAM and file system encoding
---echo #
-CREATE TABLE `äöüߣå` (id SERIAL) ENGINE=MyISAM;
---replace_column 1 #
-BACKUP DATABASE mysqltest TO 'test.ba';
-DROP TABLE `äöüߣå`;
-remove_file $MYSQLD_DATADIR/test.ba;
-
-
 #
 # Cleanup from this test case
 #

=== added file 'mysql-test/suite/backup/t/backup_stream_errors.test'
--- a/mysql-test/suite/backup/t/backup_stream_errors.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/backup/t/backup_stream_errors.test	2009-01-20 11:36:32 +0000
@@ -0,0 +1,129 @@
+#
+# Test for BUG#39375
+# 
+# This test enforces the backup stream writing logic to enter the incorrect 
+# branch  inside function bstream_write_part(). This is tricky and depends 
+# very much on the low-level implementation details.
+#
+# The problem is triggered when BACKUP writes meta-data section of backup 
+# image.  More precisely, when writing the data chunk containing definitions 
+# of per-database objects (stored procedures in this case). The definitions 
+# have to be written in well designed stages to force internal buffering 
+# code to enter the incorrect execution path.
+#
+# Here is what must happen:
+# 
+# 1. Several procedures are stored such that the size of their meta-data 
+# do not exceed MIN_WRITE_SIZE constant (currently 1024 bytes). This is 
+# so that definition of each procedure is not directly written to the stream 
+# but rather stored in the output buffer.
+#
+# 2. The output buffer should be filled to slightly more than 4k bytes - the 
+# minimal size of a huge fragment (on the transport layer, data is split into 
+# chunk fragments and there are certain limits on possible sizes of such a 
+# fragment - see sql/backup/stream_v1_transport.c for some details).
+#
+# 3. Now BACKUP should write meta-data entry whose size is bigger than 
+# MIN_WRITE_SIZE (to force direct write to the output stream) but such that 
+# the total length of all meta-data entries written so far should not exceed 
+# 2*4k. This will force the internal buffering logic to follow the incorrect 
+# execution path:
+#
+# a) bstream_write_part() will be called with data blob of length > 
+#    MIN_WRITE_SIZE.
+#
+# b) At this time the output buffer will be filled with more than 4k of data - 
+#    the situation should be as follows:
+#
+#       out buffer (more than 4k)
+#     [hdr=======================]   data (more than MIN_WRITE_SIZE)
+#                                [====================]
+#
+# c) Now the biggest prefix of all the available data (data in the output 
+#    buffer + data passed to bstream_write_part) is determined which has 
+#    a valid size. This is done with calculations involving fragment blob:
+#
+#             prefix                fragment
+#         ------------------[-------------------------]
+#
+#   Since there is more than 4k of data to write but less than 8k, a prefix 
+#   of size 4k will be chosen. Fragment blob will hold the remainder of 
+#   the data. The crucial fact is that fragment starts inside the output 
+#   buffer (fragment.begin < buf.pos).
+#
+# d) Since there is more than MIN_WRITE_SIZE of new data available, this 
+#    branch will be entered:
+#
+#    if (fragment.end > (s->buf.pos + MIN_WRITE_SIZE))
+#    {
+#      /* write contents of the output buffer */
+#      ret= write_buffer(s);
+#      if (ret != BSTREAM_OK)
+#        return BSTREAM_ERROR;
+#
+#      /* write remainder of the fragment from data blob */
+#      saved_end= data->end;
+#      data->end= data->begin + (fragment.begin - s->buf.pos);
+#      ...
+#
+#    But this code is written under assumption that fragment starts outside 
+#    the output buffer (fragment.begin > s->buf.pos). Violating this assumption 
+#    leads to corrupted data in backup image.
+
+--source include/not_embedded.inc
+
+--echo # Preparations
+
+let $bdir=`SELECT @@backupdir`;
+--disable_warnings
+DROP DATABASE IF EXISTS db;
+--error 0,1
+--remove_file $bdir/db.bkp
+--enable_warnings
+
+CREATE DATABASE db;
+USE db;
+
+# Create string of length 512 (=2^9).
+
+let $string=12345678;
+let $power= 6;
+
+while ($power)
+{
+ let $string=$string$string;
+ dec $power;
+}
+
+
+--echo # Create procedures whose definitions will be shorter than MIN_WRITE_SIZE 
+--echo # (approx 700 bytes).
+
+eval CREATE PROCEDURE p1() SET @foo='$string';
+eval CREATE PROCEDURE p2() SET @foo='$string';
+eval CREATE PROCEDURE p3() SET @foo='$string';
+eval CREATE PROCEDURE p4() SET @foo='$string';
+eval CREATE PROCEDURE p5() SET @foo='$string';
+eval CREATE PROCEDURE p6() SET @foo='$string';
+
+# After 6 procedures we will have slightly more than 4k of metadata in the 
+# output buffer.
+
+--echo # Now create procedure whose definition is longer than 1024 (MIN_WRITE_SIZE)
+--echo # but such that in total we have no more than 8k of metadata. Use string of
+--echo # length 1.5k.
+
+let $string=$string$string$string;
+eval CREATE PROCEDURE q1() SET @foo='$string';
+
+--echo # Backup the database - the image should be corrupted without bug#39375 fix.
+--replace_column 1 #
+BACKUP DATABASE db TO 'db.bkp';
+
+--echo # Try to restore - should fail if image is corrupted.
+--replace_column 1 #
+RESTORE FROM 'db.bkp' OVERWRITE;
+
+--echo # Cleanup
+DROP DATABASE db;
+--remove_file $bdir/db.bkp

=== modified file 'mysql-test/suite/backup/t/backup_vp_nontx.test'
--- a/mysql-test/suite/backup/t/backup_vp_nontx.test	2009-01-29 10:19:15 +0000
+++ b/mysql-test/suite/backup/t/backup_vp_nontx.test	2009-01-29 21:17:59 +0000
@@ -208,6 +208,12 @@ SET DEBUG_SYNC= 'now SIGNAL insert_block
   SET DEBUG_SYNC= 'now SIGNAL finish_bup';
 
 
+### CON 2 ###
+    connection con_ntx2;
+    --echo con_ntx2: Reap insert
+    reap;
+
+
 ### CON BUP ###
 --echo 
 connection con_bup;

=== modified file 'mysql-test/suite/backup/t/disabled.def'
--- a/mysql-test/suite/backup/t/disabled.def	2008-12-10 15:53:06 +0000
+++ b/mysql-test/suite/backup/t/disabled.def	2008-12-29 12:06:48 +0000
@@ -1,4 +1,4 @@
-##############################################################################
+##############################################################################
 #
 #  List the test cases that are to be disabled temporarily.
 #
@@ -11,10 +11,6 @@
 ##############################################################################
 backup_no_engine              : Bug#36021 2008-04-13 rsomla server crashes when openning table with unknown storage engine
 backup_triggers_and_events    : Bug#37762 2008-07-01 rafal Test fails on remove_file for unknown reasons
-#backup_no_be                 : Bug#38023 2008-07-16 rafal Test triggers valgrind warnings described in the bug
 backup_no_data                : Bug#41008 2008-12-08 alik union.test does not cleanup
 backup_ddl_blocker            : Bug#41008 2008-12-08 alik union.test does not cleanup
-backup                        : Bug#40807 2008-11-18 hakank Test fails on big-endian architecture
-backup_timeout                : Bug#40808 2008-11-18 hakank Test fails on big-endian architecture
-backup_errors                 : Bug#41359 2008-12-10 ingo Test fails after merge of main and backup trees
 backup_views                  : Bug#41360 2008-12-10 ingo Test fails after merge of main and backup trees

=== added file 'mysql-test/suite/backup_engines/r/backup_partitioning.result'
--- a/mysql-test/suite/backup_engines/r/backup_partitioning.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/backup_engines/r/backup_partitioning.result	2008-12-18 21:09:11 +0000
@@ -0,0 +1,519 @@
+SHOW VARIABLES LIKE 'storage_engine';
+Variable_name	Value
+storage_engine	#
+
+starting the test for backup
+
+DROP DATABASE IF EXISTS `¥`;
+*** The following defects were reported while executing WL#4227 ***
+Bug #34391 Character sets: crash if char(), utf32, innodb
+Bug #33566 Backup: crash with partitions and Falcon
+Bug #37551 Junk detected in data contents sometimes when utf8mb3
+character set is used.
+Bug #37554 Use of character set and collate as 'filename' shows
+unusual behaviour.
+Bug #35499 View when created with swe7 character set fails.
+Bug #38784 Mysql server crash if table is altered by partition changes.
+SET NAMES utf8;
+CREATE DATABASE `¥`;
+USE `¥`;
+*****Creating table with range partition*********
+CREATE TABLE `ê`(
+ID INT NOT NULL,
+NAME CHAR(20),
+AGE TINYINT,
+DOB DATE,
+SAL FLOAT,
+PRIMARY KEY(DOB)) PARTITION BY RANGE(Year(DOB))(
+PARTITION P0 values less than (1970),
+PARTITION P1 values less than (1980),
+PARTITION P2 values less than (1990),
+PARTITION P3 values less than (2000),
+PARTITION P4 values less than maxvalue);
+loading data
+INSERT INTO `ê` VALUES
+(10,'aa','20','1988-12-19',2345.89),(11,'bb',31,'1977-10-19',6009.89),
+(12,'cc',78,'1945-09-12',3897),(13,'dd',12,'1994-09-18',7892.78),
+(14,'ee',45,'1960-12-23',7845.90),(15,'ff',25,'1984-02-07',3500.89);
+select data
+SELECT * FROM `ê` ORDER BY DOB;
+ID	NAME	AGE	DOB	SAL
+12	cc	78	1945-09-12	3897
+14	ee	45	1960-12-23	7845.9
+11	bb	31	1977-10-19	6009.89
+15	ff	25	1984-02-07	3500.89
+10	aa	20	1988-12-19	2345.89
+13	dd	12	1994-09-18	7892.78
+SELECT COUNT(*) FROM `ê`;
+COUNT(*)
+6
+
+******Creating table partition with List**************
+CREATE TABLE `cÿd`(`numeric` INT, `char` CHAR(20))
+PARTITION BY LIST (`numeric`)
+(
+PARTITION p0 VALUES IN (1,3,5),
+PARTITION p1 VALUES IN (2,4,6)
+);
+insert data
+INSERT INTO `cÿd` VALUES(1,'a'),(6,'c'),(5,'j'),(2,'l');
+selecting data
+SELECT * FROM `cÿd` ORDER BY `numeric`;
+numeric	char
+1	a
+2	l
+5	j
+6	c
+SELECT COUNT(*) FROM `cÿd`;
+COUNT(*)
+4
+
+*******Creating table partition with Hash***********
+CREATE TABLE `µ`(
+col1 INT,
+col2 CHAR(20),
+col3 DATE)
+PARTITION BY HASH (YEAR(col3))
+PARTITIONS 4;
+Insert data
+INSERT INTO `µ` VALUES
+(1,'a','1942-10-09'),(2,'b','1950-08-07'),
+(3,'c','1952-10-10'),(4,'d','1954-10-04');
+selecting data
+SELECT * FROM `µ` ORDER BY col3;
+col1	col2	col3
+1	a	1942-10-09
+2	b	1950-08-07
+3	c	1952-10-10
+4	d	1954-10-04
+SELECT COUNT(*) FROM `µ`;
+COUNT(*)
+4
+********Creating table partition with keys*******
+CREATE TABLE `вап` (id INT)
+PARTITION BY KEY(id)
+PARTITIONS 2;
+insert data
+INSERT INTO `вап` VALUES(1),(2),(3),(4);
+selecting the data
+SELECT * FROM `вап`;
+id
+1
+3
+2
+4
+SELECT COUNT(*) FROM `вап`;
+COUNT(*)
+4
+***Creating views from partitoned Tables****
+CREATE VIEW `ïö` AS SELECT * FROM `ê`;
+Obtaining information about Partitions
+SHOW FULL TABLES;
+Tables_in_¥	Table_type
+µ	BASE TABLE
+ê	BASE TABLE
+ïö	VIEW
+вап	BASE TABLE
+cÿd	BASE TABLE
+EXPLAIN PARTITIONS SELECT * FROM `ê`;;
+id	#
+select_type	SIMPLE
+table	ê
+partitions	P0,P1,P2,P3,P4
+type	ALL
+possible_keys	NULL
+key	NULL
+key_len	NULL
+ref	NULL
+rows	#
+Extra	
+EXPLAIN PARTITIONS SELECT * FROM `cÿd`;;
+id	#
+select_type	SIMPLE
+table	cÿd
+partitions	p0,p1
+type	ALL
+possible_keys	NULL
+key	NULL
+key_len	NULL
+ref	NULL
+rows	#
+Extra	
+EXPLAIN PARTITIONS SELECT * FROM `µ`;;
+id	#
+select_type	SIMPLE
+table	µ
+partitions	p0,p1,p2,p3
+type	ALL
+possible_keys	NULL
+key	NULL
+key_len	NULL
+ref	NULL
+rows	#
+Extra	
+EXPLAIN PARTITIONS SELECT * FROM `вап`;;
+id	#
+select_type	SIMPLE
+table	вап
+partitions	p0,p1
+type	ALL
+possible_keys	NULL
+key	NULL
+key_len	NULL
+ref	NULL
+rows	#
+Extra	
+backup data
+BACKUP DATABASE `¥` TO 'partitions.bak';
+backup_id
+#
+dropping  database.
+DROP DATABASE `¥`;
+Restore
+RESTORE FROM 'partitions.bak';
+backup_id
+#
+
+** RESULTS AFTER RESTORE **
+
+Obtaining Information about Partitions
+SHOW FULL TABLES;;
+Tables_in_¥	µ
+Table_type	BASE TABLE
+Tables_in_¥	ê
+Table_type	BASE TABLE
+Tables_in_¥	ïö
+Table_type	VIEW
+Tables_in_¥	вап
+Table_type	BASE TABLE
+Tables_in_¥	cÿd
+Table_type	BASE TABLE
+EXPLAIN PARTITIONS SELECT * FROM `ê`;;
+id	#
+select_type	SIMPLE
+table	ê
+partitions	P0,P1,P2,P3,P4
+type	ALL
+possible_keys	NULL
+key	NULL
+key_len	NULL
+ref	NULL
+rows	#
+Extra	
+EXPLAIN PARTITIONS SELECT * FROM `cÿd`;;
+id	#
+select_type	SIMPLE
+table	cÿd
+partitions	p0,p1
+type	ALL
+possible_keys	NULL
+key	NULL
+key_len	NULL
+ref	NULL
+rows	#
+Extra	
+EXPLAIN PARTITIONS SELECT * FROM `µ`;;
+id	#
+select_type	SIMPLE
+table	µ
+partitions	p0,p1,p2,p3
+type	ALL
+possible_keys	NULL
+key	NULL
+key_len	NULL
+ref	NULL
+rows	#
+Extra	
+EXPLAIN PARTITIONS SELECT * FROM `вап`;;
+id	#
+select_type	SIMPLE
+table	вап
+partitions	p0,p1
+type	ALL
+possible_keys	NULL
+key	NULL
+key_len	NULL
+ref	NULL
+rows	#
+Extra	
+ALTER TABLE `ê` PARTITION BY KEY(DOB) PARTITIONS 2;
+INSERT INTO `ïö` VALUES(17,'kk','90','2020-12-19',2000);
+SELECT COUNT(*) FROM `ïö`;
+COUNT(*)
+7
+SELECT COUNT(*) FROM `ê`;
+COUNT(*)
+7
+
+**Creating table with subpartitions**
+CREATE TABLE `фы` (`int` INT)
+PARTITION BY range (`int`)
+SUBPARTITION BY key (`int`)
+(PARTITION p0 VALUES LESS THAN (2));
+INSERT INTO `фы` VALUES(0),(1);
+SHOW FULL TABLES;
+Tables_in_¥	Table_type
+µ	BASE TABLE
+ê	BASE TABLE
+ïö	VIEW
+фы	BASE TABLE
+вап	BASE TABLE
+cÿd	BASE TABLE
+EXPLAIN PARTITIONS SELECT * FROM `ê`;;
+id	#
+select_type	SIMPLE
+table	ê
+partitions	p0,p1
+type	ALL
+possible_keys	NULL
+key	NULL
+key_len	NULL
+ref	NULL
+rows	#
+Extra	
+EXPLAIN PARTITIONS SELECT * FROM `cÿd`;;
+id	#
+select_type	SIMPLE
+table	cÿd
+partitions	p0,p1
+type	ALL
+possible_keys	NULL
+key	NULL
+key_len	NULL
+ref	NULL
+rows	#
+Extra	
+EXPLAIN PARTITIONS SELECT * FROM `µ`;;
+id	#
+select_type	SIMPLE
+table	µ
+partitions	p0,p1,p2,p3
+type	ALL
+possible_keys	NULL
+key	NULL
+key_len	NULL
+ref	NULL
+rows	#
+Extra	
+EXPLAIN PARTITIONS SELECT * FROM `вап`;;
+id	#
+select_type	SIMPLE
+table	вап
+partitions	p0,p1
+type	ALL
+possible_keys	NULL
+key	NULL
+key_len	NULL
+ref	NULL
+rows	#
+Extra	
+BACKUP DATABASE `¥` TO 'partitions.bak';
+backup_id
+#
+DROP DATABASE `¥`;
+RESTORE FROM 'partitions.bak';
+backup_id
+#
+showing objects and create statements
+EXPLAIN PARTITIONS SELECT * FROM `ê`;;
+id	#
+select_type	SIMPLE
+table	ê
+partitions	p0,p1
+type	ALL
+possible_keys	NULL
+key	NULL
+key_len	NULL
+ref	NULL
+rows	#
+Extra	
+EXPLAIN PARTITIONS SELECT * FROM `cÿd`;;
+id	#
+select_type	SIMPLE
+table	cÿd
+partitions	p0,p1
+type	ALL
+possible_keys	NULL
+key	NULL
+key_len	NULL
+ref	NULL
+rows	#
+Extra	
+EXPLAIN PARTITIONS SELECT * FROM `µ`;;
+id	#
+select_type	SIMPLE
+table	µ
+partitions	p0,p1,p2,p3
+type	ALL
+possible_keys	NULL
+key	NULL
+key_len	NULL
+ref	NULL
+rows	#
+Extra	
+EXPLAIN PARTITIONS SELECT * FROM `вап`;;
+id	#
+select_type	SIMPLE
+table	вап
+partitions	p0,p1
+type	ALL
+possible_keys	NULL
+key	NULL
+key_len	NULL
+ref	NULL
+rows	#
+Extra	
+SELECT * FROM `cÿd` ORDER BY `numeric`;
+numeric	char
+1	a
+2	l
+5	j
+6	c
+SELECT * FROM `вап`;
+id
+1
+3
+2
+4
+SELECT * FROM `µ` ORDER BY col3;
+col1	col2	col3
+1	a	1942-10-09
+2	b	1950-08-07
+3	c	1952-10-10
+4	d	1954-10-04
+SELECT * FROM `ê` ORDER BY DOB;
+ID	NAME	AGE	DOB	SAL
+12	cc	78	1945-09-12	3897
+14	ee	45	1960-12-23	7845.9
+11	bb	31	1977-10-19	6009.89
+15	ff	25	1984-02-07	3500.89
+10	aa	20	1988-12-19	2345.89
+13	dd	12	1994-09-18	7892.78
+17	kk	90	2020-12-19	2000
+SELECT * FROM `фы`;
+int
+0
+1
+**creating view from view***
+CREATE VIEW v2 AS SELECT * FROM `ïö`;
+INSERT INTO v2 VALUES(18,'pp',51,'1990-10-19',6009.89);
+SELECT COUNT(*) FROM `ïö`;
+COUNT(*)
+8
+SELECT COUNT(*) FROM `ê`;
+COUNT(*)
+8
+Make some changes and then perform Restore.
+EXPLAIN PARTITIONS SELECT * FROM `фы`;;
+id	#
+select_type	SIMPLE
+table	фы
+partitions	p0_p0sp0
+type	ALL
+possible_keys	NULL
+key	NULL
+key_len	NULL
+ref	NULL
+rows	#
+Extra	
+SHOW FULL TABLES;
+Tables_in_¥	Table_type
+µ	BASE TABLE
+ê	BASE TABLE
+ïö	VIEW
+фы	BASE TABLE
+вап	BASE TABLE
+cÿd	BASE TABLE
+v2	VIEW
+ALTER TABLE `фы` REMOVE PARTITIONING;
+BACKUP DATABASE `¥` to 'partitions.bak';
+backup_id
+#
+DROP DATABASE `¥`;
+RESTORE FROM 'partitions.bak';
+backup_id
+#
+SHOW FULL TABLES;
+Tables_in_¥	Table_type
+µ	BASE TABLE
+ê	BASE TABLE
+ïö	VIEW
+фы	BASE TABLE
+вап	BASE TABLE
+cÿd	BASE TABLE
+v2	VIEW
+SELECT * FROM `фы`;
+int
+0
+1
+EXPLAIN PARTITIONS SELECT * FROM `фы`;;
+id	#
+select_type	SIMPLE
+table	фы
+partitions	NULL
+type	ALL
+possible_keys	NULL
+key	NULL
+key_len	NULL
+ref	NULL
+rows	#
+Extra	
+SELECT * FROM `ïö` ORDER BY DOB;
+ID	NAME	AGE	DOB	SAL
+12	cc	78	1945-09-12	3897
+14	ee	45	1960-12-23	7845.9
+11	bb	31	1977-10-19	6009.89
+15	ff	25	1984-02-07	3500.89
+10	aa	20	1988-12-19	2345.89
+18	pp	51	1990-10-19	6009.89
+13	dd	12	1994-09-18	7892.78
+17	kk	90	2020-12-19	2000
+SELECT COUNT(*) FROM `ê`;
+COUNT(*)
+8
+Perform restore again by changing the character set
+SET NAMES latin7;
+**Restore**
+RESTORE FROM 'partitions.bak' OVERWRITE;
+backup_id
+#
+SHOW DATABASES;
+Database
+information_schema
+?
+mysql
+test
+SHOW FULL TABLES;
+Tables_in_?	Table_type
+�	BASE TABLE
+?	BASE TABLE
+?�EW
+??	BASE TABLE
+???	BASE TABLE
+c?d	BASE TABLE
+v2	VIEW
+** checking client character set **
+SELECT @@character_set_client;
+@@character_set_client
+latin7
+SELECT @@character_set_results;
+@@character_set_results
+latin7
+SELECT @@character_set_connection;
+@@character_set_connection
+latin7
+SET NAMES latin5;
+RESTORE FROM 'partitions.bak' OVERWRITE;
+backup_id
+#
+SHOW DATABASES;
+Database
+information_schema
+�
+mysql
+test
+SET NAMES utf8;
+
+***  DROP `¥` DATABASE ****
+
+DROP DATABASE `¥`;

=== added file 'mysql-test/suite/backup_engines/t/backup_partitioning.test'
--- a/mysql-test/suite/backup_engines/t/backup_partitioning.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/backup_engines/t/backup_partitioning.test	2008-12-18 21:09:11 +0000
@@ -0,0 +1,317 @@
+###########################################################################
+# Author: Hema
+# Date: 2008-06-28
+# Purpose: To test the backup and Restore of different partitions using
+# accented letters. We will ensure that backup stores identifiers properly in
+# utf8 format and is retrieved during restore without changing the client 
+# character set.
+###############################################################################
+--source include/not_embedded.inc
+--source include/have_debug.inc
+--source suite/backup_engines/include/backup_engine.inc
+
+connect (backup,localhost,root,,);
+connect (breakpoints,localhost,root,,);
+
+##############################################################
+--echo
+--echo starting the test for backup
+--echo
+##############################################################
+
+--error 0,1
+--remove_file $MYSQLTEST_VARDIR/master-data/partitions.bak
+
+#Create Database and and tables with different types of partitions.
+
+--disable_warnings
+DROP DATABASE IF EXISTS `¥`;
+--enable_warnings
+
+--echo *** The following defects were reported while executing WL#4227 ***
+
+--echo Bug #34391 Character sets: crash if char(), utf32, innodb
+--echo Bug #33566 Backup: crash with partitions and Falcon
+--echo Bug #37551 Junk detected in data contents sometimes when utf8mb3
+--echo character set is used.
+--echo Bug #37554 Use of character set and collate as 'filename' shows
+--echo unusual behaviour.
+--echo Bug #35499 View when created with swe7 character set fails.
+--echo Bug #38784 Mysql server crash if table is altered by partition changes.
+
+#SET NAMES latin5;
+SET NAMES utf8;
+
+CREATE DATABASE `¥`;
+USE `¥`;
+
+#Create table and load with data.
+
+--echo *****Creating table with range partition*********
+
+CREATE TABLE `ê`(
+ ID INT NOT NULL,
+NAME CHAR(20),
+AGE TINYINT,
+DOB DATE,
+SAL FLOAT,
+PRIMARY KEY(DOB)) PARTITION BY RANGE(Year(DOB))(
+        PARTITION P0 values less than (1970),
+        PARTITION P1 values less than (1980),
+        PARTITION P2 values less than (1990),
+        PARTITION P3 values less than (2000),
+        PARTITION P4 values less than maxvalue);
+
+--echo loading data
+
+INSERT INTO `ê` VALUES
+(10,'aa','20','1988-12-19',2345.89),(11,'bb',31,'1977-10-19',6009.89),
+(12,'cc',78,'1945-09-12',3897),(13,'dd',12,'1994-09-18',7892.78),
+(14,'ee',45,'1960-12-23',7845.90),(15,'ff',25,'1984-02-07',3500.89);
+
+--echo select data
+
+SELECT * FROM `ê` ORDER BY DOB;
+SELECT COUNT(*) FROM `ê`;
+
+--echo
+--echo ******Creating table partition with List**************
+
+CREATE TABLE `cÿd`(`numeric` INT, `char` CHAR(20))
+PARTITION BY LIST (`numeric`)
+(
+   PARTITION p0 VALUES IN (1,3,5),
+   PARTITION p1 VALUES IN (2,4,6)
+);
+--echo insert data
+
+INSERT INTO `cÿd` VALUES(1,'a'),(6,'c'),(5,'j'),(2,'l');
+
+--echo selecting data
+
+SELECT * FROM `cÿd` ORDER BY `numeric`;
+SELECT COUNT(*) FROM `cÿd`;
+
+--echo
+--echo *******Creating table partition with Hash***********
+
+CREATE TABLE `µ`(
+col1 INT,
+col2 CHAR(20),
+col3 DATE)
+PARTITION BY HASH (YEAR(col3))
+PARTITIONS 4;
+
+--echo Insert data
+
+INSERT INTO `µ` VALUES
+(1,'a','1942-10-09'),(2,'b','1950-08-07'),
+(3,'c','1952-10-10'),(4,'d','1954-10-04');
+
+--echo selecting data
+
+SELECT * FROM `µ` ORDER BY col3;
+SELECT COUNT(*) FROM `µ`;
+
+--echo ********Creating table partition with keys*******
+
+CREATE TABLE `вап` (id INT)
+PARTITION BY KEY(id)
+PARTITIONS 2;
+
+--echo insert data
+
+INSERT INTO `вап` VALUES(1),(2),(3),(4);
+
+--echo selecting the data
+
+SELECT * FROM `вап`;
+SELECT COUNT(*) FROM `вап`;
+
+--echo ***Creating views from partitoned Tables****
+
+CREATE VIEW `ïö` AS SELECT * FROM `ê`;
+
+# We mask some of the columns in Explain Partitions command because of
+# Bug #37532 Explain command shows incorrect rows,
+# when table is partitioned and innodb.
+
+--echo Obtaining information about Partitions
+SHOW FULL TABLES;
+--replace_column 1 # 10 #
+--query_vertical EXPLAIN PARTITIONS SELECT * FROM `ê`;
+--replace_column 1 # 10 #
+--query_vertical EXPLAIN PARTITIONS SELECT * FROM `cÿd`;
+--replace_column 1 # 10 #
+--query_vertical EXPLAIN PARTITIONS SELECT * FROM `µ`;
+--replace_column 1 # 10 #
+--query_vertical EXPLAIN PARTITIONS SELECT * FROM `вап`;
+
+--echo backup data
+--replace_column 1 #
+BACKUP DATABASE `¥` TO 'partitions.bak';
+
+--echo dropping  database.
+
+DROP DATABASE `¥`;
+
+--echo Restore
+--replace_column 1 #
+RESTORE FROM 'partitions.bak';
+--remove_file $MYSQLTEST_VARDIR/master-data/partitions.bak
+
+--echo
+--echo ** RESULTS AFTER RESTORE **
+--echo
+
+#show data and create statements
+--echo Obtaining Information about Partitions
+--query_vertical SHOW FULL TABLES;
+--replace_column 1 # 10 #
+--query_vertical EXPLAIN PARTITIONS SELECT * FROM `ê`;
+--replace_column 1 # 10 #
+--query_vertical EXPLAIN PARTITIONS SELECT * FROM `cÿd`;
+--replace_column 1 # 10 #
+--query_vertical EXPLAIN PARTITIONS SELECT * FROM `µ`;
+--replace_column 1 # 10 #
+--query_vertical EXPLAIN PARTITIONS SELECT * FROM `вап`;
+
+# Make some changes to existing partitions, take backup and then perform
+# Restore
+
+ALTER TABLE `ê` PARTITION BY KEY(DOB) PARTITIONS 2;
+
+#All the maintenance operations of partitions like ANALYZE, OPTIMIZE
+#REPAIR are currently disabled in 6.0 and 5.0.
+#Bug#20129
+#We can include these operations in the test case once this bug is fixed.
+
+#Bug#38784 Mysql server crash if table is altered with partition changes.
+#This happens only in Windows OS. The # can be removed once the bug#38784
+#is fixed.
+
+#ALTER TABLE `cÿd` ADD PARTITION (PARTITION p2 VALUES IN (10,20,30));
+#INSERT INTO `cÿd` VALUES(10,'h'),(3,'b'),(10,'s'),(5,'l');
+#SELECT * FROM `cÿd`;
+#SELECT COUNT(*) FROM `cÿd`;
+
+INSERT INTO `ïö` VALUES(17,'kk','90','2020-12-19',2000);
+SELECT COUNT(*) FROM `ïö`;
+SELECT COUNT(*) FROM `ê`;
+--echo
+--echo **Creating table with subpartitions**
+
+CREATE TABLE `фы` (`int` INT)
+PARTITION BY range (`int`)
+SUBPARTITION BY key (`int`)
+(PARTITION p0 VALUES LESS THAN (2));
+
+INSERT INTO `фы` VALUES(0),(1);
+
+#show data and create statements
+SHOW FULL TABLES;
+--replace_column 1 # 10 #
+--query_vertical EXPLAIN PARTITIONS SELECT * FROM `ê`;
+--replace_column 1 # 10 #
+--query_vertical EXPLAIN PARTITIONS SELECT * FROM `cÿd`;
+--replace_column 1 # 10 #
+--query_vertical EXPLAIN PARTITIONS SELECT * FROM `µ`;
+--replace_column 1 # 10 #
+--query_vertical EXPLAIN PARTITIONS SELECT * FROM `вап`;
+
+--replace_column 1 #
+BACKUP DATABASE `¥` TO 'partitions.bak';
+DROP DATABASE `¥`;
+--replace_column 1 #
+RESTORE FROM 'partitions.bak';
+--remove_file $MYSQLTEST_VARDIR/master-data/partitions.bak
+
+#show data and create statements
+--echo showing objects and create statements
+--replace_column 1 # 10 #
+--query_vertical EXPLAIN PARTITIONS SELECT * FROM `ê`;
+--replace_column 1 # 10 #
+--query_vertical EXPLAIN PARTITIONS SELECT * FROM `cÿd`;
+--replace_column 1 # 10 #
+--query_vertical EXPLAIN PARTITIONS SELECT * FROM `µ`;
+--replace_column 1 # 10 #
+--query_vertical EXPLAIN PARTITIONS SELECT * FROM `вап`;
+
+SELECT * FROM `cÿd` ORDER BY `numeric`;
+SELECT * FROM `вап`;
+SELECT * FROM `µ` ORDER BY col3;
+SELECT * FROM `ê` ORDER BY DOB;
+SELECT * FROM `фы`;
+
+--echo **creating view from view***
+CREATE VIEW v2 AS SELECT * FROM `ïö`;
+INSERT INTO v2 VALUES(18,'pp',51,'1990-10-19',6009.89);
+
+SELECT COUNT(*) FROM `ïö`;
+SELECT COUNT(*) FROM `ê`;
+
+--echo Make some changes and then perform Restore.
+
+#Bug#38784 Mysql server crash if table is altered with partition changes.
+#This happens only in Windows OS. The # can be removed once the bug#38784
+#is fixed.
+
+#ALTER TABLE `фы` ADD PARTITION (PARTITION p1 VALUES LESS THAN (4));
+#INSERT INTO `фы` VALUES(2),(3),(1);
+
+--replace_column 1 # 10 #
+--query_vertical EXPLAIN PARTITIONS SELECT * FROM `фы`;
+SHOW FULL TABLES;
+ALTER TABLE `фы` REMOVE PARTITIONING;
+
+--replace_column 1 #
+BACKUP DATABASE `¥` to 'partitions.bak';
+
+DROP DATABASE `¥`;
+--replace_column 1 #
+RESTORE FROM 'partitions.bak';
+
+SHOW FULL TABLES;
+SELECT * FROM `фы`;
+--replace_column 1 # 10 #
+--query_vertical EXPLAIN PARTITIONS SELECT * FROM `фы`;
+SELECT * FROM `ïö` ORDER BY DOB;
+SELECT COUNT(*) FROM `ê`;
+
+--echo Perform restore again by changing the character set
+
+SET NAMES latin7;
+
+--echo **Restore**
+--replace_column 1 #
+RESTORE FROM 'partitions.bak' OVERWRITE;
+
+#show data and table columns
+
+SHOW DATABASES;
+SHOW FULL TABLES;
+
+--echo ** checking client character set **
+#check if restore does not change the character set.
+SELECT @@character_set_client;
+SELECT @@character_set_results;
+SELECT @@character_set_connection;
+
+#Change the character set and restore again 
+SET NAMES latin5;
+
+--replace_column 1 #
+RESTORE FROM 'partitions.bak' OVERWRITE;
+SHOW DATABASES;
+
+SET NAMES utf8;
+
+# Test cleanup section
+
+--echo
+--echo ***  DROP `¥` DATABASE ****
+--echo
+DROP DATABASE `¥`;
+
+--remove_file $MYSQLTEST_VARDIR/master-data/partitions.bak
+

=== modified file 'mysql-test/suite/federated/federated.inc'
--- a/mysql-test/suite/federated/federated.inc	2008-12-15 12:41:31 +0000
+++ b/mysql-test/suite/federated/federated.inc	2009-02-02 20:50:45 +0000
@@ -5,13 +5,7 @@ connect (master,127.0.0.1,root,,test,$MA
 connect (slave,127.0.0.1,root,,test,$SLAVE_MYPORT,);
 
 connection master;
---disable_warnings
-DROP DATABASE IF EXISTS federated;
---enable_warnings
 CREATE DATABASE federated;
 
 connection slave;
---disable_warnings
-DROP DATABASE IF EXISTS federated;
---enable_warnings
 CREATE DATABASE federated;

=== modified file 'mysql-test/suite/federated/federated.result'
--- a/mysql-test/suite/federated/federated.result	2008-12-24 10:48:24 +0000
+++ b/mysql-test/suite/federated/federated.result	2009-02-02 20:50:45 +0000
@@ -1,6 +1,4 @@
-DROP DATABASE IF EXISTS federated;
 CREATE DATABASE federated;
-DROP DATABASE IF EXISTS federated;
 CREATE DATABASE federated;
 SET @OLD_MASTER_CONCURRENT_INSERT= @@GLOBAL.CONCURRENT_INSERT;
 SET @@GLOBAL.CONCURRENT_INSERT= 0;
@@ -2162,6 +2160,6 @@ End of 6.0 tests
 SET @@GLOBAL.CONCURRENT_INSERT= @OLD_MASTER_CONCURRENT_INSERT;
 SET @@GLOBAL.CONCURRENT_INSERT= @OLD_SLAVE_CONCURRENT_INSERT;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE IF EXISTS federated;
+DROP DATABASE federated;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE IF EXISTS federated;
+DROP DATABASE federated;

=== modified file 'mysql-test/suite/federated/federated_archive.result'
--- a/mysql-test/suite/federated/federated_archive.result	2007-12-12 17:19:24 +0000
+++ b/mysql-test/suite/federated/federated_archive.result	2009-02-02 11:36:03 +0000
@@ -1,6 +1,4 @@
-DROP DATABASE IF EXISTS federated;
 CREATE DATABASE federated;
-DROP DATABASE IF EXISTS federated;
 CREATE DATABASE federated;
 DROP TABLE IF EXISTS federated.archive_table;
 CREATE TABLE federated.archive_table (
@@ -36,6 +34,6 @@ id	name
 DROP TABLE federated.t1;
 DROP TABLE federated.archive_table;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE IF EXISTS federated;
+DROP DATABASE federated;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE IF EXISTS federated;
+DROP DATABASE federated;

=== modified file 'mysql-test/suite/federated/federated_bug_13118.result'
--- a/mysql-test/suite/federated/federated_bug_13118.result	2007-12-12 17:19:24 +0000
+++ b/mysql-test/suite/federated/federated_bug_13118.result	2009-02-02 11:36:03 +0000
@@ -1,6 +1,4 @@
-DROP DATABASE IF EXISTS federated;
 CREATE DATABASE federated;
-DROP DATABASE IF EXISTS federated;
 CREATE DATABASE federated;
 DROP TABLE IF EXISTS federated.bug_13118_table;
 CREATE TABLE federated.bug_13118_table (
@@ -27,6 +25,6 @@ foo	bar
 DROP TABLE federated.t1;
 DROP TABLE federated.bug_13118_table;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE IF EXISTS federated;
+DROP DATABASE federated;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE IF EXISTS federated;
+DROP DATABASE federated;

=== modified file 'mysql-test/suite/federated/federated_bug_25714.result'
--- a/mysql-test/suite/federated/federated_bug_25714.result	2008-10-29 09:52:46 +0000
+++ b/mysql-test/suite/federated/federated_bug_25714.result	2009-02-02 20:50:45 +0000
@@ -1,6 +1,4 @@
-DROP DATABASE IF EXISTS federated;
 CREATE DATABASE federated;
-DROP DATABASE IF EXISTS federated;
 CREATE DATABASE federated;
 SET @OLD_MASTER_CONCURRENT_INSERT= @@GLOBAL.CONCURRENT_INSERT;
 SET @@GLOBAL.CONCURRENT_INSERT= 0;
@@ -50,6 +48,6 @@ SET @@GLOBAL.CONCURRENT_INSERT= @OLD_MAS
 DROP TABLE federated.t1;
 SET @@GLOBAL.CONCURRENT_INSERT= @OLD_SLAVE_CONCURRENT_INSERT;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE IF EXISTS federated;
+DROP DATABASE federated;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE IF EXISTS federated;
+DROP DATABASE federated;

=== modified file 'mysql-test/suite/federated/federated_bug_25714.test'
--- a/mysql-test/suite/federated/federated_bug_25714.test	2008-10-29 17:40:17 +0000
+++ b/mysql-test/suite/federated/federated_bug_25714.test	2009-02-02 20:50:45 +0000
@@ -1,11 +1,11 @@
-source federated.inc;
-
  # Check that path to the specific test program has been setup
 if (`select LENGTH("$MYSQL_BUG25714") = 0`)
 {
   skip Need bug25714 test program;
 }
 
+source federated.inc;
+
 connection master;
 # Disable concurrent inserts to avoid test failures when reading
 # data from concurrent connections (insert might return before

=== modified file 'mysql-test/suite/federated/federated_cleanup.inc'
--- a/mysql-test/suite/federated/federated_cleanup.inc	2007-12-12 17:19:24 +0000
+++ b/mysql-test/suite/federated/federated_cleanup.inc	2009-02-02 11:36:03 +0000
@@ -1,9 +1,9 @@
 connection master;
 --disable_warnings
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE IF EXISTS federated;
+DROP DATABASE federated;
 
 connection slave;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE IF EXISTS federated;
+DROP DATABASE federated;
 --enable_warnings

=== modified file 'mysql-test/suite/federated/federated_innodb.result'
--- a/mysql-test/suite/federated/federated_innodb.result	2007-12-12 17:19:24 +0000
+++ b/mysql-test/suite/federated/federated_innodb.result	2009-02-02 11:36:03 +0000
@@ -1,6 +1,4 @@
-DROP DATABASE IF EXISTS federated;
 CREATE DATABASE federated;
-DROP DATABASE IF EXISTS federated;
 CREATE DATABASE federated;
 create table federated.t1 (a int primary key, b varchar(64))
 engine=myisam;
@@ -22,6 +20,6 @@ a	b
 drop table federated.t1;
 drop table federated.t1;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE IF EXISTS federated;
+DROP DATABASE federated;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE IF EXISTS federated;
+DROP DATABASE federated;

=== modified file 'mysql-test/suite/federated/federated_server.result'
--- a/mysql-test/suite/federated/federated_server.result	2009-01-22 13:07:58 +0000
+++ b/mysql-test/suite/federated/federated_server.result	2009-02-02 11:36:03 +0000
@@ -1,6 +1,4 @@
-DROP DATABASE IF EXISTS federated;
 CREATE DATABASE federated;
-DROP DATABASE IF EXISTS federated;
 CREATE DATABASE federated;
 create database first_db;
 create database second_db;
@@ -277,6 +275,6 @@ call p1();
 drop procedure p1;
 drop server if exists s;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE IF EXISTS federated;
+DROP DATABASE federated;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE IF EXISTS federated;
+DROP DATABASE federated;

=== modified file 'mysql-test/suite/parts/r/partition_special_innodb.result'
--- a/mysql-test/suite/parts/r/partition_special_innodb.result	2008-12-10 09:20:38 +0000
+++ b/mysql-test/suite/parts/r/partition_special_innodb.result	2009-01-27 11:03:30 +0000
@@ -31,9 +31,9 @@ a	b	c	d
 2000-06-15	jukg	zikhuk	m
 select * from t1 where a<19851231;
 a	b	c	d
-1983-12-31	cdef	srtbvsr	w
-1980-10-14	fgbbd	dtzndtz	w
 1975-01-01	abcde	abcde	m
+1980-10-14	fgbbd	dtzndtz	w
+1983-12-31	cdef	srtbvsr	w
 drop table t1;
 create table t1 (a date not null, b varchar(50) not null, c varchar(50) not null, d enum('m', 'w') not null, e int not null, f decimal (18,2) not null, g bigint not null, h tinyint not null, i char(255), primary key(a,b,c,d,e,f,g,h)) engine='InnoDB' 
 partition by key(a,b,c,d,e,f,g,h) (
@@ -73,9 +73,9 @@ a	b	c	d	e	f	g	h	i
 2000-06-15	jukg	zikhuk	m	45675	6465754.13	435242623462	18	pib mdotkbm.m
 select * from t1 where a<19851231;
 a	b	c	d	e	f	g	h	i
+1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	tbhth nrzh ztfghgfh fzh ftzhj fztjh
 1980-10-14	fgbbd	dtzndtz	w	67856	5463354.67	3567845333	124	d,f söierugsig msireg siug ei5ggth lrutluitgzeöjrtnb.rkjthuekuhzrkuthgjdnffjmbr
 1983-12-31	cdef	srtbvsr	w	45634	13452.56	3452346456	127	liuugbzvdmrlti b itiortudirtfgtibm dfi
-1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	tbhth nrzh ztfghgfh fzh ftzhj fztjh
 drop table t1;
 create table t1 (a date not null, b varchar(50) not null, c varchar(50) not null, d enum('m', 'w') not null, e int not null, f decimal (18,2) not null, g bigint not null, h tinyint not null, a1 date not null, b1 varchar(50) not null, c1 varchar(50) not null, d1 enum('m', 'w') not null, e1 int not null, f1 decimal (18,2) not null, g1 bigint not null, h1 tinyint not null, i char(255), primary key(a,b,c,d,e,f,g,h,a1,b1,c1,d1,e1,f1,g1,h1)) engine='InnoDB' 
 partition by key(a,b,c,d,e,f,g,h,a1,b1,c1,d1,e1,f1,g1,h1) (
@@ -123,9 +123,9 @@ a	b	c	d	e	f	g	h	a1	b1	c1	d1	e1	f1	g1	h1
 2000-06-15	jukg	zikhuk	m	45675	6465754.13	435242623462	18	2000-06-15	jukg	zikhuk	m	45675	6465754.13	435242623462	18	pib mdotkbm.m
 select * from t1 where a<19851231;
 a	b	c	d	e	f	g	h	a1	b1	c1	d1	e1	f1	g1	h1	i
-1983-12-31	cdef	srtbvsr	w	45634	13452.56	3452346456	127	1983-12-31	cdef	srtbvsr	w	45634	13452.56	3452346456	127	liuugbzvdmrlti b itiortudirtfgtibm dfi
-1980-10-14	fgbbd	dtzndtz	w	67856	5463354.67	3567845333	124	1980-10-14	fgbbd	dtzndtz	w	67856	5463354.67	3567845333	124	d,f söierugsig msireg siug ei5ggth lrutluitgzeöjrtnb.rkjthuekuhzrkuthgjdnffjmbr
 1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	tbhth nrzh ztfghgfh fzh ftzhj fztjh
+1980-10-14	fgbbd	dtzndtz	w	67856	5463354.67	3567845333	124	1980-10-14	fgbbd	dtzndtz	w	67856	5463354.67	3567845333	124	d,f söierugsig msireg siug ei5ggth lrutluitgzeöjrtnb.rkjthuekuhzrkuthgjdnffjmbr
+1983-12-31	cdef	srtbvsr	w	45634	13452.56	3452346456	127	1983-12-31	cdef	srtbvsr	w	45634	13452.56	3452346456	127	liuugbzvdmrlti b itiortudirtfgtibm dfi
 drop table t1;
 create table t1 (a date not null, b varchar(50) not null, c varchar(50) not null, d enum('m', 'w') not null, e int not null, f decimal (18,2) not null, g bigint not null, h tinyint not null, a1 date not null, b1 varchar(50) not null, c1 varchar(50) not null, d1 enum('m', 'w') not null, e1 int not null, f1 decimal (18,2) not null, g1 bigint not null, h1 tinyint not null, a2 date not null, b2 varchar(50) not null, c2 varchar(50) not null, d2 enum('m', 'w') not null, e2 int not null, f2 decimal (18,2) not null, g2 bigint not null, h2 tinyint not null, a3 date not null, b3 varchar(50) not null, c3 varchar(50) not null, d3 enum('m', 'w') not null, e3 int not null, f3 decimal (18,2) not null, g3 bigint not null, h3 tinyint not null, i char(255), primary key(a,b,c,d,e,f,g,h,a1,b1,c1,d1,e1,f1,g1,h1,a2,b2,c2,d2,e2,f2,g2,h2,a3,b3,c3,d3,e3,f3,g3,h3)) engine='InnoDB' 
 partition by key(a,b,c,d,e,f,g,h,a1,b1,c1,d1,e1,f1,g1,h1,a2,b2,c2,d2,e2,f2,g2,h2,a3,b3,c3,d3,e3,f3,g3,h3) (
@@ -196,9 +196,9 @@ a	b	c	d	e	f	g	h	a1	b1	c1	d1	e1	f1	g1	h1
 2000-06-15	jukg	zikhuk	m	45675	6465754.13	435242623462	18	2000-06-15	jukg	zikhuk	m	45675	6465754.13	435242623462	18	2000-06-15	jukg	zikhuk	m	45675	6465754.13	435242623462	18	2000-06-15	jukg	zikhuk	m	45675	6465754.13	435242623462	18	pib mdotkbm.m
 select * from t1 where a<19851231;
 a	b	c	d	e	f	g	h	a1	b1	c1	d1	e1	f1	g1	h1	a2	b2	c2	d2	e2	f2	g2	h2	a3	b3	c3	d3	e3	f3	g3	h3	i
+1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	tbhth nrzh ztfghgfh fzh ftzhj fztjh
 1980-10-14	fgbbd	dtzndtz	w	67856	5463354.67	3567845333	124	1980-10-14	fgbbd	dtzndtz	w	67856	5463354.67	3567845333	124	1980-10-14	fgbbd	dtzndtz	w	67856	5463354.67	3567845333	124	1980-10-14	fgbbd	dtzndtz	w	67856	5463354.67	3567845333	124	d,f söierugsig msireg siug ei5ggth lrutluitgzeöjrtnb.rkjthuekuhzrkuthgjdnffjmbr
 1983-12-31	cdef	srtbvsr	w	45634	13452.56	3452346456	127	1983-12-31	cdef	srtbvsr	w	45634	13452.56	3452346456	127	1983-12-31	cdef	srtbvsr	w	45634	13452.56	3452346456	127	1983-12-31	cdef	srtbvsr	w	45634	13452.56	3452346456	127	liuugbzvdmrlti b itiortudirtfgtibm dfi
-1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	tbhth nrzh ztfghgfh fzh ftzhj fztjh
 drop table t1;
 # Bug#34604 - Assertion 'inited==RND' failed in handler::ha_rnd_end
 CREATE TABLE t1 (

=== modified file 'mysql-test/suite/parts/r/partition_special_myisam.result'
--- a/mysql-test/suite/parts/r/partition_special_myisam.result	2008-11-04 07:43:21 +0000
+++ b/mysql-test/suite/parts/r/partition_special_myisam.result	2009-02-03 12:01:22 +0000
@@ -73,9 +73,9 @@ a	b	c	d	e	f	g	h	i
 2000-06-15	jukg	zikhuk	m	45675	6465754.13	435242623462	18	pib mdotkbm.m
 select * from t1 where a<19851231;
 a	b	c	d	e	f	g	h	i
+1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	tbhth nrzh ztfghgfh fzh ftzhj fztjh
 1980-10-14	fgbbd	dtzndtz	w	67856	5463354.67	3567845333	124	d,f söierugsig msireg siug ei5ggth lrutluitgzeöjrtnb.rkjthuekuhzrkuthgjdnffjmbr
 1983-12-31	cdef	srtbvsr	w	45634	13452.56	3452346456	127	liuugbzvdmrlti b itiortudirtfgtibm dfi
-1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	tbhth nrzh ztfghgfh fzh ftzhj fztjh
 drop table t1;
 create table t1 (a date not null, b varchar(50) not null, c varchar(50) not null, d enum('m', 'w') not null, e int not null, f decimal (18,2) not null, g bigint not null, h tinyint not null, a1 date not null, b1 varchar(50) not null, c1 varchar(50) not null, d1 enum('m', 'w') not null, e1 int not null, f1 decimal (18,2) not null, g1 bigint not null, h1 tinyint not null, i char(255), primary key(a,b,c,d,e,f,g,h,a1,b1,c1,d1,e1,f1,g1,h1)) engine='MyISAM' 
 partition by key(a,b,c,d,e,f,g,h,a1,b1,c1,d1,e1,f1,g1,h1) (
@@ -123,9 +123,9 @@ a	b	c	d	e	f	g	h	a1	b1	c1	d1	e1	f1	g1	h1
 2000-06-15	jukg	zikhuk	m	45675	6465754.13	435242623462	18	2000-06-15	jukg	zikhuk	m	45675	6465754.13	435242623462	18	pib mdotkbm.m
 select * from t1 where a<19851231;
 a	b	c	d	e	f	g	h	a1	b1	c1	d1	e1	f1	g1	h1	i
-1983-12-31	cdef	srtbvsr	w	45634	13452.56	3452346456	127	1983-12-31	cdef	srtbvsr	w	45634	13452.56	3452346456	127	liuugbzvdmrlti b itiortudirtfgtibm dfi
-1980-10-14	fgbbd	dtzndtz	w	67856	5463354.67	3567845333	124	1980-10-14	fgbbd	dtzndtz	w	67856	5463354.67	3567845333	124	d,f söierugsig msireg siug ei5ggth lrutluitgzeöjrtnb.rkjthuekuhzrkuthgjdnffjmbr
 1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	tbhth nrzh ztfghgfh fzh ftzhj fztjh
+1980-10-14	fgbbd	dtzndtz	w	67856	5463354.67	3567845333	124	1980-10-14	fgbbd	dtzndtz	w	67856	5463354.67	3567845333	124	d,f söierugsig msireg siug ei5ggth lrutluitgzeöjrtnb.rkjthuekuhzrkuthgjdnffjmbr
+1983-12-31	cdef	srtbvsr	w	45634	13452.56	3452346456	127	1983-12-31	cdef	srtbvsr	w	45634	13452.56	3452346456	127	liuugbzvdmrlti b itiortudirtfgtibm dfi
 drop table t1;
 create table t1 (a date not null, b varchar(50) not null, c varchar(50) not null, d enum('m', 'w') not null, e int not null, f decimal (18,2) not null, g bigint not null, h tinyint not null, a1 date not null, b1 varchar(50) not null, c1 varchar(50) not null, d1 enum('m', 'w') not null, e1 int not null, f1 decimal (18,2) not null, g1 bigint not null, h1 tinyint not null, a2 date not null, b2 varchar(50) not null, c2 varchar(50) not null, d2 enum('m', 'w') not null, e2 int not null, f2 decimal (18,2) not null, g2 bigint not null, h2 tinyint not null, a3 date not null, b3 varchar(50) not null, c3 varchar(50) not null, d3 enum('m', 'w') not null, e3 int not null, f3 decimal (18,2) not null, g3 bigint not null, h3 tinyint not null, i char(255), primary key(a,b,c,d,e,f,g,h,a1,b1,c1,d1,e1,f1,g1,h1,a2,b2,c2,d2,e2,f2,g2,h2,a3,b3,c3,d3,e3,f3,g3,h3)) engine='MyISAM' 
 partition by key(a,b,c,d,e,f,g,h,a1,b1,c1,d1,e1,f1,g1,h1,a2,b2,c2,d2,e2,f2,g2,h2,a3,b3,c3,d3,e3,f3,g3,h3) (
@@ -196,7 +196,7 @@ a	b	c	d	e	f	g	h	a1	b1	c1	d1	e1	f1	g1	h1
 2000-06-15	jukg	zikhuk	m	45675	6465754.13	435242623462	18	2000-06-15	jukg	zikhuk	m	45675	6465754.13	435242623462	18	2000-06-15	jukg	zikhuk	m	45675	6465754.13	435242623462	18	2000-06-15	jukg	zikhuk	m	45675	6465754.13	435242623462	18	pib mdotkbm.m
 select * from t1 where a<19851231;
 a	b	c	d	e	f	g	h	a1	b1	c1	d1	e1	f1	g1	h1	a2	b2	c2	d2	e2	f2	g2	h2	a3	b3	c3	d3	e3	f3	g3	h3	i
+1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	tbhth nrzh ztfghgfh fzh ftzhj fztjh
 1980-10-14	fgbbd	dtzndtz	w	67856	5463354.67	3567845333	124	1980-10-14	fgbbd	dtzndtz	w	67856	5463354.67	3567845333	124	1980-10-14	fgbbd	dtzndtz	w	67856	5463354.67	3567845333	124	1980-10-14	fgbbd	dtzndtz	w	67856	5463354.67	3567845333	124	d,f söierugsig msireg siug ei5ggth lrutluitgzeöjrtnb.rkjthuekuhzrkuthgjdnffjmbr
 1983-12-31	cdef	srtbvsr	w	45634	13452.56	3452346456	127	1983-12-31	cdef	srtbvsr	w	45634	13452.56	3452346456	127	1983-12-31	cdef	srtbvsr	w	45634	13452.56	3452346456	127	1983-12-31	cdef	srtbvsr	w	45634	13452.56	3452346456	127	liuugbzvdmrlti b itiortudirtfgtibm dfi
-1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	1975-01-01	abcde	abcde	m	1234	123.45	32412341234	113	tbhth nrzh ztfghgfh fzh ftzhj fztjh
 drop table t1;

=== modified file 'mysql-test/suite/parts/r/rpl_partition.result'
--- a/mysql-test/suite/parts/r/rpl_partition.result	2009-02-01 12:00:48 +0000
+++ b/mysql-test/suite/parts/r/rpl_partition.result	2009-02-03 11:25:38 +0000
@@ -177,6 +177,8 @@ Last_IO_Errno	#
 Last_IO_Error	#
 Last_SQL_Errno	0
 Last_SQL_Error	
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
 SELECT count(*) "Slave norm" FROM t1;
 Slave norm	500
 SELECT count(*) "Slave bykey" FROM t2;

=== modified file 'mysql-test/suite/rpl/r/rpl_backup.result'
--- a/mysql-test/suite/rpl/r/rpl_backup.result	2008-12-06 00:24:23 +0000
+++ b/mysql-test/suite/rpl/r/rpl_backup.result	2009-02-01 19:17:09 +0000
@@ -335,6 +335,115 @@ the after position of the master's binlo
 should be 0.
 Delta
 0
+RESET MASTER;
+RESET SLAVE;
+SET DEBUG_SYNC = 'reset';
+SET DEBUG_SYNC = 'before_restore_done SIGNAL restore_running WAIT_FOR proceed';
+RESTORE FROM 'rpl_bup_s3.bak' OVERWRITE;
+SET DEBUG_SYNC = 'now WAIT_FOR restore_running';
+Try to start the slave while restore is running -- gets error.
+SLAVE START;
+ERROR HY000: Cannot start slave. SLAVE START is blocked by RESTORE.
+SET DEBUG_SYNC = 'now SIGNAL proceed';
+SHOW SLAVE STATUS;
+Slave_IO_State	#
+Master_Host	127.0.0.1
+Master_User	root
+Master_Port	MASTER_PORT
+Connect_Retry	1
+Master_Log_File	#
+Read_Master_Log_Pos	#
+Relay_Log_File	#
+Relay_Log_Pos	#
+Relay_Master_Log_File	
+Slave_IO_Running	No
+Slave_SQL_Running	No
+Replicate_Do_DB	
+Replicate_Ignore_DB	
+Replicate_Do_Table	
+Replicate_Ignore_Table	
+Replicate_Wild_Do_Table	
+Replicate_Wild_Ignore_Table	
+Last_Errno	0
+Last_Error	
+Skip_Counter	0
+Exec_Master_Log_Pos	#
+Relay_Log_Space	#
+Until_Condition	None
+Until_Log_File	
+Until_Log_Pos	0
+Master_SSL_Allowed	No
+Master_SSL_CA_File	
+Master_SSL_CA_Path	
+Master_SSL_Cert	
+Master_SSL_Cipher	
+Master_SSL_Key	
+Seconds_Behind_Master	#
+Master_SSL_Verify_Server_Cert	No
+Last_IO_Errno	0
+Last_IO_Error	
+Last_SQL_Errno	0
+Last_SQL_Error	
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
+Restore is now complete.
+backup_id
+#
+SET DEBUG_SYNC = 'now SIGNAL done';
+SET DEBUG_SYNC = 'now WAIT_FOR done';
+SHOW DATABASES;
+Database
+information_schema
+mtr
+mysql
+rpl_backup
+test
+SET DEBUG_SYNC = 'reset';
+Try to start the slave after restore is done -- should succeed.
+SLAVE START;
+SHOW SLAVE STATUS;
+Slave_IO_State	#
+Master_Host	127.0.0.1
+Master_User	root
+Master_Port	MASTER_PORT
+Connect_Retry	1
+Master_Log_File	#
+Read_Master_Log_Pos	#
+Relay_Log_File	#
+Relay_Log_Pos	#
+Relay_Master_Log_File	#
+Slave_IO_Running	Yes
+Slave_SQL_Running	Yes
+Replicate_Do_DB	
+Replicate_Ignore_DB	
+Replicate_Do_Table	
+Replicate_Ignore_Table	
+Replicate_Wild_Do_Table	
+Replicate_Wild_Ignore_Table	
+Last_Errno	0
+Last_Error	
+Skip_Counter	0
+Exec_Master_Log_Pos	#
+Relay_Log_Space	#
+Until_Condition	None
+Until_Log_File	
+Until_Log_Pos	0
+Master_SSL_Allowed	No
+Master_SSL_CA_File	
+Master_SSL_CA_Path	
+Master_SSL_Cert	
+Master_SSL_Cipher	
+Master_SSL_Key	
+Seconds_Behind_Master	#
+Master_SSL_Verify_Server_Cert	No
+Last_IO_Errno	0
+Last_IO_Error	
+Last_SQL_Errno	0
+Last_SQL_Error	
+Replicate_Ignore_Server_Ids	
+Master_Server_Id	1
+Now stop the slave.
+SLAVE STOP;
 FLUSH BACKUP LOGS;
 PURGE BACKUP LOGS;
 DROP DATABASE rpl_backup;

=== modified file 'mysql-test/suite/rpl/r/rpl_trigger.result'
--- a/mysql-test/suite/rpl/r/rpl_trigger.result	2008-12-08 13:31:24 +0000
+++ b/mysql-test/suite/rpl/r/rpl_trigger.result	2009-02-02 21:29:18 +0000
@@ -862,6 +862,9 @@ drop table t21,t31;
 drop table t11;
 STOP SLAVE;
 FLUSH LOGS;
+--> Stop master server
+--> Start master server
+--> Master binlog: Server ver: 5.0.16-debug-log, Binlog ver: 4
 RESET SLAVE;
 START SLAVE;
 SELECT MASTER_POS_WAIT('master-bin.000001', 513) >= 0;

=== modified file 'mysql-test/suite/rpl/t/rpl_backup.test'
--- a/mysql-test/suite/rpl/t/rpl_backup.test	2008-12-06 00:24:23 +0000
+++ b/mysql-test/suite/rpl/t/rpl_backup.test	2009-01-29 21:17:59 +0000
@@ -401,6 +401,63 @@ eval SELECT $master_after_pos - $master_
 --enable_query_log
 
 #
+# Now test 'slave start' while restore is in progress on slave.
+#
+
+RESET MASTER;
+
+connection slave;
+
+RESET SLAVE;
+
+SET DEBUG_SYNC = 'reset';
+
+connection slave1;
+
+SET DEBUG_SYNC = 'before_restore_done SIGNAL restore_running WAIT_FOR proceed';
+SEND RESTORE FROM 'rpl_bup_s3.bak' OVERWRITE;
+
+connection slave;
+
+SET DEBUG_SYNC = 'now WAIT_FOR restore_running';
+
+--echo Try to start the slave while restore is running -- gets error.
+--error ER_RESTORE_CANNOT_START_SLAVE
+SLAVE START;
+
+SET DEBUG_SYNC = 'now SIGNAL proceed';
+
+--replace_result $MASTER_MYPORT MASTER_PORT
+--replace_column 1 # 6 # 7 # 8 # 9 # 22 # 23 # 33 #
+--query_vertical SHOW SLAVE STATUS
+
+connection slave1;
+--echo Restore is now complete.
+--replace_column 1 #
+reap;
+SET DEBUG_SYNC = 'now SIGNAL done';
+
+connection slave;
+
+SET DEBUG_SYNC = 'now WAIT_FOR done';
+
+SHOW DATABASES; 
+
+SET DEBUG_SYNC = 'reset';
+
+--echo Try to start the slave after restore is done -- should succeed.
+SLAVE START;
+--source include/wait_for_slave_to_start.inc
+
+--replace_result $MASTER_MYPORT MASTER_PORT
+--replace_column 1 # 6 # 7 # 8 # 9 # 10 # 22 # 23 # 33 #
+--query_vertical SHOW SLAVE STATUS
+
+--echo Now stop the slave.
+SLAVE STOP;
+--source include/wait_for_slave_to_stop.inc
+
+#
 # Cleanup
 #
 connection master;

=== modified file 'mysql-test/suite/rpl/t/rpl_trigger.test'
--- a/mysql-test/suite/rpl/t/rpl_trigger.test	2009-02-01 14:30:58 +0000
+++ b/mysql-test/suite/rpl/t/rpl_trigger.test	2009-02-02 21:29:18 +0000
@@ -298,8 +298,28 @@ STOP SLAVE;
 connection master;
 let $MYSQLD_DATADIR= `select @@datadir`;
 FLUSH LOGS;
+
+# Stop master server
+--echo --> Stop master server
+--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+wait
+EOF
+--shutdown_server 10
+--source include/wait_until_disconnected.inc
+# Replace binlog
 remove_file $MYSQLD_DATADIR/master-bin.000001;
 copy_file $MYSQL_TEST_DIR/std_data/bug16266.000001 $MYSQLD_DATADIR/master-bin.000001;
+  
+--echo --> Start master server
+--append_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+restart
+EOF
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+let $binlog_version= query_get_value(SHOW BINLOG EVENTS, Info, 1);
+
+# Make the slave to replay the new binlog.
+--echo --> Master binlog: $binlog_version
 
 # Make the slave to replay the new binlog.
 

=== modified file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_circular_2ch.result'
--- a/mysql-test/suite/rpl_ndb/r/rpl_ndb_circular_2ch.result	2008-04-25 22:17:34 +0000
+++ b/mysql-test/suite/rpl_ndb/r/rpl_ndb_circular_2ch.result	2009-02-02 20:31:01 +0000
@@ -45,58 +45,20 @@ CREATE TABLE t1 (a INT NOT NULL AUTO_INC
 *** Basic testing  ***
 Insert rows via all hosts
 Check data on both clusters 
-* Cluster A *
-SELECT COUNT(*), SUM(a), b FROM t1 WHERE c = 1 GROUP BY b ORDER BY b;
-COUNT(*)	SUM(a)	b
-10	190	master
-10	210	master1
-10	200	slave
-10	220	slave1
-* Cluster B *
-SELECT COUNT(*), SUM(a), b FROM t1 WHERE c = 1 GROUP BY b ORDER BY b;
-COUNT(*)	SUM(a)	b
-10	190	master
-10	210	master1
-10	200	slave
-10	220	slave1
-
+Comparing tables master:test.t1 and slave:test.t1
 *** Transaction testing ***
 BEGIN;
 BEGIN;
 COMMIT;
 COMMIT;
 Check data on both clusters 
-* Cluster A *
-SELECT COUNT(*), SUM(a), b FROM t1 WHERE c = 2 GROUP BY b ORDER BY b;
-COUNT(*)	SUM(a)	b
-100	23900	master
-100	24100	master1
-100	24000	slave
-100	24200	slave1
-* Cluster B *
-SELECT COUNT(*), SUM(a), b FROM t1 WHERE c = 2 GROUP BY b ORDER BY b;
-COUNT(*)	SUM(a)	b
-100	23900	master
-100	24100	master1
-100	24000	slave
-100	24200	slave1
-
+Comparing tables master:test.t1 and slave:test.t1
 BEGIN;
 BEGIN;
 ROLLBACK;
 ROLLBACK;
 Check data on both clusters 
-* Cluster A *
-SELECT COUNT(*), SUM(a), b FROM t1 WHERE c = 3 GROUP BY b ORDER BY b;
-COUNT(*)	SUM(a)	b
-100	64100	master1
-100	64000	slave
-* Cluster B *
-SELECT COUNT(*), SUM(a), b FROM t1 WHERE c = 3 GROUP BY b ORDER BY b;
-COUNT(*)	SUM(a)	b
-100	64100	master1
-100	64000	slave
-
+Comparing tables master:test.t1 and slave:test.t1
 DROP TABLE t1;
 DROP TABLE IF EXISTS t1;
 

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_2ch.cnf'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_2ch.cnf	2008-10-31 14:11:44 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_2ch.cnf	2009-02-02 20:34:46 +0000
@@ -15,11 +15,6 @@ skip-slave-start
 
 [mysqld.2.slave]
 server-id= 2
-master-host=		127.0.0.1
-master-port=		@mysqld.2.1.port
-master-password=	@mysqld.2.1.#password
-master-user=		@mysqld.2.1.#user
-master-connect-retry=	1
 init-rpl-role=		slave
 log-bin
 skip-slave-start

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_2ch.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_2ch.test	2008-10-31 14:11:44 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_2ch.test	2009-02-02 20:31:01 +0000
@@ -75,13 +75,10 @@ let $wait_condition= SELECT COUNT(*)=40
 
 # Check data
 --echo Check data on both clusters 
---connection master
---echo * Cluster A *
-SELECT COUNT(*), SUM(a), b FROM t1 WHERE c = 1 GROUP BY b ORDER BY b;
---connection slave
---echo * Cluster B *
-SELECT COUNT(*), SUM(a), b FROM t1 WHERE c = 1 GROUP BY b ORDER BY b;
---echo
+let $diff_table_1=master:test.t1;
+let $diff_table_2=slave:test.t1;
+
+source include/diff_tables.inc;
 
 --echo *** Transaction testing ***
 # Start transaction for one mysqld and do mass of inserts for other.
@@ -119,13 +116,10 @@ let $wait_condition= SELECT COUNT(*)=400
 --source include/wait_condition.inc
 
 --echo Check data on both clusters 
---connection master
---echo * Cluster A *
-SELECT COUNT(*), SUM(a), b FROM t1 WHERE c = 2 GROUP BY b ORDER BY b;
---connection slave
---echo * Cluster B *
-SELECT COUNT(*), SUM(a), b FROM t1 WHERE c = 2 GROUP BY b ORDER BY b;
---echo
+let $diff_table_1=master:test.t1;
+let $diff_table_2=slave:test.t1;
+
+source include/diff_tables.inc;
 
 # Start transaction and then roll back
 
@@ -161,13 +155,10 @@ let $wait_condition= SELECT COUNT(*)=200
 --source include/wait_condition.inc
 
 --echo Check data on both clusters 
---connection master
---echo * Cluster A *
-SELECT COUNT(*), SUM(a), b FROM t1 WHERE c = 3 GROUP BY b ORDER BY b;
---connection slave
---echo * Cluster B *
-SELECT COUNT(*), SUM(a), b FROM t1 WHERE c = 3 GROUP BY b ORDER BY b;
---echo
+let $diff_table_1=master:test.t1;
+let $diff_table_2=slave:test.t1;
+
+source include/diff_tables.inc;
 
 # Clean up
 --connection master

=== modified file 'mysql-test/suite/sys_vars/t/rpl_max_binlog_size_func.test'
--- a/mysql-test/suite/sys_vars/t/rpl_max_binlog_size_func.test	2009-01-31 15:53:35 +0000
+++ b/mysql-test/suite/sys_vars/t/rpl_max_binlog_size_func.test	2009-02-03 09:16:53 +0000
@@ -37,7 +37,6 @@ let $MYSQLD_DATADIR= `select @@datadir`;
 SET @@global.max_binlog_size= @saved_max_binlog_size;
 
 DROP TABLE t1;
-
 ###############################################################################
 # End of functionality testing for max_binlog_size                            #
 ###############################################################################

=== modified file 'mysql-test/t/ctype_ldml.test'
--- a/mysql-test/t/ctype_ldml.test	2009-01-26 16:03:39 +0000
+++ b/mysql-test/t/ctype_ldml.test	2009-02-02 20:50:45 +0000
@@ -55,6 +55,25 @@ insert into t1 values ('a');
 select * from t1 where c1='b';
 drop table t1;
 
+
+#
+# Bug#41084 full-text index added to custom UCA collation not working
+#
+CREATE TABLE t1 (
+  col1 varchar(100) character set utf8 collate utf8_test_ci
+);
+INSERT INTO t1 (col1) VALUES ('abcd'),('efgh'),('ijkl');
+ALTER TABLE t1 ADD FULLTEXT INDEX (col1);
+SELECT * FROM t1 where match (col1) against ('abcd');
+SELECT * FROM t1 where match (col1) against ('abcd' IN BOOLEAN MODE);
+ALTER TABLE t1 ADD (col2 varchar(100) character set latin1);
+UPDATE t1 SET col2=col1;
+SELECT * FROM t1 WHERE col1=col2 ORDER BY col1;
+DROP TABLE t1;
+
+#
+#  Vietnamese experimental collation
+#
 --echo  Vietnamese experimental collation
 
 show collation like 'ucs2_vn_ci';

=== modified file 'mysql-test/t/disabled.def'
--- a/mysql-test/t/disabled.def	2009-01-31 15:53:35 +0000
+++ b/mysql-test/t/disabled.def	2009-02-03 09:16:53 +0000
@@ -34,7 +34,9 @@ federated_transactions            : Bug#
 ;backup_no_engine                  : Bug#36021 2008-04-13 rsomla server crashes when openning table with unknown storage engine
 ;backup_triggers_and_events       : Bug#37762 2008-07-01 rafal Test fails on remove_file for unknown reasons
 ;backup_no_be                     : Bug#38023 2008-07-16 rafal Test triggers valgrind warnings described in the bug
+user_limits                       : Bug#41147 main.user_limits test fails with user exceeding the 'max_questions' resource (because of thread pool)
 ;wait_timeout_func                 : Bug #41225 joro wait_timeout_func fails
 ;kill                              : Bug#37780 2008-12-03 HHunger need some changes to be robust enough for pushbuild.
 query_cache_28249                 : Bug#41098 Query Cache returns wrong result with concurrent insert
 innodb_bug39438          : BUG#42383 2009-01-28 lsoares "This fails in embedded and on windows.  Note that this test is not run on windows and on embedded in PB for main trees currently"
+subselect3_jcl6          : BUG#42534 subselect3_jcl6 produces valgrind warnings with MTR2 (2008-02-02 spetrunia)

=== modified file 'mysql-test/t/innodb_mrr.test'
--- a/mysql-test/t/innodb_mrr.test	2008-12-29 03:42:30 +0000
+++ b/mysql-test/t/innodb_mrr.test	2009-01-25 16:59:07 +0000
@@ -10,13 +10,11 @@ set storage_engine=InnoDB;
 --source include/mrr_tests.inc 
 
 set storage_engine= @save_storage_engine;
-drop table t1, t2, t3, t4;
 
 # Try big rowid sizes
 set @read_rnd_buffer_size_save= @@read_rnd_buffer_size;
 set read_rnd_buffer_size=64;
 
-
 # By default InnoDB will fill values only for key parts used by the query,
 # which will cause DS-MRR to supply an invalid tuple on scan restoration. 
 # Verify that DS-MRR's code extra(HA_EXTRA_RETRIEVE_ALL_COLS) call has effect:

=== modified file 'mysql-test/t/join_cache.test'
--- a/mysql-test/t/join_cache.test	2009-01-08 05:47:10 +0000
+++ b/mysql-test/t/join_cache.test	2009-01-14 10:29:36 +0000
@@ -1023,3 +1023,121 @@ set join_buffer_size=default;
 set join_cache_level=default;
 
 DROP TABLE t1,t2,t3;
+
+--echo #
+--echo # Bug #42020: join buffer is used  for outer join with fields of 
+--echo #             several outer tables in join buffer
+--echo #
+
+CREATE TABLE t1 (
+  a bigint NOT NULL,
+  PRIMARY KEY (a) 
+);
+INSERT INTO t1 VALUES
+  (2), (1);
+
+CREATE TABLE t2 (
+  a bigint NOT NULL,
+  b bigint NOT NULL,
+  PRIMARY KEY (a,b)
+);
+INSERT INTO t2 VALUES
+  (2,30), (2,40), (2,50), (2,60), (2,70), (2,80),
+  (1,10), (1, 20), (1,30), (1,40), (1,50);
+
+CREATE TABLE t3 (
+  pk bigint NOT NULL AUTO_INCREMENT,
+  a bigint NOT NULL,
+  b bigint NOT NULL,
+  val bigint DEFAULT '0',
+  PRIMARY KEY (pk),
+  KEY idx (a,b)
+);
+INSERT INTO t3(a,b) VALUES
+  (2,30), (2,40), (2,50), (2,60), (2,70), (2,80),
+  (4,30), (4,40), (4,50), (4,60), (4,70), (4,80),
+  (5,30), (5,40), (5,50), (5,60), (5,70), (5,80),
+  (7,30), (7,40), (7,50), (7,60), (7,70), (7,80);
+
+SELECT t1.a, t2.a, t3.a, t2.b, t3.b, t3.val 
+  FROM (t1,t2) LEFT JOIN t3 ON (t1.a=t3.a AND t2.b=t3.b) 
+    WHERE t1.a=t2.a; 
+
+set join_cache_level=6;
+set join_buffer_size=256;
+
+EXPLAIN
+SELECT t1.a, t2.a, t3.a, t2.b, t3.b, t3.val 
+  FROM (t1,t2) LEFT JOIN t3 ON (t1.a=t3.a AND t2.b=t3.b) 
+    WHERE t1.a=t2.a; 
+
+SELECT t1.a, t2.a, t3.a, t2.b, t3.b, t3.val 
+  FROM (t1,t2) LEFT JOIN t3 ON (t1.a=t3.a AND t2.b=t3.b) 
+    WHERE t1.a=t2.a;
+
+DROP INDEX idx ON t3;
+set join_cache_level=4;
+
+EXPLAIN
+SELECT t1.a, t2.a, t3.a, t2.b, t3.b, t3.val 
+  FROM (t1,t2) LEFT JOIN t3 ON (t1.a=t3.a AND t2.b=t3.b) 
+    WHERE t1.a=t2.a; 
+
+SELECT t1.a, t2.a, t3.a, t2.b, t3.b, t3.val 
+  FROM (t1,t2) LEFT JOIN t3 ON (t1.a=t3.a AND t2.b=t3.b) 
+    WHERE t1.a=t2.a; 
+
+set join_buffer_size=default;
+set join_cache_level=default;
+
+DROP TABLE t1,t2,t3;
+
+#
+# WL#4424 Full index condition pushdown with batched key access join
+#
+create table t1(f1 int, f2 int);
+insert into t1 values (1,1),(2,2),(3,3);
+create table t2(f1 int not null, f2 int not null, f3 char(200), key(f1,f2));
+insert into t2 values (1,1, 'qwerty'),(1,2, 'qwerty'),(1,3, 'qwerty');
+insert into t2 values (2,1, 'qwerty'),(2,2, 'qwerty'),(2,3, 'qwerty'),
+                      (2,4, 'qwerty'),(2,5, 'qwerty');
+insert into t2 values (3,1, 'qwerty'),(3,4, 'qwerty');
+insert into t2 values (4,1, 'qwerty'),(4,2, 'qwerty'),(4,3, 'qwerty'),
+                      (4,4, 'qwerty');
+insert into t2 values (1,1, 'qwerty'),(1,2, 'qwerty'),(1,3, 'qwerty');
+insert into t2 values (2,1, 'qwerty'),(2,2, 'qwerty'),(2,3, 'qwerty'),
+                      (2,4, 'qwerty'),(2,5, 'qwerty');
+insert into t2 values (3,1, 'qwerty'),(3,4, 'qwerty');
+insert into t2 values (4,1, 'qwerty'),(4,2, 'qwerty'),(4,3, 'qwerty'),
+                      (4,4, 'qwerty');
+
+set join_cache_level=5;
+select t2.f1, t2.f2, t2.f3 from t1,t2
+where t1.f1=t2.f1 and t2.f2 between t1.f1 and t1.f2 and t2.f2 + 1 >= t1.f1 + 1;
+
+explain select t2.f1, t2.f2, t2.f3 from t1,t2
+where t1.f1=t2.f1 and t2.f2 between t1.f1 and t2.f2;
+
+set join_cache_level=6;
+select t2.f1, t2.f2, t2.f3 from t1,t2
+where t1.f1=t2.f1 and t2.f2 between t1.f1 and t1.f2 and t2.f2 + 1 >= t1.f1 + 1;
+
+explain select t2.f1, t2.f2, t2.f3 from t1,t2
+where t1.f1=t2.f1 and t2.f2 between t1.f1 and t2.f2;
+
+set join_cache_level=7;
+select t2.f1, t2.f2, t2.f3 from t1,t2
+where t1.f1=t2.f1 and t2.f2 between t1.f1 and t1.f2 and t2.f2 + 1 >= t1.f1 + 1;
+
+explain select t2.f1, t2.f2, t2.f3 from t1,t2
+where t1.f1=t2.f1 and t2.f2 between t1.f1 and t2.f2;
+
+set join_cache_level=8;
+select t2.f1, t2.f2, t2.f3 from t1,t2
+where t1.f1=t2.f1 and t2.f2 between t1.f1 and t1.f2 and t2.f2 + 1 >= t1.f1 + 1;
+
+explain select t2.f1, t2.f2, t2.f3 from t1,t2
+where t1.f1=t2.f1 and t2.f2 between t1.f1 and t2.f2;
+
+drop table t1,t2;
+set join_cache_level=default;

=== added file 'mysql-test/t/join_optimizer.test'
--- a/mysql-test/t/join_optimizer.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/t/join_optimizer.test	2009-01-26 19:42:59 +0000
@@ -0,0 +1,45 @@
+--disable_warnings
+drop table if exists t0,t1,t2,t3;
+--enable_warnings
+
+--echo #
+--echo # BUG#38049 incorrect rows estimations with references from preceding table
+--echo #
+
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t1 (a varchar(32));
+insert into t1 values ('owner'),('requester'),('admincc'),('cc');
+
+CREATE TABLE t2 (
+  id int(11) NOT NULL,
+  type varchar(32) default NULL,
+  PRIMARY KEY  (id)
+);
+insert into t2 values (1,'owner'), (2,'admincc');
+
+
+CREATE TABLE t3 (
+  id int(11) NOT NULL,
+  domain varchar(32) default NULL,
+  type varchar(32) default NULL,
+  PRIMARY KEY  (id)
+);
+
+set @domain='system';
+set @pk=0;
+INSERT INTO t3 select @pk:=@pk+1, 'system', t1.a from t1;
+INSERT INTO t3 select @pk:=@pk+1, 'queue', t1.a from t1, t0 where t0.a<3;
+INSERT INTO t3 select @pk:=@pk+1, 'ticket', t1.a from t1, t0 A, t0 B, t0 C;
+
+CREATE INDEX groups_d ON t3(domain);
+CREATE INDEX groups_t ON t3(type);
+CREATE INDEX groups_td ON t3(type, domain);
+CREATE INDEX groups_dt ON t3(domain, type);
+--echo For table g this must use ref(groups_dt) and #rows should be around 15 and not 335:
+explain 
+SELECT STRAIGHT_JOIN g.id FROM t2 a, t3 g USE INDEX(groups_dt) 
+WHERE g.domain = 'queue' AND g.type = a.type;
+
+drop table t0,t1,t2,t3;

=== modified file 'mysql-test/t/maria_mrr.test'
--- a/mysql-test/t/maria_mrr.test	2008-10-15 22:37:44 +0000
+++ b/mysql-test/t/maria_mrr.test	2009-01-25 16:59:07 +0000
@@ -19,4 +19,3 @@ set storage_engine=Maria;
 set storage_engine= @save_storage_engine;
 set @@read_rnd_buffer_size= @read_rnd_buffer_size_save;
 
-drop table t1, t2, t3, t4;

=== added file 'mysql-test/t/myisam_keycache_coverage.test'
--- a/mysql-test/t/myisam_keycache_coverage.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/t/myisam_keycache_coverage.test	2009-01-21 15:00:23 +0000
@@ -0,0 +1,60 @@
+--echo #
+--echo # MyISAM keycache coverage tests.
+--echo #
+
+--source include/have_debug.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (c1 VARCHAR(5), c2 int);
+CREATE INDEX i1 ON t1 (c1, c2);
+INSERT INTO t1 VALUES ('A',1);
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+
+--echo #
+--echo # Positive tests.
+--echo #
+SELECT COUNT(*) FROM t1 WHERE c2 < 5;
+LOAD INDEX INTO CACHE t1;
+UPDATE t1 SET c2=2;
+
+--echo #
+--echo # Close table and clear cache.
+--echo #
+FLUSH TABLE t1;
+
+--echo #
+--echo # Inject error key_cache_read_block_error
+--echo #
+SET debug='d,key_cache_read_block_error';
+--replace_regex /'.*\//'/
+--error 126
+SELECT COUNT(*) FROM t1 WHERE c2 < 5;
+FLUSH TABLE t1;
+
+--echo #
+--echo # Inject error key_cache_insert_block_error
+--echo #
+SET debug='d,key_cache_insert_block_error';
+LOAD INDEX INTO CACHE t1;
+FLUSH TABLE t1;
+
+--echo #
+--echo # Inject error key_cache_write_block_error
+--echo #
+SET debug='d,key_cache_write_block_error';
+--replace_regex /'.*\//'/
+--error 126
+UPDATE t1 SET c2=1;
+FLUSH TABLE t1;
+
+--echo #
+--echo # Cleanup
+--echo #
+SET debug='';
+DROP TABLE t1;
+

=== modified file 'mysql-test/t/myisam_mrr.test'
--- a/mysql-test/t/myisam_mrr.test	2008-09-05 19:23:07 +0000
+++ b/mysql-test/t/myisam_mrr.test	2009-01-26 11:21:27 +0000
@@ -14,8 +14,6 @@ select @@read_rnd_buffer_size;
 
 set @@read_rnd_buffer_size= @read_rnd_buffer_size_save;
 
-drop table t1, t2, t3, t4;
-
 #
 # BUG#30622: Incorrect query results for MRR + filesort
 # 
@@ -74,3 +72,28 @@ FROM t1 WHERE EXISTS (
 
 DROP TABLE t1, t2;
 
+-- echo #
+-- echo # BUG#42048 Discrepancy between MyISAM and Maria's ICP implementation
+-- echo #
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (a int, b char(20), filler char(200), key(a,b(10)));
+insert into t1 select A.a + 10*(B.a + 10*C.a), 'bbb','filler' from t0 A, t0 B, t0 C;
+update t1 set b=repeat(char(65+a), 20) where a < 25;
+
+--echo This must show range + using index condition:
+explain select * from t1 where a < 10 and b = repeat(char(65+a), 20);
+select * from t1 where a < 10 and b = repeat(char(65+a), 20);
+drop table t0,t1;
+
+-- echo #
+-- echo # BUG#41136: ORDER BY + range access: EXPLAIN shows "Using MRR" while MRR is actually not used
+-- echo #
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (a int, b int, key(a));
+insert into t1 select A.a + 10 *(B.a + 10*C.a), A.a + 10 *(B.a + 10*C.a) from t0 A, t0 B, t0 C; 
+-- echo This mustn't show "Using MRR":
+explain select * from t1 where a < 20  order by a;
+drop table t0, t1;
+

=== modified file 'mysql-test/t/subselect_sj.test'
--- a/mysql-test/t/subselect_sj.test	2008-10-07 19:45:09 +0000
+++ b/mysql-test/t/subselect_sj.test	2009-01-28 19:24:54 +0000
@@ -123,7 +123,7 @@ set optimizer_switch='no_materialization
 select @@optimizer_switch; 
 set optimizer_switch='';
 
-drop table t0, t1;
+drop table t0, t1, t2;
 drop table t10, t11, t12;
 
 --echo
@@ -147,3 +147,73 @@ WHERE varchar_nokey IN (
  t1
 ) XOR pk = 30;
 drop table t1;
+
+--echo #
+--echo # BUG#41842: Semi-join materialization strategy crashes when the upper query has HAVING
+--echo #
+
+CREATE TABLE t1 (
+  pk int(11) NOT NULL AUTO_INCREMENT,
+  int_nokey int(11) NOT NULL,
+  time_key time NOT NULL,
+  datetime_key datetime NOT NULL,
+  datetime_nokey datetime NOT NULL,
+  varchar_key varchar(1) NOT NULL,
+  varchar_nokey varchar(1) NOT NULL,
+  PRIMARY KEY (pk),
+  KEY time_key (time_key),
+  KEY datetime_key (datetime_key),
+  KEY varchar_key (varchar_key)
+);
+INSERT INTO t1 VALUES 
+(1,0, '00:16:10','2008-09-03 14:25:40','2008-09-03 14:25:40','h','h'),
+(2,7, '00:00:00','2001-01-13 00:00:00','2001-01-13 00:00:00','',''),
+(3,0, '00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','x','x'),
+(4,2, '16:29:24','2000-10-16 01:39:08','2000-10-16 01:39:08','w','w'),
+(5,1, '09:23:32','0000-00-00 00:00:00','0000-00-00 00:00:00','p','p'),
+(6,3, '00:00:00','2007-12-02 00:00:00','2007-12-02 00:00:00','o','o'),
+(7,3, '00:00:00','2008-09-11 00:00:00','2008-09-11 00:00:00','',''),
+(8,0, '13:59:04','0000-00-00 00:00:00','0000-00-00 00:00:00','s','s'),
+(9,7, '09:01:06','0000-00-00 00:00:00','0000-00-00 00:00:00','d','d'),
+(10,5,'00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','n','n'),
+(11,0,'21:06:46','0000-00-00 00:00:00','0000-00-00 00:00:00','o','o'),
+(12,2,'00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','',''),
+(13,6,'14:45:34','2003-07-28 02:34:08','2003-07-28 02:34:08','w','w'),
+(14,1,'15:04:12','0000-00-00 00:00:00','0000-00-00 00:00:00','o','o'),
+(15,0,'00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','x','x'),
+(16,0,'15:55:23','2004-03-17 00:32:27','2004-03-17 00:32:27','p','p'),
+(17,1,'16:30:00','2004-12-27 19:20:00','2004-12-27 19:20:00','d','d'),
+(18,0,'00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','h','h'),
+(19,0,'14:13:26','2008-11-09 05:53:48','2008-11-09 05:53:48','o','o'),
+(20,0,'00:00:00','2009-10-11 06:58:04','2009-10-11 06:58:04','k','k');
+
+CREATE TABLE t2 (
+  pk int(11) NOT NULL AUTO_INCREMENT,
+  int_nokey int(11) NOT NULL,
+  time_key time NOT NULL,
+  datetime_key datetime NOT NULL,
+  datetime_nokey datetime NOT NULL,
+  varchar_key varchar(1) NOT NULL,
+  varchar_nokey varchar(1) NOT NULL,
+  PRIMARY KEY (pk),
+  KEY time_key (time_key),
+  KEY datetime_key (datetime_key),
+  KEY varchar_key (varchar_key)
+);
+INSERT INTO t2 VALUES 
+(10,0,'19:39:13','0000-00-00 00:00:00','0000-00-00 00:00:00','g','g'),
+(11,8,'03:43:53','0000-00-00 00:00:00','0000-00-00 00:00:00','b','b');
+SELECT OUTR.datetime_nokey AS X FROM t1 AS OUTR 
+WHERE 
+  OUTR.varchar_nokey IN (SELECT 
+                             INNR . varchar_nokey AS Y 
+                             FROM t2 AS INNR 
+                             WHERE
+                               INNR . datetime_key >= INNR . time_key OR 
+                               INNR . pk = INNR . int_nokey  
+                             ) 
+  AND OUTR . varchar_nokey <= 'w' 
+HAVING X > '2012-12-12';
+drop table t1, t2;
+
+

=== modified file 'mysql-test/t/system_mysql_db_fix30020.test'
--- a/mysql-test/t/system_mysql_db_fix30020.test	2008-10-07 10:26:19 +0000
+++ b/mysql-test/t/system_mysql_db_fix30020.test	2009-02-02 20:50:45 +0000
@@ -105,4 +105,5 @@ backup_history, backup_progress;
 # check that we dropped all system tables
 show tables;
 
+exit;
 # End of 4.1 tests

=== modified file 'mysql-test/t/variables.test'
--- a/mysql-test/t/variables.test	2009-01-31 15:53:35 +0000
+++ b/mysql-test/t/variables.test	2009-02-02 23:28:17 +0000
@@ -6,7 +6,7 @@ drop table if exists t1,t2;
 --enable_warnings
 
 #
-# Bug #19263: variables.test doesn't clean up after itself (I/II -- save)
+# Bug#19263: variables.test doesn't clean up after itself (I/II -- save)
 #
 set @my_binlog_cache_size         =@@global.binlog_cache_size;
 set @my_connect_timeout           =@@global.connect_timeout;
@@ -198,46 +198,46 @@ SELECT @@version_compile_os LIKE 'non-ex
 
 # The following should give errors
 
---error 1231
+--error ER_WRONG_VALUE_FOR_VAR
 set big_tables=OFFF;
---error 1231
+--error ER_WRONG_VALUE_FOR_VAR
 set big_tables="OFFF";
---error 1193
+--error ER_UNKNOWN_SYSTEM_VARIABLE
 set unknown_variable=1;
---error 1232
+--error ER_WRONG_TYPE_FOR_VAR
 set max_join_size="hello";
---error 1286
+--error ER_UNKNOWN_STORAGE_ENGINE
 set storage_engine=UNKNOWN_TABLE_TYPE;
---error 1231
+--error ER_WRONG_VALUE_FOR_VAR
 set storage_engine=MERGE, big_tables=2;
 show local variables like 'storage_engine';
---error 1229
+--error ER_GLOBAL_VARIABLE
 set SESSION query_cache_size=10000;
---error 1230
+--error ER_NO_DEFAULT
 set GLOBAL storage_engine=DEFAULT;
---error 1115
+--error ER_UNKNOWN_CHARACTER_SET
 set character_set_client=UNKNOWN_CHARACTER_SET;
---error 1273
+--error ER_UNKNOWN_COLLATION
 set collation_connection=UNKNOWN_COLLATION;
---error 1231
+--error ER_WRONG_VALUE_FOR_VAR
 set character_set_client=NULL;
---error 1231
+--error ER_WRONG_VALUE_FOR_VAR
 set collation_connection=NULL;
---error 1228
+--error ER_LOCAL_VARIABLE
 set global autocommit=1;
---error 1238
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
 select @@global.timestamp;
---error 1238 
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR 
 set @@version='';
---error 1229
+--error ER_GLOBAL_VARIABLE
 set @@concurrent_insert=1;
---error 1228
+--error ER_LOCAL_VARIABLE
 set @@global.sql_auto_is_null=1;
---error 1238
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
 select @@global.sql_auto_is_null;
---error 1229
+--error ER_GLOBAL_VARIABLE
 set myisam_max_sort_file_size=100;
---error 1231
+--error ER_WRONG_VALUE_FOR_VAR
 set @@SQL_WARNINGS=NULL;
 
 # Test setting all variables
@@ -368,23 +368,23 @@ drop table t1,t2;
 # error conditions
 #
 
---error 1193
+--error ER_UNKNOWN_SYSTEM_VARIABLE
 select @@xxxxxxxxxx;
 select 1;
 
---error 1238
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
 select @@session.key_buffer_size;
 
---error 1229
+--error ER_GLOBAL_VARIABLE
 set ft_boolean_syntax = @@init_connect;
---error 1231
+--error ER_WRONG_VALUE_FOR_VAR
 set global ft_boolean_syntax = @@init_connect;
---error 1229
+--error ER_GLOBAL_VARIABLE
 set init_connect = NULL;
 set global init_connect = NULL;
---error 1229
+--error ER_GLOBAL_VARIABLE
 set ft_boolean_syntax = @@init_connect;
---error 1231
+--error ER_WRONG_VALUE_FOR_VAR
 set global ft_boolean_syntax = @@init_connect;
 
 # Bug#3754 SET GLOBAL myisam_max_sort_file_size doesn't work as
@@ -417,15 +417,15 @@ select @a, @b;
 #
 # Bug#2586:Disallow global/session/local as structured var. instance names
 #
---error 1064
+--error ER_PARSE_ERROR
 set @@global.global.key_buffer_size= 1;
---error 1064
+--error ER_PARSE_ERROR
 set GLOBAL global.key_buffer_size= 1;
---error 1064
+--error ER_PARSE_ERROR
 SELECT @@global.global.key_buffer_size;
---error 1064
+--error ER_PARSE_ERROR
 SELECT @@global.session.key_buffer_size;
---error 1064
+--error ER_PARSE_ERROR
 SELECT @@global.local.key_buffer_size;
 
 # BUG#5135: cannot turn on log_warnings with SET in 4.1 (and 4.0)
@@ -516,27 +516,27 @@ select @@lc_time_names;
 --echo *** LC_TIME_NAMES: testing with string expressions
 set lc_time_names=concat('de','_','DE');
 select @@lc_time_names;
---error 1105
+--error ER_UNKNOWN_ERROR
 set lc_time_names=concat('de','+','DE');
 select @@lc_time_names;
 --echo LC_TIME_NAMES: testing with numeric expressions
 set @@lc_time_names=1+2;
 select @@lc_time_names;
---error 1232
+--error ER_WRONG_TYPE_FOR_VAR
 set @@lc_time_names=1/0;
 select @@lc_time_names;
 set lc_time_names=en_US;
 --echo LC_TIME_NAMES: testing NULL and a negative number:
---error 1231
+--error ER_WRONG_VALUE_FOR_VAR
 set lc_time_names=NULL;
---error 1105
+--error ER_UNKNOWN_ERROR
 set lc_time_names=-1;
 select @@lc_time_names;
 --echo LC_TIME_NAMES: testing locale with the last ID:
 set lc_time_names=108;
 select @@lc_time_names;
 --echo LC_TIME_NAMES: testing a number beyond the valid ID range:
---error 1105
+--error ER_UNKNOWN_ERROR
 set lc_time_names=109;
 select @@lc_time_names;
 --echo LC_TIME_NAMES: testing that 0 is en_US:
@@ -578,7 +578,7 @@ select @@query_prealloc_size = @test;
 # Bug#31588 buffer overrun when setting variables
 #
 # Buffer-size Off By One. Should throw valgrind-warning without fix #31588.
---error 1231
+--error ER_WRONG_VALUE_FOR_VAR
 set global sql_mode=repeat('a',80);
 
 --echo End of 4.1 tests
@@ -596,9 +596,9 @@ drop table t1;
 # Bug #10339: read only variables.
 #
 
---error 1238
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
 set @@warning_count=1;
---error 1238
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
 set @@global.error_count=1;
 
 #
@@ -616,9 +616,9 @@ select @@max_heap_table_size > 0;
 # Bug #11775 Variable character_set_system does not exist (sometimes)
 #
 select @@character_set_system;
---error 1238
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
 set global character_set_system = latin1;
---error 1238
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
 set @@global.version_compile_os='234';
 
 #
@@ -729,7 +729,7 @@ select @@@;
 # Don't actually output, since it depends on the system
 --replace_column 1 #
 select @@hostname;
---error 1238
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
 set @@hostname= "anothername";
 --replace_column 2 #
 show variables like 'hostname';

=== modified file 'mysys/charset.c'
--- a/mysys/charset.c	2008-12-09 08:41:43 +0000
+++ b/mysys/charset.c	2009-02-02 20:50:45 +0000
@@ -212,6 +212,8 @@ copy_uca_collation(CHARSET_INFO *to, CHA
   to->max_sort_char= from->max_sort_char;
   to->mbminlen= from->mbminlen;
   to->mbmaxlen= from->mbmaxlen;
+  to->state|= MY_CS_AVAILABLE | MY_CS_LOADED |
+              MY_CS_STRNXFRM  | MY_CS_UNICODE;
 }
 
 

=== modified file 'mysys/mf_keycache.c'
--- a/mysys/mf_keycache.c	2008-11-12 15:23:22 +0000
+++ b/mysys/mf_keycache.c	2009-01-29 21:17:59 +0000
@@ -1373,7 +1373,11 @@ static void unreg_request(KEY_CACHE *key
   DBUG_ASSERT(block->prev_changed && *block->prev_changed == block);
   DBUG_ASSERT(!block->next_used);
   DBUG_ASSERT(!block->prev_used);
-  if (! --block->requests)
+  /*
+    Unregister the request, but do not link erroneous blocks into the
+    LRU ring.
+  */
+  if (!--block->requests && !(block->status & BLOCK_ERROR))
   {
     my_bool hot;
     if (block->hits_left)
@@ -1455,8 +1459,7 @@ static void wait_for_readers(KEY_CACHE *
 #ifdef THREAD
   struct st_my_thread_var *thread= my_thread_var;
   DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
-  DBUG_ASSERT(!(block->status & (BLOCK_ERROR | BLOCK_IN_FLUSH |
-                                 BLOCK_CHANGED)));
+  DBUG_ASSERT(!(block->status & (BLOCK_IN_FLUSH | BLOCK_CHANGED)));
   DBUG_ASSERT(block->hash_link);
   DBUG_ASSERT(block->hash_link->block == block);
   /* Linked in file_blocks or changed_blocks hash. */
@@ -2567,7 +2570,6 @@ uchar *key_cache_read(KEY_CACHE *keycach
     reg1 BLOCK_LINK *block;
     uint read_length;
     uint offset;
-    uint status;
     int page_st;
 
     /*
@@ -2665,7 +2667,7 @@ uchar *key_cache_read(KEY_CACHE *keycach
       }
 
       /* block status may have added BLOCK_ERROR in the above 'if'. */
-      if (!((status= block->status) & BLOCK_ERROR))
+      if (!(block->status & BLOCK_ERROR))
       {
 #ifndef THREAD
         if (! return_buffer)
@@ -2691,14 +2693,22 @@ uchar *key_cache_read(KEY_CACHE *keycach
 
       remove_reader(block);
 
-      /*
-         Link the block into the LRU ring if it's the last submitted
-         request for the block. This enables eviction for the block.
-           */
-      unreg_request(keycache, block, 1);
+      /* Error injection for coverage testing. */
+      DBUG_EXECUTE_IF("key_cache_read_block_error",
+                      block->status|= BLOCK_ERROR;);
 
-      if (status & BLOCK_ERROR)
+      /* Do not link erroneous blocks into the LRU ring, but free them. */
+      if (!(block->status & BLOCK_ERROR))
       {
+        /*
+          Link the block into the LRU ring if it's the last submitted
+          request for the block. This enables eviction for the block.
+        */
+        unreg_request(keycache, block, 1);
+      }
+      else
+      {
+        free_block(keycache, block);
         error= 1;
         break;
       }
@@ -2947,16 +2957,25 @@ int key_cache_insert(KEY_CACHE *keycache
 
       remove_reader(block);
 
-      /*
-         Link the block into the LRU ring if it's the last submitted
-         request for the block. This enables eviction for the block.
-      */
-      unreg_request(keycache, block, 1);
+      /* Error injection for coverage testing. */
+      DBUG_EXECUTE_IF("key_cache_insert_block_error",
+                      block->status|= BLOCK_ERROR; errno=EIO;);
 
-      error= (block->status & BLOCK_ERROR);
-
-      if (error)
+      /* Do not link erroneous blocks into the LRU ring, but free them. */
+      if (!(block->status & BLOCK_ERROR))
+      {
+        /*
+          Link the block into the LRU ring if it's the last submitted
+          request for the block. This enables eviction for the block.
+        */
+        unreg_request(keycache, block, 1);
+      }
+      else
+      {
+        free_block(keycache, block);
+        error= 1;
         break;
+      }
 
       buff+= read_length;
       filepos+= read_length+offset;
@@ -3245,14 +3264,24 @@ int key_cache_write(KEY_CACHE *keycache,
       */
       remove_reader(block);
 
-      /*
-         Link the block into the LRU ring if it's the last submitted
-         request for the block. This enables eviction for the block.
-      */
-      unreg_request(keycache, block, 1);
+      /* Error injection for coverage testing. */
+      DBUG_EXECUTE_IF("key_cache_write_block_error",
+                      block->status|= BLOCK_ERROR;);
 
-      if (block->status & BLOCK_ERROR)
+      /* Do not link erroneous blocks into the LRU ring, but free them. */
+      if (!(block->status & BLOCK_ERROR))
       {
+        /*
+          Link the block into the LRU ring if it's the last submitted
+          request for the block. This enables eviction for the block.
+        */
+        unreg_request(keycache, block, 1);
+      }
+      else
+      {
+        /* Pretend a "clean" block to avoid complications. */
+        block->status&= ~(BLOCK_CHANGED);
+        free_block(keycache, block);
         error= 1;
         break;
       }
@@ -3328,8 +3357,9 @@ static void free_block(KEY_CACHE *keycac
 {
   KEYCACHE_THREAD_TRACE("free block");
   KEYCACHE_DBUG_PRINT("free_block",
-                      ("block %u to be freed, hash_link %p",
-                       BLOCK_NUMBER(block), block->hash_link));
+                      ("block %u to be freed, hash_link %p  status: %u",
+                       BLOCK_NUMBER(block), block->hash_link,
+                       block->status));
   /*
     Assert that the block is not free already. And that it is in a clean
     state. Note that the block might just be assigned to a hash_link and
@@ -3411,10 +3441,14 @@ static void free_block(KEY_CACHE *keycac
   if (block->status & BLOCK_IN_EVICTION)
     return;
 
-  /* Here the block must be in the LRU ring. Unlink it again. */
-  DBUG_ASSERT(block->next_used && block->prev_used &&
-              *block->prev_used == block);
-  unlink_block(keycache, block);
+  /* Error blocks are not put into the LRU ring. */
+  if (!(block->status & BLOCK_ERROR))
+  {
+    /* Here the block must be in the LRU ring. Unlink it again. */
+    DBUG_ASSERT(block->next_used && block->prev_used &&
+                *block->prev_used == block);
+    unlink_block(keycache, block);
+  }
   if (block->temperature == BLOCK_WARM)
     keycache->warm_blocks--;
   block->temperature= BLOCK_COLD;

=== modified file 'mysys/my_delete.c'
--- a/mysys/my_delete.c	2009-02-02 17:39:49 +0000
+++ b/mysys/my_delete.c	2009-02-03 17:03:58 +0000
@@ -75,9 +75,9 @@ int nt_share_delete(const char *name, my
   DBUG_ENTER("nt_share_delete");
   DBUG_PRINT("my",("name %s MyFlags %d", name, MyFlags));
 
-  errno= 0;
   for (cnt= GetTickCount(); cnt; cnt--)
   {
+    errno= 0;
     sprintf(buf, "%s.%08X.deleted", name, cnt);
     if (MoveFile(name, buf))
       break;

=== modified file 'sql/backup/Doxyfile'
--- a/sql/backup/Doxyfile	2007-11-06 18:32:47 +0000
+++ b/sql/backup/Doxyfile	2008-12-18 21:46:36 +0000
@@ -1,225 +1,1473 @@
-# Doxyfile 1.5.1-p1
+# Doxyfile 1.5.7.1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+#       TAG = value [value, ...]
+# For lists items can also be appended using:
+#       TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
 
 #---------------------------------------------------------------------------
 # Project related configuration options
 #---------------------------------------------------------------------------
-PROJECT_NAME           = Online Backup
+
+# This tag specifies the encoding used for all characters in the config file 
+# that follow. The default is UTF-8 which is also the encoding used for all 
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the 
+# iconv built into libc) for the transcoding. See 
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING      = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded 
+# by quotes) that should identify the project.
+
+PROJECT_NAME           = OnlineBackup
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. 
+# This could be handy for archiving the generated documentation or 
+# if some version control system is used.
+
 PROJECT_NUMBER         = 
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) 
+# base path where the generated documentation will be put. 
+# If a relative path is entered, it will be relative to the location 
+# where doxygen was started. If left blank the current directory will be used.
+
 OUTPUT_DIRECTORY       = ./docs
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 
+# 4096 sub-directories (in 2 levels) under the output directory of each output 
+# format and will distribute the generated files over these directories. 
+# Enabling this option can be useful when feeding doxygen a huge amount of 
+# source files, where putting all generated files in the same directory would 
+# otherwise cause performance problems for the file system.
+
 CREATE_SUBDIRS         = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all 
+# documentation generated by doxygen is written. Doxygen will use this 
+# information to generate all constant output in the proper language. 
+# The default language is English, other supported languages are: 
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, 
+# Croatian, Czech, Danish, Dutch, Farsi, Finnish, French, German, Greek, 
+# Hungarian, Italian, Japanese, Japanese-en (Japanese with English messages), 
+# Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, Polish, 
+# Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, Slovene, 
+# Spanish, Swedish, and Ukrainian.
+
 OUTPUT_LANGUAGE        = English
-USE_WINDOWS_ENCODING   = YES
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will 
+# include brief member descriptions after the members that are listed in 
+# the file and class documentation (similar to JavaDoc). 
+# Set to NO to disable this.
+
 BRIEF_MEMBER_DESC      = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend 
+# the brief description of a member or function before the detailed description. 
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the 
+# brief descriptions will be completely suppressed.
+
 REPEAT_BRIEF           = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator 
+# that is used to form the text in various listings. Each string 
+# in this list, if found as the leading text of the brief description, will be 
+# stripped from the text and the result after processing the whole list, is 
+# used as the annotated text. Otherwise, the brief description is used as-is. 
+# If left blank, the following values are used ("$name" is automatically 
+# replaced with the name of the entity): "The $name class" "The $name widget" 
+# "The $name file" "is" "provides" "specifies" "contains" 
+# "represents" "a" "an" "the"
+
 ABBREVIATE_BRIEF       = 
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then 
+# Doxygen will generate a detailed section even if there is only a brief 
+# description.
+
 ALWAYS_DETAILED_SEC    = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all 
+# inherited members of a class in the documentation of that class as if those 
+# members were ordinary class members. Constructors, destructors and assignment 
+# operators of the base classes will not be shown.
+
 INLINE_INHERITED_MEMB  = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full 
+# path before files name in the file list and in the header files. If set 
+# to NO the shortest path that makes the file name unique will be used.
+
 FULL_PATH_NAMES        = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag 
+# can be used to strip a user-defined part of the path. Stripping is 
+# only done if one of the specified strings matches the left-hand part of 
+# the path. The tag can be used to show relative paths in the file list. 
+# If left blank the directory from which doxygen is run is used as the 
+# path to strip.
+
 STRIP_FROM_PATH        = 
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of 
+# the path mentioned in the documentation of a class, which tells 
+# the reader which header file to include in order to use a class. 
+# If left blank only the name of the header file containing the class 
+# definition is used. Otherwise one should specify the include paths that 
+# are normally passed to the compiler using the -I flag.
+
 STRIP_FROM_INC_PATH    = 
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter 
+# (but less readable) file names. This can be useful if your file system 
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
 SHORT_NAMES            = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen 
+# will interpret the first line (until the first dot) of a JavaDoc-style 
+# comment as the brief description. If set to NO, the JavaDoc 
+# comments will behave just like regular Qt-style comments 
+# (thus requiring an explicit @brief command for a brief description.)
+
 JAVADOC_AUTOBRIEF      = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will 
+# interpret the first line (until the first dot) of a Qt-style 
+# comment as the brief description. If set to NO, the comments 
+# will behave just like regular Qt-style comments (thus requiring 
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF           = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen 
+# treat a multi-line C++ special comment block (i.e. a block of //! or /// 
+# comments) as a brief description. This used to be the default behaviour. 
+# The new default is to treat a multi-line C++ comment block as a detailed 
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
 MULTILINE_CPP_IS_BRIEF = NO
-DETAILS_AT_TOP         = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented 
+# member inherits the documentation from any documented member that it 
+# re-implements.
+
 INHERIT_DOCS           = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce 
+# a new page for each member. If set to NO, the documentation of a member will 
+# be part of the file/class/namespace that contains it.
+
 SEPARATE_MEMBER_PAGES  = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. 
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
 TAB_SIZE               = 8
+
+# This tag can be used to specify a number of aliases that acts 
+# as commands in the documentation. An alias has the form "name=value". 
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to 
+# put the command \sideeffect (or @sideeffect) in the documentation, which 
+# will result in a user-defined paragraph with heading "Side Effects:". 
+# You can put \n's in the value part of an alias to insert newlines.
+
 ALIASES                = 
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C 
+# sources only. Doxygen will then generate output that is more tailored for C. 
+# For instance, some of the names that are used will be different. The list 
+# of all members will be omitted, etc.
+
 OPTIMIZE_OUTPUT_FOR_C  = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java 
+# sources only. Doxygen will then generate output that is more tailored for 
+# Java. For instance, namespaces will be presented as packages, qualified 
+# scopes will look different, etc.
+
 OPTIMIZE_OUTPUT_JAVA   = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran 
+# sources only. Doxygen will then generate output that is more tailored for 
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN   = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL 
+# sources. Doxygen will then generate output that is tailored for 
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL   = NO
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want 
+# to include (a tag file for) the STL sources as input, then you should 
+# set this tag to YES in order to let doxygen match functions declarations and 
+# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. 
+# func(std::string) {}). This also make the inheritance and collaboration 
+# diagrams that involve STL classes more complete and accurate.
+
 BUILTIN_STL_SUPPORT    = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT        = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. 
+# Doxygen will parse them like normal C++ but will assume all classes use public 
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT            = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate getter 
+# and setter methods for a property. Setting this option to YES (the default) 
+# will make doxygen to replace the get and set methods by a property in the 
+# documentation. This will only work if the methods are indeed getting or 
+# setting a simple type. If this is not the case, or you want to show the 
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT   = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC 
+# tag is set to YES, then doxygen will reuse the documentation of the first 
+# member in the group (if any) for the other members of the group. By default 
+# all members of a group must be documented explicitly.
+
 DISTRIBUTE_GROUP_DOC   = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of 
+# the same type (for instance a group of public functions) to be put as a 
+# subgroup of that type (e.g. under the Public Functions section). Set it to 
+# NO to prevent subgrouping. Alternatively, this can be done per class using 
+# the \nosubgrouping command.
+
 SUBGROUPING            = YES
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum 
+# is documented as struct, union, or enum with the name of the typedef. So 
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct 
+# with name TypeT. When disabled the typedef will appear as a member of a file, 
+# namespace, or class. And the struct will be named TypeS. This can typically 
+# be useful for C code in case the coding convention dictates that all compound 
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT   = NO
+
+# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to 
+# determine which symbols to keep in memory and which to flush to disk.
+# When the cache is full, less often used symbols will be written to disk.
+# For small to medium size projects (<1000 input files) the default value is 
+# probably good enough. For larger projects a too small cache size can cause 
+# doxygen to be busy swapping symbols to and from disk most of the time 
+# causing a significant performance penalty. 
+# If the system has enough physical memory increasing the cache will improve the 
+# performance by keeping more symbols in memory. Note that the value works on 
+# a logarithmic scale so increasing the size by one will roughly double the 
+# memory usage. The cache size is given by this formula: 
+# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, 
+# corresponding to a cache size of 2^16 = 65536 symbols
+
+SYMBOL_CACHE_SIZE      = 0
+
 #---------------------------------------------------------------------------
 # Build related configuration options
 #---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in 
+# documentation are documented, even if no documentation was available. 
+# Private class members and static file members will be hidden unless 
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
 EXTRACT_ALL            = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class 
+# will be included in the documentation.
+
 EXTRACT_PRIVATE        = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file 
+# will be included in the documentation.
+
 EXTRACT_STATIC         = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) 
+# defined locally in source files will be included in the documentation. 
+# If set to NO only classes defined in header files are included.
+
 EXTRACT_LOCAL_CLASSES  = YES
+
+# This flag is only useful for Objective-C code. When set to YES local 
+# methods, which are defined in the implementation section but not in 
+# the interface are included in the documentation. 
+# If set to NO (the default) only methods in the interface are included.
+
 EXTRACT_LOCAL_METHODS  = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be 
+# extracted and appear in the documentation as a namespace called 
+# 'anonymous_namespace{file}', where file will be replaced with the base 
+# name of the file that contains the anonymous namespace. By default 
+# anonymous namespace are hidden.
+
+EXTRACT_ANON_NSPACES   = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all 
+# undocumented members of documented classes, files or namespaces. 
+# If set to NO (the default) these members will be included in the 
+# various overviews, but no documentation section is generated. 
+# This option has no effect if EXTRACT_ALL is enabled.
+
 HIDE_UNDOC_MEMBERS     = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all 
+# undocumented classes that are normally visible in the class hierarchy. 
+# If set to NO (the default) these classes will be included in the various 
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
 HIDE_UNDOC_CLASSES     = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all 
+# friend (class|struct|union) declarations. 
+# If set to NO (the default) these declarations will be included in the 
+# documentation.
+
 HIDE_FRIEND_COMPOUNDS  = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any 
+# documentation blocks found inside the body of a function. 
+# If set to NO (the default) these blocks will be appended to the 
+# function's detailed documentation block.
+
 HIDE_IN_BODY_DOCS      = NO
+
+# The INTERNAL_DOCS tag determines if documentation 
+# that is typed after a \internal command is included. If the tag is set 
+# to NO (the default) then the documentation will be excluded. 
+# Set it to YES to include the internal documentation.
+
 INTERNAL_DOCS          = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate 
+# file names in lower-case letters. If set to YES upper-case letters are also 
+# allowed. This is useful if you have classes or files whose names only differ 
+# in case and if your file system supports case sensitive file names. Windows 
+# and Mac users are advised to set this option to NO.
+
 CASE_SENSE_NAMES       = NO
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen 
+# will show members with their full class and namespace scopes in the 
+# documentation. If set to YES the scope will be hidden.
+
 HIDE_SCOPE_NAMES       = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen 
+# will put a list of the files that are included by a file in the documentation 
+# of that file.
+
 SHOW_INCLUDE_FILES     = YES
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] 
+# is inserted in the documentation for inline members.
+
 INLINE_INFO            = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen 
+# will sort the (detailed) documentation of file and class members 
+# alphabetically by member name. If set to NO the members will appear in 
+# declaration order.
+
 SORT_MEMBER_DOCS       = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the 
+# brief documentation of file, namespace and class members alphabetically 
+# by member name. If set to NO (the default) the members will appear in 
+# declaration order.
+
 SORT_BRIEF_DOCS        = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the 
+# hierarchy of group names into alphabetical order. If set to NO (the default) 
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES       = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be 
+# sorted by fully-qualified names, including namespaces. If set to 
+# NO (the default), the class list will be sorted only by class name, 
+# not including the namespace part. 
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the 
+# alphabetical list.
+
 SORT_BY_SCOPE_NAME     = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or 
+# disable (NO) the todo list. This list is created by putting \todo 
+# commands in the documentation.
+
 GENERATE_TODOLIST      = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or 
+# disable (NO) the test list. This list is created by putting \test 
+# commands in the documentation.
+
 GENERATE_TESTLIST      = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or 
+# disable (NO) the bug list. This list is created by putting \bug 
+# commands in the documentation.
+
 GENERATE_BUGLIST       = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or 
+# disable (NO) the deprecated list. This list is created by putting 
+# \deprecated commands in the documentation.
+
 GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional 
+# documentation sections, marked by \if sectionname ... \endif.
+
 ENABLED_SECTIONS       = 
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines 
+# the initial value of a variable or define consists of for it to appear in 
+# the documentation. If the initializer consists of more lines than specified 
+# here it will be hidden. Use a value of 0 to hide initializers completely. 
+# The appearance of the initializer of individual variables and defines in the 
+# documentation can be controlled using \showinitializer or \hideinitializer 
+# command in the documentation regardless of this setting.
+
 MAX_INITIALIZER_LINES  = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated 
+# at the bottom of the documentation of classes and structs. If set to YES the 
+# list will mention the files that were used to generate the documentation.
+
 SHOW_USED_FILES        = YES
+
+# If the sources in your project are distributed over multiple directories 
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy 
+# in the documentation. The default is NO.
+
 SHOW_DIRECTORIES       = NO
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the 
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES             = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the 
+# Namespaces page.  This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES        = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that 
+# doxygen should invoke to get the current version for each file (typically from 
+# the version control system). Doxygen will invoke the program by executing (via 
+# popen()) the command <command> <input-file>, where <command> is the value of 
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file 
+# provided by doxygen. Whatever the program writes to standard output 
+# is used as the file version. See the manual for examples.
+
 FILE_VERSION_FILTER    = 
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by 
+# doxygen. The layout file controls the global structure of the generated output files 
+# in an output format independent way. To create the layout file that represents 
+# doxygen's defaults, run doxygen with the -l option. You can optionally specify a 
+# file name after the option, if omitted DoxygenLayout.xml will be used as the name 
+# of the layout file.
+
+LAYOUT_FILE            = 
+
 #---------------------------------------------------------------------------
 # configuration options related to warning and progress messages
 #---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated 
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
 QUIET                  = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are 
+# generated by doxygen. Possible values are YES and NO. If left blank 
+# NO is used.
+
 WARNINGS               = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings 
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will 
+# automatically be disabled.
+
 WARN_IF_UNDOCUMENTED   = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for 
+# potential errors in the documentation, such as not documenting some 
+# parameters in a documented function, or documenting parameters that 
+# don't exist or using markup commands wrongly.
+
 WARN_IF_DOC_ERROR      = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for 
+# functions that are documented, but have no documentation for their parameters 
+# or return value. If set to NO (the default) doxygen will only warn about 
+# wrong or incomplete parameter documentation, but not about the absence of 
+# documentation.
+
 WARN_NO_PARAMDOC       = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that 
+# doxygen can produce. The string should contain the $file, $line, and $text 
+# tags, which will be replaced by the file and line number from which the 
+# warning originated and the warning text. Optionally the format may contain 
+# $version, which will be replaced by the version of the file (if it could 
+# be obtained via FILE_VERSION_FILTER)
+
 WARN_FORMAT            = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning 
+# and error messages should be written. If left blank the output is written 
+# to stderr.
+
 WARN_LOGFILE           = 
+
 #---------------------------------------------------------------------------
 # configuration options related to the input files
 #---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain 
+# documented source files. You may enter file names like "myfile.cpp" or 
+# directories like "/usr/src/myproject". Separate the files or directories 
+# with spaces.
+
 INPUT                  = 
+
+# This tag can be used to specify the character encoding of the source files 
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is 
+# also the default input encoding. Doxygen uses libiconv (or the iconv built 
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for 
+# the list of possible encodings.
+
+INPUT_ENCODING         = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the 
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp 
+# and *.h) to filter out the source-files in the directories. If left 
+# blank the following patterns are tested: 
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx 
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
+
 FILE_PATTERNS          = 
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories 
+# should be searched for input files as well. Possible values are YES and NO. 
+# If left blank NO is used.
+
 RECURSIVE              = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should 
+# be excluded from the INPUT source files. This way you can easily exclude a 
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
 EXCLUDE                = 
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or 
+# directories that are symbolic links (a Unix filesystem feature) are excluded 
+# from the input.
+
 EXCLUDE_SYMLINKS       = NO
+
+# If the value of the INPUT tag contains directories, you can use the 
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude 
+# certain files from those directories. Note that the wildcards are matched 
+# against the file with absolute path, so to exclude all test directories 
+# for example use the pattern */test/*
+
 EXCLUDE_PATTERNS       = 
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names 
+# (namespaces, classes, functions, etc.) that should be excluded from the 
+# output. The symbol name can be a fully qualified name, a word, or if the 
+# wildcard * is used, a substring. Examples: ANamespace, AClass, 
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS        = 
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or 
+# directories that contain example code fragments that are included (see 
+# the \include command).
+
 EXAMPLE_PATH           = 
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the 
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp 
+# and *.h) to filter out the source-files in the directories. If left 
+# blank all files are included.
+
 EXAMPLE_PATTERNS       = 
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be 
+# searched for input files to be used with the \include or \dontinclude 
+# commands irrespective of the value of the RECURSIVE tag. 
+# Possible values are YES and NO. If left blank NO is used.
+
 EXAMPLE_RECURSIVE      = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or 
+# directories that contain images that are included in the documentation (see 
+# the \image command).
+
 IMAGE_PATH             = 
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should 
+# invoke to filter for each input file. Doxygen will invoke the filter program 
+# by executing (via popen()) the command <filter> <input-file>, where <filter> 
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an 
+# input file. Doxygen will then use the output that the filter program writes 
+# to standard output.  If FILTER_PATTERNS is specified, this tag will be 
+# ignored.
+
 INPUT_FILTER           = 
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern 
+# basis.  Doxygen will compare the file name with each pattern and apply the 
+# filter if there is a match.  The filters are a list of the form: 
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further 
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER 
+# is applied to all files.
+
 FILTER_PATTERNS        = 
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using 
+# INPUT_FILTER) will be used to filter the input files when producing source 
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
 FILTER_SOURCE_FILES    = NO
+
 #---------------------------------------------------------------------------
 # configuration options related to source browsing
 #---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will 
+# be generated. Documented entities will be cross-referenced with these sources. 
+# Note: To get rid of all source code in the generated output, make sure also 
+# VERBATIM_HEADERS is set to NO.
+
 SOURCE_BROWSER         = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body 
+# of functions and classes directly in the documentation.
+
 INLINE_SOURCES         = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct 
+# doxygen to hide any special comment blocks from generated source code 
+# fragments. Normal C and C++ comments will always remain visible.
+
 STRIP_CODE_COMMENTS    = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES 
+# then for each documented function all documented 
+# functions referencing it will be listed.
+
 REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES 
+# then for each documented function all documented entities 
+# called/used by that function will be listed.
+
 REFERENCES_RELATION    = YES
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.  Otherwise they will link to the documentation.
+
 REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code 
+# will point to the HTML generated by the htags(1) tool instead of doxygen 
+# built-in source browser. The htags tool is part of GNU's global source 
+# tagging system (see http://www.gnu.org/software/global/global.html). You 
+# will need version 4.8.6 or higher.
+
 USE_HTAGS              = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen 
+# will generate a verbatim copy of the header file for each class for 
+# which an include is specified. Set to NO to disable this.
+
 VERBATIM_HEADERS       = YES
+
 #---------------------------------------------------------------------------
 # configuration options related to the alphabetical class index
 #---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index 
+# of all compounds will be generated. Enable this if the project 
+# contains a lot of classes, structs, unions or interfaces.
+
 ALPHABETICAL_INDEX     = NO
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then 
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns 
+# in which this list will be split (can be a number in the range [1..20])
+
 COLS_IN_ALPHA_INDEX    = 5
+
+# In case all classes in a project start with a common prefix, all 
+# classes will be put under the same header in the alphabetical index. 
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that 
+# should be ignored while generating the index headers.
+
 IGNORE_PREFIX          = 
+
 #---------------------------------------------------------------------------
 # configuration options related to the HTML output
 #---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will 
+# generate HTML output.
+
 GENERATE_HTML          = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `html' will be used as the default path.
+
 HTML_OUTPUT            = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for 
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank 
+# doxygen will generate files with .html extension.
+
 HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for 
+# each generated HTML page. If it is left blank doxygen will generate a 
+# standard header.
+
 HTML_HEADER            = 
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for 
+# each generated HTML page. If it is left blank doxygen will generate a 
+# standard footer.
+
 HTML_FOOTER            = 
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading 
+# style sheet that is used by each HTML page. It can be used to 
+# fine-tune the look of the HTML output. If the tag is left blank doxygen 
+# will generate a default style sheet. Note that doxygen will try to copy 
+# the style sheet file to the HTML output directory, so don't put your own 
+# stylesheet in the HTML output directory as well, or it will be erased!
+
 HTML_STYLESHEET        = 
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, 
+# files or namespaces will be aligned in HTML using tables. If set to 
+# NO a bullet list will be used.
+
 HTML_ALIGN_MEMBERS     = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML 
+# documentation will contain sections that can be hidden and shown after the 
+# page has loaded. For this to work a browser that supports 
+# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox 
+# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari).
+
+HTML_DYNAMIC_SECTIONS  = NO
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files 
+# will be generated that can be used as input for Apple's Xcode 3 
+# integrated development environment, introduced with OSX 10.5 (Leopard). 
+# To create a documentation set, doxygen will generate a Makefile in the 
+# HTML output directory. Running make will produce the docset in that 
+# directory and running "make install" will install the docset in 
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find 
+# it at startup. 
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information.
+
+GENERATE_DOCSET        = NO
+
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the 
+# feed. A documentation feed provides an umbrella under which multiple 
+# documentation sets from a single provider (such as a company or product suite) 
+# can be grouped.
+
+DOCSET_FEEDNAME        = "Doxygen generated docs"
+
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that 
+# should uniquely identify the documentation set bundle. This should be a 
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen 
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID       = org.doxygen.Project
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files 
+# will be generated that can be used as input for tools like the 
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) 
+# of the generated HTML documentation.
+
 GENERATE_HTMLHELP      = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can 
+# be used to specify the file name of the resulting .chm file. You 
+# can add a path in front of the file if the result should not be 
+# written to the html output directory.
+
 CHM_FILE               = 
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can 
+# be used to specify the location (absolute path including file name) of 
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run 
+# the HTML help compiler on the generated index.hhp.
+
 HHC_LOCATION           = 
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag 
+# controls if a separate .chi index file is generated (YES) or that 
+# it should be included in the master .chm file (NO).
+
 GENERATE_CHI           = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING     = 
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag 
+# controls whether a binary table of contents is generated (YES) or a 
+# normal table of contents (NO) in the .chm file.
+
 BINARY_TOC             = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members 
+# to the contents of the HTML help documentation and to the tree view.
+
 TOC_EXPAND             = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER 
+# are set, an additional index file will be generated that can be used as input for 
+# Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated 
+# HTML documentation.
+
+GENERATE_QHP           = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can 
+# be used to specify the file name of the resulting .qch file. 
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE               = 
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating 
+# Qt Help Project output. For more information please see 
+# <a href="http://doc.trolltech.com/qthelpproject.html#namespace">Qt Help Project / Namespace</a>.
+
+QHP_NAMESPACE          = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating 
+# Qt Help Project output. For more information please see 
+# <a href="http://doc.trolltech.com/qthelpproject.html#virtual-folders">Qt Help Project / Virtual Folders</a>.
+
+QHP_VIRTUAL_FOLDER     = doc
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can 
+# be used to specify the location of Qt's qhelpgenerator. 
+# If non-empty doxygen will try to run qhelpgenerator on the generated 
+# .qhp file .
+
+QHG_LOCATION           = 
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at 
+# top of each HTML page. The value NO (the default) enables the index and 
+# the value YES disables it.
+
 DISABLE_INDEX          = NO
+
+# This tag can be used to set the number of enum values (range [1..20]) 
+# that doxygen will group on one line in the generated HTML documentation.
+
 ENUM_VALUES_PER_LINE   = 4
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to FRAME, a side panel will be generated
+# containing a tree-like index structure (just like the one that 
+# is generated for HTML Help). For this to work a browser that supports 
+# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, 
+# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are 
+# probably better off using the HTML help feature. Other possible values 
+# for this tag are: HIERARCHIES, which will generate the Groups, Directories,
+# and Class Hierarchy pages using a tree view instead of an ordered list;
+# ALL, which combines the behavior of FRAME and HIERARCHIES; and NONE, which
+# disables this behavior completely. For backwards compatibility with previous
+# releases of Doxygen, the values YES and NO are equivalent to FRAME and NONE
+# respectively.
+
 GENERATE_TREEVIEW      = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be 
+# used to set the initial width (in pixels) of the frame in which the tree 
+# is shown.
+
 TREEVIEW_WIDTH         = 250
+
+# Use this tag to change the font size of Latex formulas included 
+# as images in the HTML documentation. The default is 10. Note that 
+# when you change the font size after a successful doxygen run you need 
+# to manually remove any form_*.png images from the HTML output directory 
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE       = 10
+
 #---------------------------------------------------------------------------
 # configuration options related to the LaTeX output
 #---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will 
+# generate Latex output.
+
 GENERATE_LATEX         = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `latex' will be used as the default path.
+
 LATEX_OUTPUT           = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be 
+# invoked. If left blank `latex' will be used as the default command name.
+
 LATEX_CMD_NAME         = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to 
+# generate index for LaTeX. If left blank `makeindex' will be used as the 
+# default command name.
+
 MAKEINDEX_CMD_NAME     = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact 
+# LaTeX documents. This may be useful for small projects and may help to 
+# save some trees in general.
+
 COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used 
+# by the printer. Possible values are: a4, a4wide, letter, legal and 
+# executive. If left blank a4wide will be used.
+
 PAPER_TYPE             = a4wide
+
+# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX 
+# packages that should be included in the LaTeX output.
+
 EXTRA_PACKAGES         = 
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for 
+# the generated latex document. The header should contain everything until 
+# the first chapter. If it is left blank doxygen will generate a 
+# standard header. Notice: only use this tag if you know what you are doing!
+
 LATEX_HEADER           = 
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated 
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will 
+# contain links (just like the HTML output) instead of page references 
+# This makes the output suitable for online browsing using a pdf viewer.
+
 PDF_HYPERLINKS         = NO
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of 
+# plain latex in the generated Makefile. Set this option to YES to get a 
+# higher quality PDF documentation.
+
 USE_PDFLATEX           = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. 
+# command to the generated LaTeX files. This will instruct LaTeX to keep 
+# running if errors occur, instead of asking the user for help. 
+# This option is also used when generating formulas in HTML.
+
 LATEX_BATCHMODE        = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not 
+# include the index chapters (such as File Index, Compound Index, etc.) 
+# in the output.
+
 LATEX_HIDE_INDICES     = NO
+
 #---------------------------------------------------------------------------
 # configuration options related to the RTF output
 #---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output 
+# The RTF output is optimized for Word 97 and may not look very pretty with 
+# other RTF readers or editors.
+
 GENERATE_RTF           = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `rtf' will be used as the default path.
+
 RTF_OUTPUT             = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact 
+# RTF documents. This may be useful for small projects and may help to 
+# save some trees in general.
+
 COMPACT_RTF            = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated 
+# will contain hyperlink fields. The RTF file will 
+# contain links (just like the HTML output) instead of page references. 
+# This makes the output suitable for online browsing using WORD or other 
+# programs which support those fields. 
+# Note: wordpad (write) and others do not support links.
+
 RTF_HYPERLINKS         = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's 
+# config file, i.e. a series of assignments. You only have to provide 
+# replacements, missing definitions are set to their default value.
+
 RTF_STYLESHEET_FILE    = 
+
+# Set optional variables used in the generation of an rtf document. 
+# Syntax is similar to doxygen's config file.
+
 RTF_EXTENSIONS_FILE    = 
+
 #---------------------------------------------------------------------------
 # configuration options related to the man page output
 #---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will 
+# generate man pages
+
 GENERATE_MAN           = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `man' will be used as the default path.
+
 MAN_OUTPUT             = man
+
+# The MAN_EXTENSION tag determines the extension that is added to 
+# the generated man pages (default is the subroutine's section .3)
+
 MAN_EXTENSION          = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output, 
+# then it will generate one additional man file for each entity 
+# documented in the real man page(s). These additional files 
+# only source the real man page, but without them the man command 
+# would be unable to find the correct page. The default is NO.
+
 MAN_LINKS              = NO
+
 #---------------------------------------------------------------------------
 # configuration options related to the XML output
 #---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will 
+# generate an XML file that captures the structure of 
+# the code including all documentation.
+
 GENERATE_XML           = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `xml' will be used as the default path.
+
 XML_OUTPUT             = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema, 
+# which can be used by a validating XML parser to check the 
+# syntax of the XML files.
+
 XML_SCHEMA             = 
+
+# The XML_DTD tag can be used to specify an XML DTD, 
+# which can be used by a validating XML parser to check the 
+# syntax of the XML files.
+
 XML_DTD                = 
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will 
+# dump the program listings (including syntax highlighting 
+# and cross-referencing information) to the XML output. Note that 
+# enabling this will significantly increase the size of the XML output.
+
 XML_PROGRAMLISTING     = YES
+
 #---------------------------------------------------------------------------
 # configuration options for the AutoGen Definitions output
 #---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will 
+# generate an AutoGen Definitions (see autogen.sf.net) file 
+# that captures the structure of the code including all 
+# documentation. Note that this feature is still experimental 
+# and incomplete at the moment.
+
 GENERATE_AUTOGEN_DEF   = NO
+
 #---------------------------------------------------------------------------
 # configuration options related to the Perl module output
 #---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will 
+# generate a Perl module file that captures the structure of 
+# the code including all documentation. Note that this 
+# feature is still experimental and incomplete at the 
+# moment.
+
 GENERATE_PERLMOD       = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate 
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able 
+# to generate PDF and DVI output from the Perl module output.
+
 PERLMOD_LATEX          = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be 
+# nicely formatted so it can be parsed by a human reader.  This is useful 
+# if you want to understand what is going on.  On the other hand, if this 
+# tag is set to NO the size of the Perl module output will be much smaller 
+# and Perl will parse it just the same.
+
 PERLMOD_PRETTY         = YES
+
+# The names of the make variables in the generated doxyrules.make file 
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. 
+# This is useful so different doxyrules.make files included by the same 
+# Makefile don't overwrite each other's variables.
+
 PERLMOD_MAKEVAR_PREFIX = 
+
 #---------------------------------------------------------------------------
 # Configuration options related to the preprocessor   
 #---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will 
+# evaluate all C-preprocessor directives found in the sources and include 
+# files.
+
 ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro 
+# names in the source code. If set to NO (the default) only conditional 
+# compilation will be performed. Macro expansion can be done in a controlled 
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
 MACRO_EXPANSION        = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES 
+# then the macro expansion is limited to the macros specified with the 
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
 EXPAND_ONLY_PREDEF     = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files 
+# in the INCLUDE_PATH (see below) will be search if a #include is found.
+
 SEARCH_INCLUDES        = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that 
+# contain include files that are not input files but should be processed by 
+# the preprocessor.
+
 INCLUDE_PATH           = 
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard 
+# patterns (like *.h and *.hpp) to filter out the header-files in the 
+# directories. If left blank, the patterns specified with FILE_PATTERNS will 
+# be used.
+
 INCLUDE_FILE_PATTERNS  = 
+
+# The PREDEFINED tag can be used to specify one or more macro names that 
+# are defined before the preprocessor is started (similar to the -D option of 
+# gcc). The argument of the tag is a list of macros of the form: name 
+# or name=definition (no spaces). If the definition and the = are 
+# omitted =1 is assumed. To prevent a macro definition from being 
+# undefined via #undef or recursively expanded use the := operator 
+# instead of the = operator.
+
 PREDEFINED             = 
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then 
+# this tag can be used to specify a list of macro names that should be expanded. 
+# The macro definition that is found in the sources will be used. 
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
 EXPAND_AS_DEFINED      = 
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then 
+# doxygen's preprocessor will remove all function-like macros that are alone 
+# on a line, have an all uppercase name, and do not end with a semicolon. Such 
+# function macros are typically used for boiler-plate code, and will confuse 
+# the parser if not removed.
+
 SKIP_FUNCTION_MACROS   = YES
+
 #---------------------------------------------------------------------------
 # Configuration::additions related to external references   
 #---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles. 
+# Optionally an initial location of the external documentation 
+# can be added for each tagfile. The format of a tag file without 
+# this location is as follows: 
+#   TAGFILES = file1 file2 ... 
+# Adding location for the tag files is done as follows: 
+#   TAGFILES = file1=loc1 "file2 = loc2" ... 
+# where "loc1" and "loc2" can be relative or absolute paths or 
+# URLs. If a location is present for each tag, the installdox tool 
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen 
+# is run, you must also specify the path to the tagfile here.
+
 TAGFILES               = 
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create 
+# a tag file that is based on the input files it reads.
+
 GENERATE_TAGFILE       = 
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed 
+# in the class index. If set to NO only the inherited external classes 
+# will be listed.
+
 ALLEXTERNALS           = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed 
+# in the modules index. If set to NO, only the current project's groups will 
+# be listed.
+
 EXTERNAL_GROUPS        = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script 
+# interpreter (i.e. the result of `which perl').
+
 PERL_PATH              = /usr/bin/perl
+
 #---------------------------------------------------------------------------
 # Configuration options related to the dot tool   
 #---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will 
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base 
+# or super classes. Setting the tag to NO turns the diagrams off. Note that 
+# this option is superseded by the HAVE_DOT option below. This is only a 
+# fallback. It is recommended to install and use dot, since it yields more 
+# powerful graphs.
+
 CLASS_DIAGRAMS         = YES
+
+# You can define message sequence charts within doxygen comments using the \msc 
+# command. Doxygen will then run the mscgen tool (see 
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the 
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where 
+# the mscgen tool resides. If left empty the tool is assumed to be found in the 
+# default search path.
+
+MSCGEN_PATH            = 
+
+# If set to YES, the inheritance and collaboration graphs will hide 
+# inheritance and usage relations if the target is undocumented 
+# or is not a class.
+
 HIDE_UNDOC_RELATIONS   = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is 
+# available from the path. This tool is part of Graphviz, a graph visualization 
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section 
+# have no effect if this option is set to NO (the default)
+
 HAVE_DOT               = NO
+
+# By default doxygen will write a font called FreeSans.ttf to the output 
+# directory and reference it in all dot files that doxygen generates. This 
+# font does not include all possible unicode characters however, so when you need 
+# these (or just want a differently looking font) you can specify the font name 
+# using DOT_FONTNAME. You need to make sure dot is able to find the font, 
+# which can be done by putting it in a standard location or by setting the 
+# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory 
+# containing the font.
+
+DOT_FONTNAME           = FreeSans
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. 
+# The default size is 10pt.
+
+DOT_FONTSIZE           = 10
+
+# By default doxygen will tell dot to use the output directory to look for the 
+# FreeSans.ttf font (which doxygen will put there itself). If you specify a 
+# different font using DOT_FONTNAME you can set the path where dot 
+# can find it using this tag.
+
+DOT_FONTPATH           = 
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen 
+# will generate a graph for each documented class showing the direct and 
+# indirect inheritance relations. Setting this tag to YES will force the 
+# CLASS_DIAGRAMS tag to NO.
+
 CLASS_GRAPH            = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen 
+# will generate a graph for each documented class showing the direct and 
+# indirect implementation dependencies (inheritance, containment, and 
+# class references variables) of the class with other documented classes.
+
 COLLABORATION_GRAPH    = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen 
+# will generate a graph for groups, showing the direct groups dependencies
+
 GROUP_GRAPHS           = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and 
+# collaboration diagrams in a style similar to the OMG's Unified Modeling 
+# Language.
+
 UML_LOOK               = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the 
+# relations between templates and their instances.
+
 TEMPLATE_RELATIONS     = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT 
+# tags are set to YES then doxygen will generate a graph for each documented 
+# file showing the direct and indirect include dependencies of the file with 
+# other documented files.
+
 INCLUDE_GRAPH          = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and 
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each 
+# documented header file showing the documented files that directly or 
+# indirectly include this file.
+
 INCLUDED_BY_GRAPH      = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then 
+# doxygen will generate a call dependency graph for every global function 
+# or class method. Note that enabling this option will significantly increase 
+# the time of a run. So in most cases it will be better to enable call graphs 
+# for selected functions only using the \callgraph command.
+
 CALL_GRAPH             = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then 
+# doxygen will generate a caller dependency graph for every global function 
+# or class method. Note that enabling this option will significantly increase 
+# the time of a run. So in most cases it will be better to enable caller 
+# graphs for selected functions only using the \callergraph command.
+
 CALLER_GRAPH           = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen 
+# will show a graphical hierarchy of all classes instead of a textual one.
+
 GRAPHICAL_HIERARCHY    = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES 
+# then doxygen will show the dependencies a directory has on other directories 
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
 DIRECTORY_GRAPH        = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images 
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
 DOT_IMAGE_FORMAT       = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be 
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
 DOT_PATH               = 
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that 
+# contain dot files that are included in the documentation (see the 
+# \dotfile command).
+
 DOTFILE_DIRS           = 
-MAX_DOT_GRAPH_WIDTH    = 1024
-MAX_DOT_GRAPH_HEIGHT   = 1024
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of 
+# nodes that will be shown in the graph. If the number of nodes in a graph 
+# becomes larger than this value, doxygen will truncate the graph, which is 
+# visualized by representing a node as a red box. Note that if the 
+# number of direct children of the root node in a graph is already larger than 
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note 
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES    = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the 
+# graphs generated by dot. A depth value of 3 means that only nodes reachable 
+# from the root by following a path via at most 3 edges will be shown. Nodes 
+# that lay further from the root node will be omitted. Note that setting this 
+# option to 1 or 2 may greatly reduce the computation time needed for large 
+# code bases. Also note that the size of a graph can be further restricted by 
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
 MAX_DOT_GRAPH_DEPTH    = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent 
+# background. This is disabled by default, because dot on Windows does not 
+# seem to support this out of the box. Warning: Depending on the platform used, 
+# enabling this option may lead to badly anti-aliased labels on the edges of 
+# a graph (i.e. they become hard to read).
+
 DOT_TRANSPARENT        = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output 
+# files in one run (i.e. multiple -o and -T options on the command line). This 
+# makes dot run faster, but since only newer versions of dot (>1.8.10) 
+# support this, this feature is disabled by default.
+
 DOT_MULTI_TARGETS      = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will 
+# generate a legend page explaining the meaning of the various boxes and 
+# arrows in the dot generated graphs.
+
 GENERATE_LEGEND        = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will 
+# remove the intermediate dot files that are used to generate 
+# the various graphs.
+
 DOT_CLEANUP            = YES
+
 #---------------------------------------------------------------------------
 # Configuration::additions related to the search engine   
 #---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be 
+# used. If set to NO the values of all tags below this one will be ignored.
+
 SEARCHENGINE           = NO

=== modified file 'sql/backup/api_types.h'
--- a/sql/backup/api_types.h	2008-07-19 03:03:39 +0000
+++ b/sql/backup/api_types.h	2008-12-18 21:46:36 +0000
@@ -18,10 +18,12 @@
  class anyway.
  */
 
+/// A null string external reference.
 extern const String my_null_string;
 
 namespace backup {
 
+/// Definition of byte type.
 typedef unsigned char byte;
 
 /**
@@ -30,8 +32,10 @@ typedef unsigned char byte;
   @see @c Backup_driver::get_data and @c Restore_driver::send_data
  */
 
+/// Enumeration for result values.
 enum result_t { OK=0, READY, PROCESSING, BUSY, DONE, ERROR };
 
+/// Definition of version_t which is used as reference to version of backup interfaces.
 typedef uint  version_t;
 
 //@{
@@ -66,89 +70,111 @@ class Db_ref
 {
   const String *m_name;
 
- public:
+public:
 
-  // Construct invalid reference
+  /// Construct invalid reference
   Db_ref() :m_name(NULL)
   {}
 
+  /// Determine if class is valid (must contain a name).
   bool is_valid() const
   { return m_name != NULL; }
 
+  /// Return the name of the database reference.
   const String& name() const
   { return *m_name; }
 
+  /// Return the catalog name.
   const String& catalog() const
   { return my_null_string; }
 
+  /// The equal comparison operator.
   bool operator==(const Db_ref &db) const
   { return stringcmp(m_name, &db.name()) == 0; }
 
+  /// The not equal comparison operator.
   bool operator!=(const Db_ref &db) const
   { return ! this->operator == (db); }
 
- protected:
+protected:
 
   // Constructors are made protected as clients of this class are
   // not supposed to create instances (see comment inside Table_ref)
 
+  /**
+    Constructor
+
+    @param[in]  name  Name of the database.
+  */
   Db_ref(const String &name) :m_name(&name)
   {}
 
-  friend class Table_ref;
+  friend class Table_ref; ///< Table_ref is allowed to use the protected constructor.
 };
 
 
+/**
+  @class Table_ref
+
+  This class is an encapsulation to allow easier access to table information.
+*/
 class Table_ref
 {
-  const Db_ref  m_db;
-  const String  *m_name;
+  const Db_ref  m_db;     ///< The database for the table.
+  const String  *m_name;  ///< The name of the table.
 
- public:
+public:
 
   // Construct invalid reference
   Table_ref() :m_name(NULL)
   {}
 
+  /// Determine if object is valid (it has a name).
   bool is_valid() const
   { return m_name != NULL; }
 
+  /// Return database reference.
   const Db_ref& db() const
   { return m_db; }
 
+  /// Return the name.
   const String& name() const
   { return *m_name; }
 
+  /// Comparison for equal operator.
   bool operator==(const Table_ref &t) const
   {
     return m_db == t.db() &&
            stringcmp(m_name, &t.name()) == 0;
   }
 
+  /// Comparison for not equal operator.
   bool operator!=(const Table_ref &db) const
   { return ! this->operator==(db); }
 
+  /// Definition of a name buffer for the table name.
   typedef char name_buf[FN_REFLEN];
 
-  // Produce string identifying the table (e.g. for error reporting)
+  /// Produce string identifying the table (e.g. for error reporting)
   const char* describe(char *buf, size_t len) const;
+  /// Produce string identifying the table (e.g. for error reporting)
   const char* describe(name_buf &buf) const
   { return describe(buf, sizeof(buf)); }
 
-  // Produce string identifying the table in internal format. 
+  /// Produce string identifying the table in internal format. 
   const char* internal_name(char *buf, size_t len) const;
+  /// Produce string identifying the table in internal format. 
   const char* internal_name(name_buf &buf) const
   { return internal_name(buf, sizeof(buf)); };
   
- protected:
+protected:
 
-  /*
+  /**
     Constructor is made protected as it should not be used by
     clients of this class -- they obtain already constructed
     instances from the backup kernel via Table_list object passed
     when creating backup/restore driver.
   */
-
   Table_ref(const String &db, const String &name)
     :m_db(db), m_name(&name)
   {}
@@ -181,7 +207,7 @@ class Table_ref
 
 class Table_list
 {
-  public:
+public:
 
     virtual ~Table_list() {}
 
@@ -262,6 +288,11 @@ struct Buffer
   Buffer() :size(0), table_num(0), last(FALSE), data(NULL)
   {}
 
+  /**
+    Reset the buffer size.
+
+    @param[in]  len  The new length.
+  */
   void reset(size_t len)
   {
     size= len;
@@ -277,8 +308,11 @@ class Restore_driver;
 
 } // backup namespace
 
+/// Definition of backup result value types.
 typedef backup::result_t Backup_result_t;
+/// Definition of backup engine class type.
 typedef backup::Engine   Backup_engine;
+/// Definition of backup factory class type.
 typedef Backup_result_t backup_factory(::handlerton *, Backup_engine*&);
 
 #endif

=== modified file 'sql/backup/backup_aux.h'
--- a/sql/backup/backup_aux.h	2008-11-19 16:32:01 +0000
+++ b/sql/backup/backup_aux.h	2008-12-18 21:46:36 +0000
@@ -8,29 +8,38 @@
 
 */ 
 
+/// Definition for storage engine reference type.
 typedef st_plugin_int* storage_engine_ref;
 
 // Macro which transforms plugin_ref to storage_engine_ref
 #ifdef DBUG_OFF
+/// Macro to map plugin_ref to se_ref
 #define plugin_ref_to_se_ref(A) (A)
+/// Macro to map se_ref to plugin_ref
 #define se_ref_to_plugin_ref(A) (A)
 #else
+/// Macro to map plugin_ref to se_ref
 #define plugin_ref_to_se_ref(A) ((A) ? *(A) : NULL)
+/// Macro to map se_ref to plugin_ref
 #define se_ref_to_plugin_ref(A) &(A)
 #endif
 
+/// Return the storage engine name.
 inline
 const char* se_name(storage_engine_ref se)
 { return se->name.str; }
 
+/// Return the version of the plugin.
 inline
 uint se_version(storage_engine_ref se)
 { return se->plugin->version; }  // Q: Or, should it be A->plugin_dl->version?
 
+/// Return the pointer to the handlerton.
 inline
 handlerton* se_hton(storage_engine_ref se)
 { return (handlerton*)(se->data); }
 
+/// Return a storage engine reference by name.
 inline
 storage_engine_ref get_se_by_name(const LEX_STRING name)
 { 
@@ -48,30 +57,55 @@ namespace backup {
  */
 struct LEX_STRING: public ::LEX_STRING
 {
+  /// Base constructor for null string.
   LEX_STRING()
   {
     str= NULL;
     length= 0;
   }
 
+  /** 
+    Constructor for LEX_STRING class.
+
+    @param[in]  s  LEX_STRING string.
+  */
   LEX_STRING(const ::LEX_STRING &s)
   {
     str= s.str;
     length= s.length;
   }
 
+  /** 
+    Constructor for LEX_STRING class.
+
+    @param[in]  s  Character class.
+  */
   LEX_STRING(const char *s)
   {
     str= const_cast<char*>(s);
     length= strlen(s);
   }
 
+  /** 
+    Constructor for LEX_STRING class.
+
+    @param[in]  s  String class.
+  */
   LEX_STRING(const String &s)
   {
     str= const_cast<char*>(s.ptr());
     length= s.length();
   }
 
+  /**
+     Constructor for the LEX_STRING class.
+
+     This constructor takes a beginning and an ending pointer to construct the
+     string.
+
+     @param[in]  begin  Pointer to start of string.
+     @param[in]  end    Pointer to end of string.
+  */
   LEX_STRING(byte *begin, byte *end)
   {
     str= (char*)begin;
@@ -89,11 +123,21 @@ struct LEX_STRING: public ::LEX_STRING
  */
 class String: public ::String
 {
- public:
+public:
+
+  /** 
+    Constructor for string class.
 
+    @param[in]  s  String class.
+  */
   String(const ::String &s) : ::String(s)
   {}
 
+  /** 
+    Constructor for string class.
+
+    @param[in]  s  LEX_STRING string.
+  */
   String(const ::LEX_STRING &s)
     : ::String(s.str, (uint32)s.length, &::my_charset_bin)
   {
@@ -101,6 +145,15 @@ class String: public ::String
     DBUG_ASSERT(s.length <= ~((uint32)0));
   }
 
+  /**
+     Constructor 
+
+     This constructor takes a beginning and an ending pointer to construct the
+     string.
+
+     @param[in]  begin  Pointer to start of string.
+     @param[in]  end    Pointer to end of string.
+  */
   String(byte *begin, byte *end)
     : ::String((char*)begin, (uint32)(end - begin), &::my_charset_bin)
   {
@@ -120,14 +173,30 @@ class String: public ::String
      set((char*)NULL, 0, NULL); // Note: explicit cast is needed to disambiguate.
   }
 
+  /** 
+    Constructor for string class.
+
+    @param[in]  s  Character string.
+  */
   String(const char *s)
     : ::String(s, &::my_charset_bin)
   {}
 
+  /// Base constructor for null string class.
   String() : ::String()
   {}
 };
 
+/**
+  Set a table into a TABLE_LIST.
+
+  @param[in]  tl         The table list.
+  @param[in]  tbl        A table for the list.
+  @param[in]  lock_type  The lock type.
+  @param[in]  mem        Pointer to base memory.
+
+  @returns 0 on success.
+*/
 inline
 int set_table_list(TABLE_LIST &tl, const Table_ref &tbl,
                    thr_lock_type lock_type, MEM_ROOT *mem)
@@ -146,6 +215,15 @@ int set_table_list(TABLE_LIST &tl, const
   return 0;
 }
 
+/**
+  Build a new TABLE_LIST.
+
+  @param[in]  tbl        A table for the list.
+  @param[in]  lock_type  The lock type.
+  @param[in]  mem        Pointer to base memory.
+
+  @returns Pointer to the new TABLE_LIST.
+*/
 inline
 TABLE_LIST* mk_table_list(const Table_ref &tbl, thr_lock_type lock_type, 
                           MEM_ROOT *mem)
@@ -166,6 +244,14 @@ TABLE_LIST* mk_table_list(const Table_re
   return ptr;
 }
 
+/**
+  Link two table lists together.
+
+  @param[in] tl   The list to append onto.
+  @param[in] next The list to append.
+  
+  @returns New list pointer.
+*/
 inline
 TABLE_LIST* link_table_list(TABLE_LIST &tl, TABLE_LIST *next)
 {
@@ -174,14 +260,14 @@ TABLE_LIST* link_table_list(TABLE_LIST &
 }
 
 TABLE_LIST *build_table_list(const Table_list &tables, thr_lock_type lock);
-void free_table_list(TABLE_LIST*);
+void free_table_list(TABLE_LIST* tables);
 
 } // backup namespace
 
 /**
   Implements a dynamic map from A to B* (also known as hash array).
   
-  An instance @map of calss @c Map<A,B> can store mappings from values of 
+  An instance @c map of class @c Map<A,B> can store mappings from values of 
   type @c A to pointers of type @c B*. Such mappings are added with
   @code
    A a;
@@ -223,15 +309,16 @@ class Map
 {
   HASH m_hash;
   
- public:
+public:
 
+  /// Constructor
   Map(size_t);
   ~Map();
 
   int insert(const A&, B*);
   B* operator[](const A&) const;
   
- private:
+private:
  
   struct Node;
 };
@@ -250,11 +337,17 @@ struct Map<A,B>::Node
     Note: key member must be first for correct key offset value in HASH 
     initialization.
    */
-  A key;  
-  B *ptr;
+  A key;   ///< key or index in hash/map.
+  B *ptr;  ///< pointer to item in hash/map.
 
+  /// Constructor
   Node(const A &a, B *b) :key(a), ptr(b) {}
   
+  /**
+    Delete the node by key.
+
+    @param[in]  node  The node to delete.
+  */
   static void del_key(void *node)
   { delete (Node*) node; }
 };
@@ -308,15 +401,31 @@ class Map<uint,T>: public ::Dynamic_arra
 {
   typedef Dynamic_array< T* > Base;
   
- public:
+public:
+
+  /// Constructor
+  Map(uint init_size, uint increment);
+
+  /**
+    The index operator.
+
+    @param[in]  pos  Position to retrieve.
+
+    @returns The item and position @c pos.
+  */
+  T* operator[](ulong pos) const;
+  /**
+    Insert an item in the map.
 
-   Map(uint init_size, uint increment);
+    @param[in]  pos  Desired position.
+    @param[in]  ptr  Item to insert. 
 
-   T* operator[](ulong pos) const;
-   int insert(ulong pos, T *ptr);
-   ulong count() const;
+    @retval  Status of insert.
+  */
+  int insert(ulong pos, T *ptr); 
+  ulong count() const;
 
- private:
+private:
 
    void clear_free_space();
 };

=== modified file 'sql/backup/backup_engine.h'
--- a/sql/backup/backup_engine.h	2008-07-19 03:03:39 +0000
+++ b/sql/backup/backup_engine.h	2008-12-18 21:46:36 +0000
@@ -45,7 +45,7 @@ class Restore_driver;
 
 class Engine
 {
- public:
+public:
 
   virtual ~Engine() {}
 
@@ -76,7 +76,7 @@ class Engine
   /**
    Create a restore driver.
 
-   Given a list of tables to be restored, create instance of restore
+   @brief Given a list of tables to be restored, create instance of restore
    driver which will restore these tables from a backup image.
 
    The @c flags parameter gives additional information about
@@ -84,10 +84,10 @@ class Engine
    @c Driver::FULL flag if the driver is supposed to replace all the
    tables stored in a given storage engine with the restored ones.
 
-   @param  version  (in) version of the backup image.
-   @param  flags    (in) additional info about restore operation.
-   @param  tables   (in) list of tables to be restored.
-   @param  drv      (out) pointer to restore driver instance.
+   @param[in]  version  version of the backup image.
+   @param[in]  flags    additional info about restore operation.
+   @param[in]  tables   list of tables to be restored.
+   @param[out] drv      pointer to restore driver instance.
 
    @return  Error code or @c OK on success.
   */
@@ -131,7 +131,7 @@ class Engine
 
 class Driver
 {
- public:
+public:
 
   /// Types of backup/restore operations.
   enum enum_flags { FULL    =0x1,  ///< concerns all tables from given storage engine
@@ -191,7 +191,7 @@ class Driver
   /// Unknown size constant used for backup image size estimates.
   static const size_t UNKNOWN_SIZE;
 
- protected:
+protected:
 
   /// Refers to the list of tables passed when the driver was created.
   const Table_list &m_tables;
@@ -257,8 +257,9 @@ class Driver
 
 class Backup_driver: public Driver
 {
- public:
+public:
 
+  /// Constructor
   Backup_driver(const Table_list &tables) :Driver(tables) {};
 
   virtual ~Backup_driver() {}; // We will derive from this class.
@@ -463,8 +464,9 @@ class Backup_driver: public Driver
 
 class Restore_driver: public Driver
 {
- public:
+public:
 
+  /// Constructor
   Restore_driver(const Table_list &tables) :Driver(tables) {};
   virtual ~Restore_driver() {};
 

=== modified file 'sql/backup/backup_info.cc'
--- a/sql/backup/backup_info.cc	2008-12-10 15:53:06 +0000
+++ b/sql/backup/backup_info.cc	2008-12-18 21:46:36 +0000
@@ -231,19 +231,28 @@ Backup_info::find_backup_engine(const ba
 
  *************************************************/
 
-/*
+/**
   Definition of Backup_info::Ts_hash_node structure used by Backup_info::ts_hash
   HASH.
- */ 
-
+*/ 
 struct Backup_info::Ts_hash_node
 {
   const String *name;	///< Name of the tablespace.
   Ts *it;               ///< Catalogue entry holding the tablespace (if exists).
 
+  /// Constructor
   Ts_hash_node(const String*);
 
-  static uchar* get_key(const uchar *record, size_t *key_length, my_bool);
+  /**
+    Return the key of the node.
+
+    @param[in]  record      The data in the record.
+    @param[in]  key_length  The length of the key.
+    @param[in]  attr        Unused (__attribute__((unused))).
+    @returns Pointer to the key.
+  */
+  static uchar* get_key(const uchar *record, size_t *key_length, my_bool attr);
+  /// Free data.
   static void free(void *record);
 };
 
@@ -281,14 +290,35 @@ uchar* Backup_info::Ts_hash_node::get_ke
  */ 
 struct Backup_info::Dep_node: public Sql_alloc
 {
-  Dep_node *next;
-  Dbobj *obj;
-  String key;
+  Dep_node *next; ///< Pointer to next node.
+  Dbobj *obj;     ///< Pointer to database object.
+  String key;     ///< The key name.
+
+  /**
+    Constructor using data items.
 
+    @param[in]  db_name   Name of the database.
+    @param[in]  name      Name of object.
+    @param[in]  type      Type of object.
+  */
   Dep_node(const ::String &db_name, const ::String &name, const obj_type type);
+  /// Copy constructor using existing node.
   Dep_node(const Dep_node&);
 
-  static uchar* get_key(const uchar *record, size_t *key_length, my_bool);
+  /**
+    Return the key of the node.
+
+    @param[in]  record      The data in the record.
+    @param[in]  key_length  The length of the key.
+    @param[in]  attr        Unused (__attribute__((unused))).
+    @returns Pointer to the key.
+  */
+  static uchar* get_key(const uchar *record, size_t *key_length, my_bool attr);
+  /**
+    Free the node.
+
+    @param[in]  record  The data to free.
+  */
   static void free(void *record);
 };
 
@@ -573,7 +603,8 @@ backup::Image_info::Db* Backup_info::add
 /**
   Select given databases for backup.
 
-  @param[in]  list of databases to be backed-up
+  @param[in]  thd  Current thread.
+  @param[in]  dbs  List of databases to be backed-up
 
   For each database, all objects stored in that database are also added to
   the image.
@@ -888,7 +919,7 @@ namespace {
  */ 
 class Tbl: public backup::Table_ref
 {
- public:
+public:
 
    Tbl(obs::Obj *obj) :backup::Table_ref(*obj->get_db_name(), *obj->get_name())
    {}
@@ -1348,13 +1379,14 @@ class Backup_info::Global_iterator
   Iterator *m_it; ///< Points at the currently used iterator.
   Obj *m_obj;         ///< Points at next object to be returned by this iterator.
 
- public:
+public:
 
+  /// Constructor
   Global_iterator(const Backup_info&);
 
   int init();
 
- private:
+private:
 
   Obj* get_ptr() const;
   bool next();
@@ -1450,11 +1482,12 @@ class Backup_info::Perdb_iterator : publ
 {
   Dep_node *ptr;
 
- public:
+public:
 
+  /// Constructor
   Perdb_iterator(const Backup_info&);
 
- private:
+private:
 
   Obj* get_ptr() const;
   bool next();

=== modified file 'sql/backup/backup_info.h'
--- a/sql/backup/backup_info.h	2008-12-10 15:53:06 +0000
+++ b/sql/backup/backup_info.h	2008-12-18 21:46:36 +0000
@@ -29,9 +29,9 @@ int write_table_data(THD*, backup::Logge
 */
 class Backup_info: public backup::Image_info
 {
- public:
+public:
 
-  backup::Logger &m_log;
+  backup::Logger &m_log;  ///< Pointer to logger class.
 
   ~Backup_info();
 
@@ -45,7 +45,7 @@ class Backup_info: public backup::Image_
   Iterator* get_global() const;
   Iterator* get_perdb()  const;
 
- private:
+private:
 
   /*
     Note: constructor is private because instances of this class are supposed

=== modified file 'sql/backup/backup_kernel.h'
--- a/sql/backup/backup_kernel.h	2008-11-25 17:44:19 +0000
+++ b/sql/backup/backup_kernel.h	2009-01-13 12:57:55 +0000
@@ -58,9 +58,11 @@ int restore_table_data(THD*, Restore_inf
  */ 
 class Backup_restore_ctx: public backup::Logger 
 {
- public:
+public:
 
+  /// Constructor
   Backup_restore_ctx(THD*);
+  /// Destructor
   ~Backup_restore_ctx();
 
   bool is_valid() const;
@@ -78,9 +80,10 @@ class Backup_restore_ctx: public backup:
 
   int close();
 
+  /// Return the thread instance used.
   THD* thd() const { return m_thd; }
 
- private:
+private:
 
   // Prevent copying/assignments
   Backup_restore_ctx(const Backup_restore_ctx&);
@@ -99,9 +102,31 @@ class Backup_restore_ctx: public backup:
 
   /** 
     @brief State of a context object. 
-    
-    Backup/restore can be performed only if object is prepared for that 
-    operation.
+
+    The following diagram illustrates the states in which a context object
+    can be and how the state changes as a result of calling public methods.
+    Methods which are not listed are forbidden in a given state.
+    @verbatim
+    CREATED
+        prepare_for_backup()   -> PREPARED_FOR_BACKUP
+        prepare_for_restore()  -> PREPARED_FOR_RESTORE
+        close()                -> CLOSED
+
+    PREPARED_FOR_BACKUP
+        do_backup()            -> CLOSED
+        close()                -> CLOSED
+
+    PREPARED_FOR_RESTORE
+        do_restore()           -> CLOSED
+        close()                -> CLOSED
+
+    CLOSED
+        close()                -> CLOSED
+    @endverbatim
+
+    @note An instance of the context class can be used only once -- when it 
+    moves to CLOSED state no methods can be called except for close() which does
+    nothing in that case.
    */
   enum { CREATED,
          PREPARED_FOR_BACKUP,
@@ -129,9 +154,6 @@ class Backup_restore_ctx: public backup:
   
   ::String  m_path;   ///< Path to where the backup image file is located.
 
-  /** If true, the backup image file is deleted at clean-up time. */
-  bool m_remove_loc;
-
   backup::Stream *m_stream; ///< Pointer to the backup stream object, if opened.
   backup::Image_info *m_catalog;  ///< Pointer to the image catalogue object.
 
@@ -147,7 +169,13 @@ class Backup_restore_ctx: public backup:
   /** 
     Indicates if tables have been locked with @c lock_tables_for_restore()
   */
-  bool m_tables_locked; 
+  bool m_tables_locked;
+
+  /**
+    Table list created by lock_tables_for_restore() and used by
+    unlock_tables(). Members are allocated from m_thd->mem_root.
+  */
+  TABLE_LIST *m_backup_tables;
 
   /**
     Indicates we must turn binlog back on in the close method. This is
@@ -160,6 +188,9 @@ class Backup_restore_ctx: public backup:
   
   int report_stream_open_failure(int open_error, const LEX_STRING *location);
 
+  /// Indicates if the operation has been successfully completed.  
+  bool m_completed;  
+
   friend int backup_init();
   friend void backup_shutdown();
   friend bstream_byte* bstream_alloc(unsigned long int);
@@ -207,12 +238,11 @@ void Backup_restore_ctx::disable_fkey_co
 inline
 int Backup_restore_ctx::fatal_error(int error_code)
 {
-  m_remove_loc= TRUE;
-
   if (m_error)
     return m_error;
 
   m_error= error_code;
+  report_state(BUP_ERRORS);
 
   return error_code;
 }

=== modified file 'sql/backup/backup_test.cc'
--- a/sql/backup/backup_test.cc	2008-12-05 23:47:51 +0000
+++ b/sql/backup/backup_test.cc	2008-12-18 21:46:36 +0000
@@ -16,7 +16,7 @@ using namespace obs;
    Call backup kernel API to execute backup related SQL statement.
 
    @param[in] thd  current thread
-   @param[in] lex  results of parsing the statement.
+   @param[in] db_list  List of databases.
   */
 int execute_backup_test_command(THD *thd, List<LEX_STRING> *db_list)
 {

=== modified file 'sql/backup/be_default.cc'
--- a/sql/backup/be_default.cc	2008-12-12 11:36:54 +0000
+++ b/sql/backup/be_default.cc	2009-01-21 15:00:23 +0000
@@ -15,53 +15,52 @@
 */
 
 /**
- * @file 
- *
- * @brief Contains the default backup algorithm driver.
- *
- * This file contains the default backup algorithm (also called a "driver"
- * in the online backup terminology. The default backup algorithm may be
- * used in place of an engine-specific driver if one does not exist or if
- * chosen by the user.
- *
- * The default backup algorithm is a blocking algorithm that locks all of
- * the tables given at the start of the backup/restore process. Once all of
- * the data is backed up or restored, the locks are removed. The default
- * backup is a row-level backup and therefore does not backup the indexes
- * or any of the engine-specific files.
- *
- * The classes in this file use the namespace @c default_backup to distinguish
- * these classes from other backup drivers. The backup functionality is
- * contained in the backup class shown below. Similarly, the restore
- * functionality is contained in the restore class below.
- *
- * The format of the backup is written as a series of data blocks where each
- * block contains a flag indicating what kind of data is in the block. The 
- * flags are:
- *
- * <code>
- *   RCD_ONCE   - Single data block for record data
- *   RCD_FIRST  - First data block in buffer for record buffer
- *   RCD_DATA   - Intermediate data block for record buffer
- *   RCD_LAST   - Last data block in buffer for record buffer
- *   BLOB_ONCE  - Single data block for blob data
- *   BLOB_FIRST - First data block in buffer for blob buffer
- *   BLOB_DATA  - Intermediate data block for blob buffer
- *   BLOB_LAST  - Last data block in buffer for blob buffer
- * </code>
- *
- * The flag is the first byte in the block. The remaining space in the block
- * is the data -- either record data or blob fields.
- *
- * The block flagged as BLOB_FIRST also contains a 4-byte field which 
- * contains the total size of the blob field. This is necessary for restore
- * because the size of the blob field is unknown and the size is needed to 
- * allocate memory for the buffer_iterator used to buffer large data from
- * the kernel.
- *
- * TODO 
- *  - Consider making the enums for BACKUP_MODE and RESTORE_MODE bit fields.
- *  - Change code to ignore blobs with no data (NULL).
+   @file 
+ 
+   @brief Contains the default backup algorithm driver.
+ 
+   This file contains the default backup algorithm (also called a "driver"
+   in the online backup terminology. The default backup algorithm may be
+   used in place of an engine-specific driver if one does not exist or if
+   chosen by the user.
+ 
+   The default backup algorithm is a blocking algorithm that locks all of
+   the tables given at the start of the backup/restore process. Once all of
+   the data is backed up or restored, the locks are removed. The default
+   backup is a row-level backup and therefore does not backup the indexes
+   or any of the engine-specific files.
+ 
+   The classes in this file use the namespace @c default_backup to distinguish
+   these classes from other backup drivers. The backup functionality is
+   contained in the backup class shown below. Similarly, the restore
+   functionality is contained in the restore class below.
+ 
+   The format of the backup is written as a series of data blocks where each
+   block contains a flag indicating what kind of data is in the block. The 
+   flags are:
+ 
+   <code>
+     RCD_ONCE   - Single data block for record data
+     RCD_FIRST  - First data block in buffer for record buffer
+     RCD_DATA   - Intermediate data block for record buffer
+     RCD_LAST   - Last data block in buffer for record buffer
+     BLOB_ONCE  - Single data block for blob data
+     BLOB_FIRST - First data block in buffer for blob buffer
+     BLOB_DATA  - Intermediate data block for blob buffer
+     BLOB_LAST  - Last data block in buffer for blob buffer
+   </code>
+ 
+   The flag is the first byte in the block. The remaining space in the block
+   is the data -- either record data or blob fields.
+ 
+   The block flagged as BLOB_FIRST also contains a 4-byte field which 
+   contains the total size of the blob field. This is necessary for restore
+   because the size of the blob field is unknown and the size is needed to 
+   allocate memory for the buffer_iterator used to buffer large data from
+   the kernel.
+ 
+   @todo Consider making the enums for BACKUP_MODE and RESTORE_MODE bit fields.
+   @todo Change code to ignore blobs with no data (NULL).
  */
 #include "../mysql_priv.h"
 #include "backup_engine.h"
@@ -155,12 +154,12 @@ Backup::~Backup()
 
 
 /**
-  * @brief Prelock call to setup locking.
-  *
-  * Launches a separate thread ("locking thread") which will lock
-  * tables. Locking in a separate thread is needed to have a non-blocking
-  * prelock() (given that thr_lock() is blocking).
-  */
+  @brief Prelock call to setup locking.
+  
+  Launches a separate thread ("locking thread") which will lock
+  tables. Locking in a separate thread is needed to have a non-blocking
+  prelock() (given that thr_lock() is blocking).
+*/
 result_t Backup::prelock()
 {
   DBUG_ENTER("Default_backup::prelock()");
@@ -168,14 +167,14 @@ result_t Backup::prelock()
 }
 
 /**
-  * @brief Start table read.
-  *
-  * This method saves the handler for the table and initializes the
-  * handler for reading.
-  *
-  * @retval OK     handler initialized properly.
-  * @retval ERROR  problem with hander initialization.
-  */
+  @brief Start table read.
+ 
+  This method saves the handler for the table and initializes the
+  handler for reading.
+ 
+  @retval OK     handler initialized properly.
+  @retval ERROR  problem with handler initialization.
+*/
 result_t Backup::start_tbl_read(TABLE *tbl)
 {
   int last_read_res;  
@@ -193,13 +192,12 @@ result_t Backup::start_tbl_read(TABLE *t
 }
 
 /**
-  * @brief End table read.
-  *
-  * This method signals the handler that the reading process is complete.
-  *
-  * @retval OK     handler read stopped properly.
-  * @retval ERROR  problem with hander.
-  */
+  @brief End table read.
+  This method signals the handler that the reading process is complete.
+  
+  @retval OK     handler read stopped properly.
+  @retval ERROR  problem with handler.
+*/
 result_t Backup::end_tbl_read()
 {
   int last_read_res;
@@ -217,14 +215,14 @@ result_t Backup::end_tbl_read()
 }
 
 /**
-  * @brief Get next table in the list.
-  *
-  * This method iterates through the list of tables selecting the
-  * next table in the list and starting the read process.
-  *
-  * @retval 0   no errors.
-  * @retval -1  no more tables in list.
-  */
+  @brief Get next table in the list.
+ 
+  This method iterates through the list of tables selecting the
+  next table in the list and starting the read process.
+ 
+  @retval 0   no errors.
+  @retval -1  no more tables in list.
+*/
 int Backup::next_table()
 {
   DBUG_ENTER("Backup::next_table()");
@@ -251,16 +249,17 @@ int Backup::next_table()
 }
 
 /* Potential buffer on the stack for the bitmap */
+/// Define bitmap stack size.
 #define BITMAP_STACKBUF_SIZE (128/8)
 
 /**
-  * @brief Pack the data for a row in the table.
-  *
-  * This method uses the binary log methods to pack a row from the
-  * internal row format to the binary log format.
-  *
-  * @returns  Size of packed row.
-  */
+  @brief Pack the data for a row in the table.
+   
+  This method uses the binary log methods to pack a row from the
+  internal row format to the binary log format.
+ 
+  @returns  Size of packed row.
+*/
 uint Backup::pack(byte *rcd, byte *packed_row)
 {
   uint size= 0;
@@ -284,30 +283,30 @@ uint Backup::pack(byte *rcd, byte *packe
 }
 
 /**
-  * @brief Get the data for a row in the table.
-  * This method is the main method used in the backup operation. It is
-  * responsible for reading a row from the table and placing the data in
-  * the buffer (buf.data) and setting the correct attributes for processing
-  * (e.g., buf.size = size of record data).
-  *
-  * Control of the method is accomplished by using several modes that
-  * signal portions of the method to run. These modes are:
-  *
-  * <code>
-  * INITIALIZE          Indicates time to initialize read
-  * GET_NEXT_TABLE      Open next table in the list
-  * READ_RCD            Reading rows from table mode
-  * READ_RCD_BUFFER     Buffer records mode
-  * CHECK_BLOBS         See if record has blobs
-  * READ_BLOB           Reading blobs from record mode
-  * READ_BLOB_BUFFER    Buffer blobs mode
-  * </code>
-  *
-  * @retval READY   initialization phase complete.
-  * @retval OK      data read.
-  * @retval ERROR   problem with reading data.
-  * @retval DONE    driver finished reading from all tables.
-  */
+  @brief Get the data for a row in the table.
+  This method is the main method used in the backup operation. It is
+  responsible for reading a row from the table and placing the data in
+  the buffer (buf.data) and setting the correct attributes for processing
+  (e.g., buf.size = size of record data).
+  
+  Control of the method is accomplished by using several modes that
+  signal portions of the method to run. These modes are:
+  
+  <code>
+  INITIALIZE          Indicates time to initialize read
+  GET_NEXT_TABLE      Open next table in the list
+  READ_RCD            Reading rows from table mode
+  READ_RCD_BUFFER     Buffer records mode
+  CHECK_BLOBS         See if record has blobs
+  READ_BLOB           Reading blobs from record mode
+  READ_BLOB_BUFFER    Buffer blobs mode
+  </code>
+  
+  @retval READY   initialization phase complete.
+  @retval OK      data read.
+  @retval ERROR   problem with reading data.
+  @retval DONE    driver finished reading from all tables.
+*/
 result_t Backup::get_data(Buffer &buf)
 {
   int last_read_res;  
@@ -620,23 +619,12 @@ result_t Restore::cleanup()
 }
 
 /**
-  * @brief Truncate table.
-  *
-  * This method saves the handler for the table and deletes all rows in
-  * the table.
-  *
-  * @retval OK     rows deleted.
-  * @retval ERROR  problem with deleting rows.
-  */
-
-
-/**
-  * @brief End restore process.
-  *
-  * This method unlocks and closes all of the tables.
-  *
-  * @retval OK    all tables unlocked.
-  */
+  @brief End restore process.
+  
+  This method unlocks and closes all of the tables.
+  
+  @retval OK    all tables unlocked.
+*/
 result_t Restore::end()
 {
   DBUG_ENTER("Restore::end");
@@ -644,25 +632,14 @@ result_t Restore::end()
 }
 
 /**
-  * @brief Get next table in the list.
-  *
-  * This method iterates through the list of tables selecting the
-  *  next table in the list and starting the write process.
-  *
-  * @retval 0   no errors.
-  * @retval -1  no more tables in list.
-  */
-
-
-/**
-  * @brief Unpack the data for a row in the table.
-  *
-  * This method uses the binary log methods to unpack a row from the
-  * binary log format to the internal row format.
-  *
-  * @retval 0   no errors.
-  * @retval !0  errors during unpack_row().
-  */
+  @brief Unpack the data for a row in the table.
+  
+  This method uses the binary log methods to unpack a row from the
+  binary log format to the internal row format.
+  
+  @retval 0   no errors.
+  @retval !0  errors during unpack_row().
+*/
 uint Restore::unpack(byte *packed_row)
 {
   int error= 0;
@@ -689,29 +666,29 @@ uint Restore::unpack(byte *packed_row)
 }
 
 /**
-  * @brief Restore the data for a row in the table.
-  *
-  * This method is the main method used in the restore operation. It is
-  * responsible for writing a row to the table.
-  *
-  * Control of the method is accomplished by using several modes that
-  * signal portions of the method to run. These modes are:
-  *
-  * <code>
-  * INITIALIZE          Indicates time to initialize read
-  * GET_NEXT_TABLE      Open next table in the list
-  * WRITE_RCD           Writing rows from table mode
-  * CHECK_BLOBS         See if record has blobs
-  * WRITE_BLOB          Writing blobs from record mode
-  * WRITE_BLOB_BUFFER   Buffer blobs mode
-  * </code>
-  *
-  * @retval READY       initialization phase complete.
-  * @retval OK          data written.
-  * @retval ERROR       problem with writing data.
-  * @retval PROCESSING  switching modes -- do not advance stream.
-  * @retval DONE        driver finished writing to all tables.
-  */
+  @brief Restore the data for a row in the table.
+  
+  This method is the main method used in the restore operation. It is
+  responsible for writing a row to the table.
+  
+  Control of the method is accomplished by using several modes that
+  signal portions of the method to run. These modes are:
+  
+  <code>
+  INITIALIZE          Indicates time to initialize read
+  GET_NEXT_TABLE      Open next table in the list
+  WRITE_RCD           Writing rows from table mode
+  CHECK_BLOBS         See if record has blobs
+  WRITE_BLOB          Writing blobs from record mode
+  WRITE_BLOB_BUFFER   Buffer blobs mode
+  </code>
+  
+  @retval READY       initialization phase complete.
+  @retval OK          data written.
+  @retval ERROR       problem with writing data.
+  @retval PROCESSING  switching modes -- do not advance stream.
+  @retval DONE        driver finished writing to all tables.
+*/
 result_t Restore::send_data(Buffer &buf)
 {
   byte *ptr= 0;

=== modified file 'sql/backup/be_default.h'
--- a/sql/backup/be_default.h	2008-07-19 03:03:39 +0000
+++ b/sql/backup/be_default.h	2008-12-18 21:46:36 +0000
@@ -34,40 +34,51 @@ const byte BLOB_LAST=  (3U<<3); // Last
 
 
 /**
- * @class Backup
- *
- * @brief Contains the default backup algorithm backup functionality.
- *
- * The backup class is a row-level backup mechanism designed to perform
- * a table scan on each table reading the rows and saving the data to the
- * buffer from the backup algorithm.
- *
- * @see <backup driver> and <backup thread driver>
- */
+  @class Backup
+ 
+  @brief Contains the default backup algorithm backup functionality.
+ 
+  The backup class is a row-level backup mechanism designed to perform
+  a table scan on each table reading the rows and saving the data to the
+  buffer from the backup algorithm.
+ 
+  @see Backup_driver and Backup_thread_driver
+*/
 class Backup: public Backup_thread_driver
 {
-  public:
+public:
+    /// Enumeration values for status of data.
     enum has_data_info { YES, WAIT, EOD };
+    /// Constructor
     Backup(const Table_list &tables, THD *t_thd, thr_lock_type lock_type);
     virtual ~Backup(); 
+    /// Return current size of data
     size_t size()  { return UNKNOWN_SIZE; };
+    /// Return initial size of data
     size_t init_size() { return 0; };
+    /// Initialize backup process
     result_t  begin(const size_t) { return backup::OK; };
+    /// End backup process
     result_t end() { return backup::OK; };
     result_t get_data(Buffer &buf);
+    /// Lock signal
     result_t lock() { return backup::OK; };
+    /// Unlock signal
     result_t unlock() { return backup::OK; };
+    /// Cancel the process
     result_t cancel() 
     { 
       mode= CANCEL;
       cleanup();
       return backup::OK;
     }
+    /// Return table list containing all tables
     TABLE_LIST *get_table_list() { return all_tables; }
+    /// Free the class resources
     void free() { delete this; };
     result_t prelock(); 
 
- protected:
+protected:
     TABLE *cur_table;              ///< The table currently being read.
     my_bool init_phase_complete;   ///< Used to identify end of init phase.
     my_bool locks_acquired;        ///< Used to help kernel synchronize drivers.
@@ -75,7 +86,7 @@ class Backup: public Backup_thread_drive
     my_bool m_cleanup;             ///< Is call to cleanup() needed?
     result_t end_tbl_read(); 
 
-  private:
+private:
     /*
       We use an enum to control the flow of the algorithm. Each mode 
       invokes a different behavior through a large switch. The mode is
@@ -109,20 +120,22 @@ class Backup: public Backup_thread_drive
 };
 
 /**
- * @class Restore
- *
- * @brief Contains the default backup algorithm restore functionality.
- *
- * The restore class is a row-level backup mechanism designed to restore
- * data for each table by writing the data for the rows from the
- * buffer given by the backup algorithm.
- *
- * @see <restore driver>
- */
+  @class Restore
+ 
+  @brief Contains the default backup algorithm restore functionality.
+ 
+  The restore class is a row-level backup mechanism designed to restore
+  data for each table by writing the data for the rows from the
+  buffer given by the backup algorithm.
+ 
+  @see Restore_driver
+*/
 class Restore: public Restore_driver
 {
-  public:
+public:
+    /// Enumeration values for status of data.
     enum has_data_info { YES, WAIT, EOD };
+    /// Constructor
     Restore(const backup::Logical_snapshot &info, THD *t_thd);
     virtual ~Restore()
     { 
@@ -139,7 +152,7 @@ class Restore: public Restore_driver
     }
     void free() { delete this; };
 
- private:
+private:
      /*
       We use an enum to control the flow of the algorithm. Each mode 
       invokes a different behavior through a large switch. The mode is
@@ -190,18 +203,25 @@ namespace backup {
 
 class Logger;
 
+/**
+  Extends Logical_snapshot to implement the default backup driver.
+*/
 class Default_snapshot: public Logical_snapshot
 {
- public:
+public:
 
+  /// Constructor
   Default_snapshot(Logger&) :Logical_snapshot(1) // current version number is 1
   {}
+  /// Constructor
   Default_snapshot(Logger&, const version_t ver) :Logical_snapshot(ver)
   {}
 
+  /// Return snapshot type.
   enum_snap_type type() const
   { return DEFAULT_SNAPSHOT; }
 
+  /// Return the name of the snapshot.
   const char* name() const
   { return "Default"; }
 

=== modified file 'sql/backup/be_logical.h'
--- a/sql/backup/be_logical.h	2008-07-07 12:51:56 +0000
+++ b/sql/backup/be_logical.h	2008-12-18 21:46:36 +0000
@@ -22,17 +22,25 @@ class Logical_snapshot :public Snapshot_
 {
 public:
 
+  /// Constructor
   Logical_snapshot(version_t ver) :Snapshot_info(ver) {}
+
+  /**
+    Get opened TABLE structure for the table at position @c pos.
+
+    This method should be called only after tables have been opened and locked
+    by the backup kernel.
+
+    @param[in] pos The position of the table in the list.
+
+    @returns Pointer to table.
+  */ 
   TABLE*      get_opened_table(ulong pos) const;
+
+  /// Return the current table list.
   const Table_list& get_table_list() const;
 };
 
-/**
-  Get opened TABLE structure for the table at position @c pos.
-
-  This method should be called only after tables have been opened and locked
-  by the backup kernel.
-*/ 
 inline
 TABLE *Logical_snapshot::get_opened_table(ulong pos) const
 {

=== modified file 'sql/backup/be_native.h'
--- a/sql/backup/be_native.h	2008-04-16 18:23:05 +0000
+++ b/sql/backup/be_native.h	2008-12-18 21:46:36 +0000
@@ -17,8 +17,9 @@ class Native_snapshot: public Snapshot_i
   const char *m_name;  ///< Saved name of storage engine.
   uint       m_se_ver; ///< Storage engine version number.
 
- public:
+public:
 
+  /// Constructor
   Native_snapshot(Logger &log, const storage_engine_ref se) 
     :Snapshot_info(0), m_hton(NULL), m_be(NULL)
   {
@@ -26,7 +27,8 @@ class Native_snapshot: public Snapshot_i
     if (m_be)
       m_version= m_be->version();
   }
-  
+
+  /// Constructor
   Native_snapshot(Logger &log, const version_t ver, const storage_engine_ref se) 
     :Snapshot_info(ver), m_hton(NULL), m_be(NULL)
   {
@@ -45,9 +47,11 @@ class Native_snapshot: public Snapshot_i
   enum_snap_type type() const
   { return NATIVE_SNAPSHOT; }
 
+  /// Return the version number of the storage engine.
   uint se_ver() const
   { return m_se_ver; }
 
+  /// Return the name of the storage engine.
   const char* se_name() const
   { return m_name; }
 
@@ -72,7 +76,7 @@ class Native_snapshot: public Snapshot_i
     return m_be->get_restore(m_version, Driver::PARTIAL, m_tables, drv);
   }
 
- private:
+private:
 
   int init(Logger &log, const storage_engine_ref se);
 };

=== modified file 'sql/backup/be_nodata.cc'
--- a/sql/backup/be_nodata.cc	2008-05-14 00:24:06 +0000
+++ b/sql/backup/be_nodata.cc	2008-12-18 21:46:36 +0000
@@ -45,19 +45,14 @@ using backup::Buffer;
 
 using namespace backup;
 
-Engine::Engine(THD *t_thd)
-{
-  m_thd= t_thd;
-}
-
 /**
   Create a nodata backup backup driver.
   
   Creates a stubbed driver class for the backup kernel code. This
   allows the driver to be used in a backup while not reading data.
   
-  @param[IN]  tables list of tables to be backed-up.
-  @param[OUT] eng    pointer to backup driver instance.
+  @param[in]  tables list of tables to be backed-up.
+  @param[out] drv    pointer to backup driver instance.
   
   @retval  ERROR  if cannot create backup driver class.
   @retval  OK     on success.
@@ -78,6 +73,10 @@ result_t Engine::get_backup(const uint32
 
   This method is the main method used in the backup operation. It
   is stubbed and does not read any data.
+
+  @param[in]  buf  buffer to be filled with backup data
+
+  @returns DONE
 */
 result_t Backup::get_data(Buffer &buf)
 {
@@ -93,16 +92,19 @@ result_t Backup::get_data(Buffer &buf)
   
   Creates a stubbed driver class for the backup kernel code. This
   allows the driver to be used in a restore while not writing data.
-  
-  @param[IN]  version  version of the backup image.
-  @param[IN]  tables   list of tables to be restored.
-  @param[OUT] eng      pointer to restore driver instance.
-  
+
+  @param[in]  version  version of the backup image.
+  @param[in]  flags    additional info about restore operation.
+  @param[in]  tables   list of tables to be restored.
+  @param[out] drv      pointer to restore driver instance.
+
   @retval ERROR  if cannot create restore driver class.
   @retval OK     on success.
 */
-result_t Engine::get_restore(version_t, const uint32, 
-                             const Table_list &tables, Restore_driver* &drv)
+result_t Engine::get_restore(const version_t, 
+                             const uint32, 
+                             const Table_list &tables, 
+                             Restore_driver* &drv)
 {
   DBUG_ENTER("Engine::get_restore");
   Restore *ptr= new nodata_backup::Restore(tables, m_thd);

=== modified file 'sql/backup/be_nodata.h'
--- a/sql/backup/be_nodata.h	2008-07-19 03:03:39 +0000
+++ b/sql/backup/be_nodata.h	2008-12-18 21:46:36 +0000
@@ -16,7 +16,7 @@ using backup::Buffer;
 /**
   @class Engine
  
-  @brief Encapsulates nodata online backup/restore functionality.
+  Encapsulates nodata online backup/restore functionality.
  
   Using this class, the caller can create an instance of the nodata backup
   backup and restore class. The nodata driver does not read or write to any 
@@ -25,72 +25,75 @@ using backup::Buffer;
 */
 class Engine: public Backup_engine
 {
-  public:
-    Engine(THD *t_thd);
-
-    /*
-      Return version of backup images created by this engine.
-    */
-    version_t version() const { return 0; };
-    result_t get_backup(const uint32, const Table_list &tables, 
-                        Backup_driver* &drv);
-    result_t get_restore(const version_t ver, const uint32, const Table_list &tables,
-                         Restore_driver* &drv);
-
-    /*
-     Free any resources allocated by the nodata backup engine.
-    */
-    void free() { delete this; }
+public:
+  /// Constructor
+  Engine(THD *t_thd) { m_thd= t_thd; }
+
+  /*
+    Return version of backup images created by this engine.
+  */
+  version_t version() const { return 0; };
+  result_t get_backup(const uint32, const Table_list &tables, 
+                      Backup_driver* &drv);
+  result_t get_restore(const version_t version, const uint32 flags, 
+                       const Table_list &tables, Restore_driver* &drv);
+
+  /*
+   Free any resources allocated by the nodata backup engine.
+  */
+  void free() { delete this; }
 
 private:
-    THD *m_thd;
+  THD *m_thd; ///< Current thread reference.
 };
 
 /**
   @class Backup
  
-  @brief Contains the nodata backup algorithm backup functionality.
+  Contains the nodata backup algorithm backup functionality.
  
   Creates a stubbed driver class for the backup kernel code. This
   allows the driver to be used in a backup while not reading data.
 */
 class Backup: public Backup_driver
 {
-  public:
-    Backup(const backup::Table_list &tables):
-    Backup_driver(tables) {};
-    virtual ~Backup() {}; 
-    size_t size()  { return 0; };
-    size_t init_size() { return 0; };
-    result_t begin(const size_t) { return backup::OK; };
-    result_t end() { return backup::OK; };
-    result_t get_data(Buffer &buf);
-    result_t lock() { return backup::OK; };
-    result_t unlock() { return backup::OK; };
-    result_t cancel() { return backup::OK; };
-    void free() { delete this; };
-    result_t prelock() { return backup::OK; };
+public:
+  /// Constructor
+  Backup(const backup::Table_list &tables):
+  Backup_driver(tables) {};
+  virtual ~Backup() {}; 
+  size_t size()  { return 0; };
+  size_t init_size() { return 0; };
+  result_t begin(const size_t) { return backup::OK; };
+  result_t end() { return backup::OK; };
+  result_t get_data(Buffer &buf);
+  result_t lock() { return backup::OK; };
+  result_t unlock() { return backup::OK; };
+  result_t cancel() { return backup::OK; };
+  void free() { delete this; };
+  result_t prelock() { return backup::OK; };
 };
 
 /**
   @class Restore
  
-  @brief Contains the nodata backup algorithm restore functionality.
+  Contains the nodata backup algorithm restore functionality.
  
   Creates a stubbed driver class for the backup kernel code. This
   allows the driver to be used in a restore while not writing data.
 */
 class Restore: public Restore_driver
 {
-  public:
-    Restore(const Table_list &tables, THD *t_thd):
-        Restore_driver(tables) {};
-    virtual ~Restore() {};
-    result_t begin(const size_t) { return backup::OK; };
-    result_t end() { return backup::OK; };
-    result_t send_data(Buffer &buf);
-    result_t cancel() { return backup::OK; };
-    void free() { delete this; };
+public:
+  /// Constructor
+  Restore(const Table_list &tables, THD *t_thd):
+         Restore_driver(tables) {};
+  virtual ~Restore() {};
+  result_t begin(const size_t) { return backup::OK; };
+  result_t end() { return backup::OK; };
+  result_t send_data(Buffer &buf);
+  result_t cancel() { return backup::OK; };
+  void free() { delete this; };
 };
 } // nodata_backup namespace
 
@@ -105,18 +108,27 @@ namespace backup {
 
 class Logger;
 
+/**
+  @class Nodata_snapshot
+
+  This extends Snapshot_info for implementation of the no data backup driver.
+*/
 class Nodata_snapshot: public Snapshot_info
 {
- public:
+public:
 
+  /// Constructor
   Nodata_snapshot(Logger&) :Snapshot_info(1) // current version number is 1
   {}
+  /// Constructor
   Nodata_snapshot(Logger&, const version_t ver) :Snapshot_info(ver)
   {}
 
+  /// Return snapshot type.
   enum_snap_type type() const
   { return NODATA_SNAPSHOT; }
 
+  /// Return the name of the snapshot.
   const char* name() const
   { return "Nodata"; }
 

=== modified file 'sql/backup/be_snapshot.cc'
--- a/sql/backup/be_snapshot.cc	2008-12-12 11:36:54 +0000
+++ b/sql/backup/be_snapshot.cc	2009-01-21 15:00:23 +0000
@@ -15,30 +15,30 @@
 */
 
 /**
- * @file
- *
- * @brief Contains the snapshot backup algorithm driver.
- *
- * This file contains the snapshot backup algorithm (also called a "driver"
- * in the online backup terminology. The snapshot backup algorithm may be
- * used in place of an engine-specific driver if one does not exist or if
- * chosen by the user.
- *
- * The snapshot backup algorithm is a non-blocking algorithm that enables a
- * consistent read of the tables given at the start of the backup/restore 
- * process. This is accomplished by using a consistent snapshot transaction
- * and table locks. Once all of the data is backed up or restored, the locks 
- * are removed. The snapshot backup is a row-level backup and therefore does 
- * not backup the indexes or any of the engine-specific files.
- *
- * The classes in this file use the namespace "snapshot_backup" to distinguish
- * these classes from other backup drivers. The backup functionality is
- * contained in the backup class shown below. Similarly, the restore
- * functionality is contained in the restore class below.
- *
- * The format of the backup is the same as the default backup driver.
- * Please see <code> be_default.cc </code> for a complete description.
- */
+   @file
+  
+   @brief Contains the snapshot backup algorithm driver.
+  
+   This file contains the snapshot backup algorithm (also called a "driver"
+   in the online backup terminology). The snapshot backup algorithm may be
+   used in place of an engine-specific driver if one does not exist or if
+   chosen by the user.
+  
+   The snapshot backup algorithm is a non-blocking algorithm that enables a
+   consistent read of the tables given at the start of the backup/restore 
+   process. This is accomplished by using a consistent snapshot transaction
+   and table locks. Once all of the data is backed up or restored, the locks 
+   are removed. The snapshot backup is a row-level backup and therefore does 
+   not backup the indexes or any of the engine-specific files.
+  
+   The classes in this file use the namespace "snapshot_backup" to distinguish
+   these classes from other backup drivers. The backup functionality is
+   contained in the backup class shown below. Similarly, the restore
+   functionality is contained in the restore class below.
+  
+   The format of the backup is the same as the default backup driver.
+   Please see <code> be_default.cc </code> for a complete description.
+*/
 
 #include "../mysql_priv.h"
 #include "backup_engine.h"

=== modified file 'sql/backup/be_snapshot.h'
--- a/sql/backup/be_snapshot.h	2008-07-19 03:03:39 +0000
+++ b/sql/backup/be_snapshot.h	2008-12-18 21:46:36 +0000
@@ -15,19 +15,20 @@ using backup::Table_ref;
 using backup::Buffer;
 
 /**
- * @class Backup
- *
- * @brief Contains the snapshot backup algorithm backup functionality.
- *
- * The backup class is a row-level backup mechanism designed to perform
- * a table scan on each table reading the rows and saving the data to the
- * buffer from the backup algorithm using a consistent read transaction.
- *
- * @see <backup driver>
+  @class Backup
+ 
+  Contains the snapshot backup algorithm backup functionality.
+ 
+  The backup class is a row-level backup mechanism designed to perform
+  a table scan on each table reading the rows and saving the data to the
+  buffer from the backup algorithm using a consistent read transaction.
+ 
+  @see Backup_driver
  */
 class Backup: public default_backup::Backup
 {
-  public:
+public:
+    /// Constructor
     Backup(const Table_list &tables, THD *t_thd) 
       :default_backup::Backup(tables, t_thd, TL_READ) 
     { 
@@ -35,20 +36,27 @@ class Backup: public default_backup::Bac
       m_cancel= FALSE;
       m_trans_start= FALSE;
     };
+    /// Destructor
     virtual ~Backup() { cleanup(); };
-    result_t begin(const size_t) { return backup::OK; };
-    result_t end() { return backup::OK; };
+
+    /// Initialize backup process
+    result_t begin(const size_t) { return backup::OK; }; 
+    /// End backup process
+    result_t end() { return backup::OK; }
     result_t get_data(Buffer &buf);
+    /// Initiate a prelock
     result_t prelock() { return backup::READY; }
     result_t lock();
+    /// Unlock signal
     result_t unlock() { return backup::OK; };
+    /// Cancel the process
     result_t cancel() 
     { 
       m_cancel= TRUE;
       cleanup();
       return backup::OK;
     }
-  private:
+private:
     my_bool tables_open;   ///< Indicates if tables are open
     my_bool m_cancel;      ///< Cancel backup
     my_bool m_trans_start; ///< Is transaction stated?
@@ -57,19 +65,20 @@ class Backup: public default_backup::Bac
 };
 
 /**
- * @class Restore
- *
- * @brief Contains the snapshot backup algorithm restore functionality.
- *
- * The restore class is a row-level backup mechanism designed to restore
- * data for each table by writing the data for the rows from the
- * buffer given by the backup algorithm.
- *
- * @see <restore driver>
+  @class Restore
+ 
+  Contains the snapshot backup algorithm restore functionality.
+ 
+  The restore class is a row-level backup mechanism designed to restore
+  data for each table by writing the data for the rows from the
+  buffer given by the backup algorithm.
+ 
+  @see Restore_driver
  */
 class Restore: public default_backup::Restore
 {
-  public:
+public:
+    /// Constructor
     Restore(const backup::Logical_snapshot &snap, THD *t_thd)
       :default_backup::Restore(snap, t_thd){};
     virtual ~Restore(){};
@@ -86,19 +95,25 @@ class Restore: public default_backup::Re
 
 namespace backup {
 
-
+/**
+  Extends Logical_snapshot to implement the consistent snapshot backup driver.
+*/
 class CS_snapshot: public Logical_snapshot
 {
- public:
+public:
 
+  /// Constructor
   CS_snapshot(Logger&) :Logical_snapshot(1) // current version number is 1
   {}
+  /// Constructor
   CS_snapshot(Logger&, version_t ver) :Logical_snapshot(ver)
   {}
 
+  /// Return snapshot type.
   enum_snap_type type() const
   { return CS_SNAPSHOT; }
 
+  /// Return the name of the snapshot.
   const char* name() const
   { return "Snapshot"; }
 

=== modified file 'sql/backup/be_thread.cc'
--- a/sql/backup/be_thread.cc	2008-12-13 16:34:25 +0000
+++ b/sql/backup/be_thread.cc	2009-01-21 15:06:10 +0000
@@ -15,32 +15,32 @@
 */
 
 /**
-  * @file
-  *
-  * @brief Contains the thread methods for online backup.
-  *
-  * The methods in this class are used to initialize the mutexes
-  * for the backup threads. Helper methods are included to make thread
-  * calls easier for the driver code.
-  */
+  @file
+  
+  @brief Contains the thread methods for online backup.
+  
+  The methods in this class are used to initialize the mutexes
+  for the backup threads. Helper methods are included to make thread
+  calls easier for the driver code.
+*/
 
 #include "../mysql_priv.h"
 #include "be_thread.h"
 
 /**
-  *  @brief Creates a new THD object.
-  *
-  * Creates a new THD object for use in running as a separate thread.
-  *
-  * @returns Pointer to new THD object or 0 if error.
-  *
-  * @TODO Move this method to a location where ha_ndbcluster_binlog.cc can
-  *       use it and replace code in ndb_binlog_thread_func(void *arg) to
-  *       call this function.
-  *
-  * @note my_net_init() this should be paired with my_net_end() on 
-  *       close/kill of thread.
-  */
+  @brief Creates a new THD object.
+  
+  Creates a new THD object for use in running as a separate thread.
+  
+  @returns Pointer to new THD object or 0 if error.
+  
+  @todo Move this method to a location where ha_ndbcluster_binlog.cc can
+        use it and replace code in ndb_binlog_thread_func(void *arg) to
+        call this function.
+  
+  @note my_net_init() this should be paired with my_net_end() on 
+        close/kill of thread.
+*/
 THD *create_new_thd()
 {
   THD *thd;
@@ -91,14 +91,14 @@ THD *create_new_thd()
 }
 
 /**
-  * @brief Lock tables in driver.
-  *
-  * This method creates a new THD for use in the new thread. It calls
-  * the method to open and lock the tables.
-  *
-  * @note my_thread_init() should be paired with my_thread_end() on 
-  *       close/kill of thread.
-  */
+  @brief Lock tables in driver.
+  
+  This method creates a new THD for use in the new thread. It calls
+  the method to open and lock the tables.
+  
+  @note my_thread_init() should be paired with my_thread_end() on 
+        close/kill of thread.
+*/
 pthread_handler_t backup_thread_for_locking(void *arg)
 {
   Locking_thread_st *locking_thd= static_cast<Locking_thread_st *>(arg);

=== modified file 'sql/backup/be_thread.h'
--- a/sql/backup/be_thread.h	2008-07-09 07:12:43 +0000
+++ b/sql/backup/be_thread.h	2008-12-18 21:46:36 +0000
@@ -30,18 +30,20 @@ using backup::Table_list;
    backup_thread_for_locking
 
    This method creates a new thread and opens and locks the tables.
+
+   @param[in] arg  Pointer to the Locking_thread_st structure to use.
 */
 pthread_handler_t backup_thread_for_locking(void *arg);
 
 /**
- * @struct Locking_thread
- *
- * @brief Adds variables for using a locking thread for opening tables.
- *
- * The Backup_thread structure contains a mutex and condition variable
- * for using a thread to open and lock the tables. This is meant to be a
- * generic class that can be used elsewhere for opening and locking tables.
- */
+  Locking_thread_st
+ 
+  @brief Adds variables for using a locking thread for opening tables.
+ 
+  The Backup_thread structure contains a mutex and condition variable
+  for using a thread to open and lock the tables. This is meant to be a
+  generic class that can be used elsewhere for opening and locking tables.
+*/
 struct Locking_thread_st
 {
 public:
@@ -66,25 +68,27 @@ public:
 }; // Locking_thread_st
 
 /**
- * @class Backup_thread_driver
- *
- * @brief Adds variables for using a locking thread for opening tables.
- *
- * The Backup_thread_driver class extends the Backup_driver class by adding
- * a mutex and condition variable for using a thread to open and lock the 
- * tables.
- *
- * @see <backup driver> and <backup thread driver>
- */
+  @class Backup_thread_driver
+ 
+  @brief Adds variables for using a locking thread for opening tables.
+ 
+  The Backup_thread_driver class extends the Backup_driver class by adding
+  a mutex and condition variable for using a thread to open and lock the 
+  tables.
+ 
+  @see Backup_driver
+*/
 class Backup_thread_driver : public Backup_driver
 {
 public:
 
+  /// Constructor
   Backup_thread_driver(const backup::Table_list &tables)
     :Backup_driver(tables) { locking_thd = new Locking_thread_st(); }
+  /// Destructor
   ~Backup_thread_driver() { delete locking_thd; }
 
-  Locking_thread_st *locking_thd;
+  Locking_thread_st *locking_thd;  ///< Pointer to locking thread data.
 }; // Backup_thread_driver class
 
 

=== modified file 'sql/backup/buffer_iterator.cc'
--- a/sql/backup/buffer_iterator.cc	2008-03-04 16:06:28 +0000
+++ b/sql/backup/buffer_iterator.cc	2008-12-18 21:46:36 +0000
@@ -15,28 +15,28 @@
 */
 
 /**
- * @file
- *
- * @brief Contains a buffering class for breaking large data into parts.
- *
- * This file contains a buffering class for buffering large chunks of
- * data. It can be used to store a large chunk of data and iterate
- * through windows of a specified size until all the data is read.
- * It can also be used to recombine the data from smaller windows.
-  */
+  @file
+ 
+  @brief Contains a buffering class for breaking large data into parts.
+ 
+  This file contains a buffering class for buffering large chunks of
+  data. It can be used to store a large chunk of data and iterate
+  through windows of a specified size until all the data is read.
+  It can also be used to recombine the data from smaller windows.
+*/
 
 #include "../mysql_priv.h"
 #include "buffer_iterator.h"
 
 /**
- * @brief Create a buffer iterator.
- *
- * Given a pointer to a block of data, its maximum size, and
- * window size, start iterator for reading or writing data.
- *
- * @param  buff_ptr (in) a pointer to a block of memory
- * @param  size     (in) the maximum size of the data
- */
+  @brief Create a buffer iterator.
+ 
+  Given a pointer to a block of data and its maximum size,
+  start the iterator for reading or writing data.
+ 
+  @param  buff_ptr (in) a pointer to a block of memory
+  @param  size     (in) the maximum size of the data
+*/
 int Buffer_iterator::initialize(byte *buff_ptr, size_t size)
 {
   DBUG_ENTER("buffer_iterator::initialize(buff_ptr, size, window)");
@@ -50,13 +50,13 @@ int Buffer_iterator::initialize(byte *bu
 }
 
 /**
- * @brief Create a buffer iterator.
- *
- * Given the maximum size of a block of data and the
- * window size, start iterator for reading or writing data.
- *
- * @param  size     (in) the maximum size of the data
- */
+  @brief Create a buffer iterator.
+ 
+  Given the maximum size of a block of data,
+  start the iterator for reading or writing data.
+ 
+  @param  size     (in) the maximum size of the data
+*/
 int Buffer_iterator::initialize(size_t size)
 {
   DBUG_ENTER("buffer_iterator::initialize(size, window)");
@@ -70,10 +70,10 @@ int Buffer_iterator::initialize(size_t s
 }
 
 /**
- * @brief Reset buffer iterator.
- *
- * Destroy any memory used.
- */
+  @brief Reset buffer iterator.
+ 
+  Destroy any memory used.
+*/
 int Buffer_iterator::reset()
 {
   DBUG_ENTER("buffer_iterator::reset()");
@@ -85,18 +85,18 @@ int Buffer_iterator::reset()
 }
 
 /**
- * @brief Get the next window of data in the iterator.
- *
- * This method retrieves the next window in the iterator. It
- * returns the number of bytes read (may be less if last
- * window is smaller than the max window size), and updates
- * the pointer passed as an argument.
- *
- * @param  buff_ptr  (in) a pointer to the window to be read
- * @param  window    (in) the size of the window
- * 
- * @retval the size of the window
- */
+  @brief Get the next window of data in the iterator.
+ 
+  This method retrieves the next window in the iterator. It
+  returns the number of bytes read (may be less if last
+  window is smaller than the max window size), and updates
+  the pointer passed as an argument.
+ 
+  @param  buff_ptr  (out) on return, points to the window that was read
+  @param  window    (in) the size of the window
+  
+  @returns the size of the window
+*/
 size_t Buffer_iterator::get_next(byte **buff_ptr, size_t window)
 {
   size_t bytes_read;
@@ -126,19 +126,19 @@ size_t Buffer_iterator::get_next(byte **
 }
 
 /**
- * @brief Insert the next window of data into the iterator.
- *
- * This method inserts the next window into the iterator. It
- * uses the pointer passed as an argument to copy data from 
- * that location to the internal buffer based on the size of
- * the window passed as an argument.
- *
- * @param  buff_ptr (in/out) a pointer to the window to be written
- * @param  size     (in) the size of the window to be written
- * 
- * @retval 0  success
- * @retval 1  window size exceeds maximum size of the block of data
- */
+  @brief Insert the next window of data into the iterator.
+ 
+  This method inserts the next window into the iterator. It
+  uses the pointer passed as an argument to copy data from 
+  that location to the internal buffer based on the size of
+  the window passed as an argument.
+ 
+  @param  buff_ptr (in) a pointer to the window to be written
+  @param  size     (in) the size of the window to be written
+  
+  @retval 0  success
+  @retval 1  window size exceeds maximum size of the block of data
+*/
 int Buffer_iterator::put_next(byte *buff_ptr, size_t size)
 {
   DBUG_ENTER("buffer_iterator::put_next()");
@@ -154,18 +154,18 @@ int Buffer_iterator::put_next(byte *buff
 }
 
 /**
- * @brief Determines the number of windows left to read.
- *
- * This method calculates how many windows are left to read in
- * the iterator. Use this method following initialize() to
- * determine the maximum windows you can write to the buffer or
- * use this method to determine how many more windows are 
- * remaining to be read.
- * 
- * @param  size     (in) the size of the window 
- *
- * @retval the number of windows left to read
- */
+  @brief Determines the number of windows left to read.
+ 
+  This method calculates how many windows are left to read in
+  the iterator. Use this method following initialize() to
+  determine the maximum windows you can write to the buffer or
+  use this method to determine how many more windows are 
+  remaining to be read.
+  
+  @param  size     (in) the size of the window 
+ 
+  @returns the number of windows left to read
+*/
 int Buffer_iterator::num_windows(size_t size)
 {
   int num_windows;
@@ -178,20 +178,20 @@ int Buffer_iterator::num_windows(size_t
 }
 
 /**
- * @brief Retrieve the pointer to the block of data.
- *
- * This method gets the base pointer to the block of data and 
- * returns it to the caller. This method can be used after writing
- * a series of windows to a buffer. When called, the method turns
- * off the free() mechanism for freeing the base memory allocated.
- * This was included to allow callers to reuse the memory. For 
- * example, this method is used in the default algorithm to read
- * and write blob data. On write, the pointer to the blob data (the
- * data in the buffer) is needed to write to the storage engine. Thus,
- * when this method is called the memory is not freed on destruction.
- * 
- * @retval the pointer to the buffer
- */
+  @brief Retrieve the pointer to the block of data.
+ 
+  This method gets the base pointer to the block of data and 
+  returns it to the caller. This method can be used after writing
+  a series of windows to a buffer. When called, the method turns
+  off the free() mechanism for freeing the base memory allocated.
+  This was included to allow callers to reuse the memory. For 
+  example, this method is used in the default algorithm to read
+  and write blob data. On write, the pointer to the blob data (the
+  data in the buffer) is needed to write to the storage engine. Thus,
+  when this method is called the memory is not freed on destruction.
+  
+  @returns the pointer to the buffer
+*/
 byte *Buffer_iterator::get_base_ptr()
 {
   byte *ptr;

=== modified file 'sql/backup/buffer_iterator.h'
--- a/sql/backup/buffer_iterator.h	2008-03-04 16:06:28 +0000
+++ b/sql/backup/buffer_iterator.h	2008-12-18 21:46:36 +0000
@@ -4,72 +4,72 @@
 using backup::byte;
 
 /**
- * @class Buffer_iterator
- *
- * @brief Encapsulates data buffering functionality.
- *
- * This class is used in the backup drivers for buffering large blocks
- * of data into smaller windows. This allows the backup drivers to
- * store a large field in multiple blocks of data allocated by the
- * backup kernel.
- *
- * For example, if a driver needs to store a blob field of size 8000 
- * bytes, but the kernel provides a buffer of size 1024 bytes, this 
- * class can be used to break the data into 8 parts. Upon restore, 
- * this class can be used to reassemble the parts into the original
- * size of data and thus write the data as one block to the engine.
- *
- * The class provides two methods for creation. The class can take
- * a pointer to an existing memory block or if omitted will allocate
- * a buffer of the size passed. See the class constructor for more
- * details.
- *
- * To use this class for reading, instantiate the class passing in
- * a pointer to the block you want to read, the total size of the
- * block, and the size of the window you want to read. Then call
- * get_next() for each window you want to read. You can use the
- * num_windows() method to indicate how many windows are left to
- * read. This is best used in a loop-like arrangement like the 
- * following:
- *
- * byte *ptr;
- * byte *outptr;
- * ptr= (byte *)my_malloc(8000, MYF(0));
- * Buffer_iterator *my_buff = new Buffer_iterator(ptr, 8000, 1024);
- * while (my_buff->num_windows())
- * {
- *   bytes_read= my_buff->get_next(&out_ptr);
- *   // do something with the window in out_ptr here 
- * } 
- *
- * Note: If you want to permit the Buffer_iterator class to create
- * it's own buffer, you must use the put_next() method to copy the data
- * from your own buffer into the buffer in the class.
- *
- * To use this class for writing, instantiate the class passing in 
- * the total size of the block, and the size of the window you will be
- * writing. Note: The window size is not used for writing. Then call
- * put_next() to insert a window into the buffer. Once all of the data 
- * has been placed into the buffer, you can use the get_base_ptr() to
- * retrieve the pointer to the buffer. This is best used in a loop-like
- * arrangement like the following:
- *
- * long size; //contains size of window to write
- * byte *ptr; //contains the pointer to the window
- *
- * size= read_my_data(&ptr); //read data here
- * Buffer_iterator *my_buff = new Buffer_iterator(8000, 1024);
- * while (there is data to read)
- * {
- *   my_buff->put_next(&out_ptr, size);
- *   // read more here 
- * } 
- * write_my_reassembled_block(my_buff->get_base_ptr(), total_size);
- *
- */
+  @class Buffer_iterator
+ 
+  @brief Encapsulates data buffering functionality.
+ 
+  This class is used in the backup drivers for buffering large blocks
+  of data into smaller windows. This allows the backup drivers to
+  store a large field in multiple blocks of data allocated by the
+  backup kernel.
+ 
+  For example, if a driver needs to store a blob field of size 8000 
+  bytes, but the kernel provides a buffer of size 1024 bytes, this 
+  class can be used to break the data into 8 parts. Upon restore, 
+  this class can be used to reassemble the parts into the original
+  size of data and thus write the data as one block to the engine.
+ 
+  The class provides two methods for creation. The class can take
+  a pointer to an existing memory block or if omitted will allocate
+  a buffer of the size passed. See the class constructor for more
+  details.
+ 
+  To use this class for reading, instantiate the class passing in
+  a pointer to the block you want to read, the total size of the
+  block, and the size of the window you want to read. Then call
+  get_next() for each window you want to read. You can use the
+  num_windows() method to indicate how many windows are left to
+  read. This is best used in a loop-like arrangement like the 
+  following:
+ 
+  byte *ptr;
+  byte *outptr;
+  ptr= (byte *)my_malloc(8000, MYF(0));
+  Buffer_iterator *my_buff = new Buffer_iterator(ptr, 8000, 1024);
+  while (my_buff->num_windows())
+  {
+    bytes_read= my_buff->get_next(&out_ptr);
+    // do something with the window in out_ptr here 
+  } 
+ 
+  Note: If you want to permit the Buffer_iterator class to create
+  it's own buffer, you must use the put_next() method to copy the data
+  from your own buffer into the buffer in the class.
+ 
+  To use this class for writing, instantiate the class passing in 
+  the total size of the block, and the size of the window you will be
+  writing. Note: The window size is not used for writing. Then call
+  put_next() to insert a window into the buffer. Once all of the data 
+  has been placed into the buffer, you can use the get_base_ptr() to
+  retrieve the pointer to the buffer. This is best used in a loop-like
+  arrangement like the following:
+ 
+  long size; //contains size of window to write
+  byte *ptr; //contains the pointer to the window
+ 
+  size= read_my_data(&ptr); //read data here
+  Buffer_iterator *my_buff = new Buffer_iterator(8000, 1024);
+  while (there is data to read)
+  {
+    my_buff->put_next(&out_ptr, size);
+    // read more here 
+  } 
+  write_my_reassembled_block(my_buff->get_base_ptr(), total_size);
+ 
+*/
 class Buffer_iterator
 {
-  public:
+public:
     int initialize(byte *buff_ptr, size_t size);
     int initialize(size_t size);
     int reset();
@@ -78,7 +78,7 @@ class Buffer_iterator
     int num_windows(size_t size);
     byte *get_base_ptr();
 
-  private: 
+private: 
     byte *buffer;          ///< The pointer to the block of data to iterate
     byte *cur_ptr;         ///< The current position in the buffer
     size_t max_size;       ///< The maximum size of the block of data

=== modified file 'sql/backup/data_backup.cc'
--- a/sql/backup/data_backup.cc	2008-12-13 11:02:16 +0000
+++ b/sql/backup/data_backup.cc	2009-01-21 15:06:10 +0000
@@ -28,6 +28,11 @@
 
 namespace backup {
 
+/**
+  Backup state
+
+  Structure to store the backup state.
+*/
 struct backup_state {
 
  /// State of a single backup driver.
@@ -45,8 +50,13 @@ struct backup_state {
 
 #ifndef DBUG_OFF
 
-  static const char* name[];
+  static const char* name[];  ///< The text of the state.
+
+  /** 
+    Initializer
 
+    Structure for containing state names.
+  */
   struct Initializer
   {
     Initializer()
@@ -64,7 +74,7 @@ struct backup_state {
     }
   };
 
- private:
+private:
 
   static Initializer init;
 
@@ -75,7 +85,7 @@ struct backup_state {
 #ifndef DBUG_OFF
 
 const char* backup_state::name[backup_state::MAX];
-backup_state::Initializer init;
+backup_state::Initializer init;  ///< Initializer state.
 
 #endif
 
@@ -87,18 +97,20 @@ backup_state::Initializer init;
  */
 class Block_writer
 {
- public:
+public:
 
+  /// Enumeration of the result conditions.
   enum result_t { OK, NO_RES, ERROR };
 
   result_t  get_buf(Buffer &);
   result_t  write_buf(const Buffer&);
   void      drop_buf(Buffer&);
 
+  /// Constructor
   Block_writer(byte, size_t, Output_stream&);
   ~Block_writer();
 
- private:
+private:
 
   byte           snap_num;  ///< snapshot to which the data belongs
   Output_stream  &m_str;    ///< stream to which we write
@@ -124,23 +136,26 @@ class Block_writer
 
 class Backup_pump
 {
- public:
+public:
 
   backup_state::value  state; ///< State of the backup driver.
 
+  /// Enumeration for mode (reading or writing) data.
   enum { READING, ///< Pump is polling driver for data.
          WRITING  ///< Pump sends data to the stream.
-       } mode;
+       } mode;    ///< mode of operation.
 
-  /** The estimate returned by backup driver's @c init_data() method. */
-  size_t  init_size;
-  size_t  bytes_in, bytes_out;
+  size_t  init_size; ///< size as returned by backup driver's @c init_data().
+  size_t  bytes_in;  ///< number of bytes read from the backup driver.
+  size_t  bytes_out; ///< number of bytes written to the output stream.
 
   const char *m_name; ///< Name of the driver (for debug purposes).
 
+  /// Constructor
   Backup_pump(Snapshot_info&, Block_writer&);
   ~Backup_pump();
 
+  /// Determine if the state of the driver is valid.
   bool is_valid()
   { return m_drv && state != backup_state::ERROR; }
 
@@ -149,8 +164,11 @@ class Backup_pump
   int begin();
   int end();
   int prepare();
+  /// Lock signal
   int lock();
+  /// Unlock signal
   int unlock();
+  /// Cancel the process
   int cancel();
 
   /// Return the backup driver used by the pump.
@@ -160,10 +178,11 @@ class Backup_pump
     return *m_drv;
   }
 
+  /// Set the logger class pointer.
   void set_logger(Logger *log)
   { m_log= log; }
 
- private:
+private:
 
   /// If block writer has no buffers, retry this many times before giving up.
   static const uint get_buf_retries= 3; 
@@ -221,7 +240,7 @@ class Scheduler
   class Pump;
   class Pump_iterator;
 
- public:
+public:
 
   int add(Pump*);
   int step();
@@ -234,18 +253,21 @@ class Scheduler
   uint  prepare_count;  ///< no. drivers preparing for lock
   uint  finish_count;   ///< no. drivers sending final data
 
+  /// Return number of initial items left to process.
   size_t init_left() const
   { return m_known_count? m_init_left/m_known_count + 1 : 0; }
 
+  /// Return total bytes written from members.
   size_t bytes_written() const
   { return m_total; }
 
+  /// Determine if the list is empty.
   bool is_empty() const
   { return m_count == 0; }
 
   ~Scheduler() { cancel_backup(); }
 
- private:
+private:
 
   LIST   *m_pumps, *m_last;
   Logger &m_log;        ///< for reporting errors          
@@ -282,8 +304,9 @@ class Scheduler::Pump: public Backup_pum
 
   friend class Scheduler;
 
- public:
+public:
 
+  /// Constructor
   Pump(Snapshot_info &snap, Output_stream &s)
     :Backup_pump(snap, bw), start_pos(0),
     bw(snap.m_num - 1, DATA_BUFFER_SIZE, s)
@@ -291,6 +314,7 @@ class Scheduler::Pump: public Backup_pum
     DBUG_ASSERT(snap.m_num > 0);
   }
 
+  /// Return current position of pump.
   size_t pos() const
   { return start_pos + bytes_in; }
 };
@@ -360,6 +384,10 @@ int block_commits(THD *thd, TABLE_LIST *
 {
   DBUG_ENTER("block_commits()");
 
+  DBUG_EXECUTE_IF("backup_grl_fail", 
+    /* Mimic behavior of a failing lock_global_read_lock */
+    DBUG_RETURN(1););
+
   /*
     Step 1 - global read lock.
   */
@@ -382,6 +410,12 @@ int block_commits(THD *thd, TABLE_LIST *
     result= close_cached_tables(thd, 0, tables);
   */
 
+  DBUG_EXECUTE_IF("backup_grl_block_commit_fail",
+    /* Mimic behavior of a failing make_global_read_lock_block_commit */
+    unlock_global_read_lock(thd);
+    DBUG_RETURN(1);
+  );
+  
   /*
     Step 3 - make the global read lock to block commits.
   */
@@ -402,13 +436,13 @@ int block_commits(THD *thd, TABLE_LIST *
 
    @param  thd    (in) the current thread structure.
 
-   @returns 0
+   This method cannot fail.
   */
-int unblock_commits(THD *thd)
+void unblock_commits(THD *thd)
 {
   DBUG_ENTER("unblock_commits()");
   unlock_global_read_lock(thd);
-  DBUG_RETURN(0);
+  DBUG_VOID_RETURN;
 }
 
 /**
@@ -652,7 +686,10 @@ int write_table_data(THD* thd, Backup_in
     int error= 0;
     error= block_commits(thd, NULL);
     if (error)
+    {
+      log.report_error(ER_BACKUP_SYNCHRONIZE);
       goto error;
+    }
 
     if (sch.prepare())    // logs errors
       goto error;
@@ -691,9 +728,7 @@ int write_table_data(THD* thd, Backup_in
       Unblock commits.
     */
     DEBUG_SYNC(thd, "before_backup_unblock_commit");
-    error= unblock_commits(thd);
-    if (error)
-      goto error;
+    unblock_commits(thd);
 
 
     report_vp_info(info);
@@ -738,30 +773,36 @@ namespace backup {
  */
 class Scheduler::Pump_iterator
 {
- public:
+public:
 
-  LIST  *el;
+  LIST  *pumps;  ///< The list of pumps.
 
+  /// The member access (arrow) operator.
   Pump* operator->()
   {
-    return el? static_cast<Pump*>(el->data) : NULL;
+    return pumps? static_cast<Pump*>(pumps->data) : NULL;
   }
 
+  /// The increment operator.
   void  operator++()
   {
-    if(el) el= el->next;
+    if(pumps) pumps= pumps->next;
   }
 
+  /// Check to see if the pumps list exists and has data.
   operator bool() const
-  { return el && el->data; }
+  { return pumps && pumps->data; }
 
+  /// The assignment operator.
   void operator=(const Pump_iterator &p)
-  { el= p.el; }
+  { pumps= p.pumps; }
 
-  Pump_iterator(): el(NULL)
+  /// Base constructor for null list.
+  Pump_iterator(): pumps(NULL)
   {}
 
-  Pump_iterator(const Scheduler &sch) :el(sch.m_pumps)
+  /// Base constructor for existing list of pumps.
+  Pump_iterator(const Scheduler &sch) :pumps(sch.m_pumps)
   {}
 
 };
@@ -941,13 +982,13 @@ void Scheduler::move_pump_to_end(const P
 {
   // The pump to move is in the m_pumps list so the list can't be empty.
   DBUG_ASSERT(m_pumps);
-  if (m_last != p.el)
+  if (m_last != p.pumps)
   {
-    m_pumps= list_delete(m_pumps, p.el);
-    m_last->next= p.el;
-    p.el->prev= m_last;
-    p.el->next= NULL;
-    m_last= p.el;
+    m_pumps= list_delete(m_pumps, p.pumps);
+    m_last->next= p.pumps;
+    p.pumps->prev= m_last;
+    p.pumps->next= NULL;
+    m_last= p.pumps;
   }
 }
 
@@ -958,22 +999,22 @@ void Scheduler::move_pump_to_end(const P
  */
 void Scheduler::remove_pump(Pump_iterator &p)
 {
-  DBUG_ASSERT(p.el);
+  DBUG_ASSERT(p.pumps);
 
-  if (m_last == p.el)
+  if (m_last == p.pumps)
     m_last= m_last->prev;
 
   if (m_pumps)
   {
-    m_pumps= list_delete(m_pumps, p.el);
+    m_pumps= list_delete(m_pumps, p.pumps);
     m_count--;
   }
 
   if (p)
   {
     // destructor calls driver's free() method
-    delete static_cast<Pump*>(p.el->data);
-    my_free(p.el, MYF(0));
+    delete static_cast<Pump*>(p.pumps->data);
+    my_free(p.pumps, MYF(0));
   }
 }
 

=== removed file 'sql/backup/debug.h'
--- a/sql/backup/debug.h	2008-02-21 18:28:20 +0000
+++ b/sql/backup/debug.h	1970-01-01 00:00:00 +0000
@@ -1,271 +0,0 @@
-#ifndef _BACKUP_DEBUG_H
-#define _BACKUP_DEBUG_H
-
-#define BACKUP_BREAKPOINT_TIMEOUT 300
-
-/*
-  TODO
-  - decide how to configure DEBUG_BACKUP
- */
-
-#ifndef DBUG_OFF
-# define DBUG_BACKUP
-#endif
-
-#ifdef DBUG_BACKUP
-
-/*
-  Macros for debugging error (or other) conditions. Usage:
-
-  TEST_ERROR_IF(<condition deciding if TEST_ERROR should be true>);
-
-  if (<other conditions> || TEST_ERROR)
-  {
-    <report error>
-  }
-
-  The additional TEST_ERROR condition will be set only if "backup_error_test"
-  error injection is set in the server.
-
-  Notes:
-   - Whenever TEST_ERROR is used in a condition, TEST_ERROR_IF() should
-     be called before - otherwise TEST_ERROR might be unintentionally TRUE.
-   - This mechanism is not thread safe.
- */
-
-namespace backup {
- extern bool test_error_flag;
-}
-
-#define TEST_ERROR  backup::test_error_flag
-// FIXME: DBUG_EXECUTE_IF below doesn't work
-#define TEST_ERROR_IF(X) \
- do { \
-   backup::test_error_flag= FALSE; \
-   DBUG_EXECUTE_IF("backup_error_test", backup::test_error_flag= (X);); \
- } while(0)
-
-#else
-
-//#define BACKUP_BREAKPOINT(S)
-#define TEST_ERROR  FALSE
-#define TEST_ERROR_IF(X)
-
-#endif
-
-/**
-  @page BACKUP_BREAKPOINT Online Backup Breakpoints
-  Macros for creating breakpoints during testing.
-
-  @section WHAT What are breakpoints?
-  Breakpoints are devices used to pause the execution of the backup system
-  at a certain point in the code. There is a timeout that you can specify
-  when you set the lock for the breakpoint (from get_lock() see below)
-  which will enable execution to continue after the period in seconds
-  expires.
-
-  The best use of these breakpoints is for pausing execution at critical
-  points in the backup code to allow proper testing of certain features.
-  For example, suppose you wanted to ensure the Consistent Snapshot driver
-  was working properly. To do so, you would need to ensure no new @INSERT
-  statements are executed while the data is being backed up. If you use
-  a breakpoint, you can set the breakpoint to pause the backup kernel at
-  the point where it has set the consistent read and is reading rows.
-  You can then insert some rows and release the breakpoint. The result
-  should contain all of the rows in the table except those that were
-  inserted once the consistent read was set.
-
-  @section USAGE How to use breakpoints.
-  To make a breakpoint available, you must add a macro call to the code.
-  Simply insert the macro call as follows. The @c breakpoint_name is a 
-  text string that must be unique among the breakpoints. It is used in 
-  the macro as a means of tagging the code for pausing and resuming
-  execution. Once the code is compiled, you can use a client connection
-  to set and release the breakpoint. Be sure to use a separate connection
-  for getting, checking, and releasing locks. 
-
-  <b><c>BACKUP_BREAKPOINT("<breakpoint_name>");</c></b>
-  
-  Breakpoints use the user-defined locking functions @c get_lock() to set
-  the breakpoint and @c release_lock() to release it.
-
-  @subsection SET Setting breakpoints.
-  To set an existing breakpoint, issue the following command where @c
-  timeout is the number of seconds execution will pause once the breakpoint
-  is reached before continuing execution.
-
-  <b><c>SELECT get_lock("<breakpoint_name>",<timeout>);</c></b>
-
-  @subsection RELEASE Releasing breakpoints.
-  To release an existing breakpoint, issue the following command. This
-  releases execution allow the system to continue.
-
-  <b><c>SELECT release_lock("<breakpoint_name>");</c></b>
-
-  @subsection EXAMPLE Example - Testing the Consistent Snapshot Driver
-  To test the consistent snapshot driver, we can make use of the @c
-  backup_cs_locked breakpoint to pause execution after the consistent read
-  is initiated and before all of the rows from the table have been read.
-  Consider an InnoDB table with the following structure as our test table.
-
-  <c>CREATE TABLE t1 (a INT) ENGINE=INNODB;</c>
-
-  To perform this test using breakpoints, we need two client connections.
-  One will be used to execute the backup command and another to set and
-  release the breakpoint. In the first client, we set the breakpoint with
-  the <c>SELECT get_lock("backup_cs_locked", 100);</c> command. In the 
-  second client, we start the execution of the backup. We can return to
-  the first client and issue several @INSERT statements then issue the
-  <c>SELECT release_lock("backup_cs_locked");</c> command to release the
-  breakpoint.
-
-  We can then return to the second client, select all of the rows from the
-  table to verify the rows were inserted. We can verify that the consistent
-  snapshot worked by restoring the database (which is a destructive restore)
-  and then select all of the rows. This will show that the new rows 
-  inserted while the backup was running were not inserted into the table.
-  The following shows the output of the commands as described.
-
-  <b>First Client</b>
-  @code mysql> SELECT * FROM t1;
-  +---+
-  | a |
-  +---+
-  | 1 |
-  | 2 |
-  | 3 |
-  +---+
-  3 rows in set (0.00 sec)
-
-  mysql> SELECT get_lock("backup_cs_locked", 100);
-  +-----------------------------------+
-  | get_lock("backup_cs_locked", 100) |
-  +-----------------------------------+
-  |                                 1 |
-  +-----------------------------------+
-  1 row in set (0.00 sec) @endcode
-
-  <b>Second Client</b>
-  @code mysql> BACKUP DATABASE test TO 'test.bak'; @endcode
-
-  Note: The backup will pause while the breakpoint is set (the lock is held).
-
-  <b>First Client</b>
-  @code mysql> INSERT INTO t1 VALUES (101), (102), (103);
-  Query OK, 3 rows affected (0.02 sec)
-  Records: 3  Duplicates: 0  Warnings: 0
-
-  mysql> SELECT * FROM t1;
-  +-----+
-  | a   |
-  +-----+
-  |   1 |
-  |   2 |
-  |   3 |
-  | 101 |
-  | 102 |
-  | 103 |
-  +-----+
-  6 rows in set (0.00 sec)
-  
-  mysql> SELECT release_lock("backup_cs_locked");
-  +----------------------------------+
-  | release_lock("backup_cs_locked") |
-  +----------------------------------+
-  |                                1 |
-  +----------------------------------+
-  1 row in set (0.01 sec) @endcode
-
-  <b>Second Client</b>
-  @code +------------------------------+
-  | Backup Summary               |
-  +------------------------------+
-  |  header     =       14 bytes |
-  |  meta-data  =      120 bytes |
-  |  data       =       30 bytes |
-  |               -------------- |
-  |  total             164 bytes |
-  +------------------------------+
-  5 rows in set (33.45 sec)
-
-  mysql> SELECT * FROM t1;
-  +-----+
-  | a   |
-  +-----+
-  |   1 |
-  |   2 |
-  |   3 |
-  | 101 |
-  | 102 |
-  | 103 |
-  +-----+
-  6 rows in set (0.00 sec)
-  
-  mysql> RESTORE FROM 'test.bak';
-  +------------------------------+
-  | Restore Summary              |
-  +------------------------------+
-  |  header     =       14 bytes |
-  |  meta-data  =      120 bytes |
-  |  data       =       30 bytes |
-  |               -------------- |
-  |  total             164 bytes |
-  +------------------------------+
-  5 rows in set (0.08 sec)
-  
-  mysql> SELECT * FROM t1;
-  +---+
-  | a |
-  +---+
-  | 1 |
-  | 2 |
-  | 3 |
-  +---+
-  3 rows in set (0.00 sec)@endcode
-
-  Note: The backup will complete once breakpoint is released (the lock is
-  released).
-
-  @section BREAKPOINTS Breakpoints
-  The following are the available breakpoints included in the code.
-
-  - <b>backup_command</b>  Occurs at the start of the backup operation.
-  - <b>data_init</b>  Occurs at the start of the <b>INITIALIZE PHASE</b>.
-  - <b>data_prepare</b>  Occurs at the start of the <b>PREPARE PHASE</b>.
-  - <b>data_lock</b>  Occurs at the start of the <b>SYNC PHASE</b>.
-  - <b>data_unlock</b>  Occurs before the unlock calls.
-  - <b>data_finish</b>  Occurs at the start of the <b>FINISH PHASE</b>.
-  - <b>backup_meta</b>  Occurs before the call to write_meta_data().
-  - <b>backup_data</b>  Occurs before the call to write_table_data().
-  - <b>backup_done</b>  Occurs after the call to write_table_data() returns.
-  - <b>backup_cs_locked</b>  Consistent Snapshot - after the consistent
-                             read has been initiated but before rows are read.
-  - <b>backup_cs_open_tables</b>  Consistent Snapshot - before the call to
-                             open and lock tables.
-  - <b>backup_cs_reading</b>  Consistent Snapshot - occurs during read.
-
-  @section NOTES Developer Notes
-  - Breakpoints can be used in debug builds only. You must compile
-  the code using the @c DEBUG_EXTRA preprocessor directive. 
-  - When adding breakpoints, you must add a list item for each breakpoint
-  to the documentation for breakpoints. See the code for the macro
-  definition in @ref debug.h for details.
-  - You must use a dedicated connection for getting and releasing locks. Do
-  not issue a get_lock() or release_lock() in the same connection (thread) as
-  code that calls BACKUP_BREAKPOINT(). Using the same connection to get/release
-  locks and run code that issues BACKUP_BREAKPOINTs will result in an
-  assertion using DEBUG_ASSERT(thd->ull == NULL) from debug_sync_point() in
-  item_func.cc.
-
- */
-
-/*
-  Consider: set thd->proc_info when waiting on lock
-*/
-#define BACKUP_BREAKPOINT(S) \
- do { \
-  DBUG_PRINT("backup",("== breakpoint on '%s' ==",(S))); \
-  DBUG_EXECUTE_IF("backup_debug", DBUG_SYNC_POINT((S),BACKUP_BREAKPOINT_TIMEOUT);); \
- } while (0)
-
-#endif

=== modified file 'sql/backup/error.h'
--- a/sql/backup/error.h	2008-10-03 14:15:40 +0000
+++ b/sql/backup/error.h	2008-12-18 21:46:36 +0000
@@ -5,9 +5,9 @@ namespace util {
 
 /// Used to save messages pushed into the stack
 struct SAVED_MYSQL_ERROR {
-  uint code;
-  MYSQL_ERROR::enum_warning_level level;
-  char *msg;
+  uint code;                             ///< error code
+  MYSQL_ERROR::enum_warning_level level; ///< warning level
+  char *msg;                             ///< error message
 };
 
 

=== modified file 'sql/backup/image_info.cc'
--- a/sql/backup/image_info.cc	2008-11-26 10:05:19 +0000
+++ b/sql/backup/image_info.cc	2008-12-18 21:46:36 +0000
@@ -274,9 +274,9 @@ Image_info::Dbobj* Image_info::get_db_ob
 /**
   Add table to the catalogue.
 
-  @param[in]  db  table's database - this database must already be in 
+  @param[in] db   table's database - this database must already be in 
                   the catalogue
-  @param[in] name name of the table
+  @param[in] table_name name of the table
   @param[in] snap snapshot containing table's data
   @param[in] pos  table's position within the snapshot
 
@@ -286,7 +286,7 @@ Image_info::Dbobj* Image_info::get_db_ob
   @note The snapshot is added to the catalogue if it was not there already.
 
   @see @c get_table().
- */
+*/
 Image_info::Table* 
 Image_info::add_table(Db &db, const ::String &table_name, 
                       Snapshot_info &snap, ulong pos)

=== modified file 'sql/backup/image_info.h'
--- a/sql/backup/image_info.h	2008-12-10 15:53:06 +0000
+++ b/sql/backup/image_info.h	2009-01-08 14:57:41 +0000
@@ -67,66 +67,71 @@ public: // public interface
 
    // datatypes
    
-   typedef enum_bstream_item_type obj_type;
+  /// The object type from the stream item.
+  typedef enum_bstream_item_type obj_type;
 
-   class Obj;   ///< Base for all object classes.
-   class Ts;    ///< Class representing a tablespace.
-   class Db;    ///< Class representing a database.
-   class Table; ///< Class representing a table.
-   class Dbobj; ///< Class representing a per-database object other than table.
+  class Obj;   ///< Base for all object classes.
+  class Ts;    ///< Class representing a tablespace.
+  class Db;    ///< Class representing a database.
+  class Table; ///< Class representing a table.
+  class Dbobj; ///< Class representing a per-database object other than table.
 
-   class Iterator;      ///< Base for all iterators.
-   class Ts_iterator;   ///< Iterates over all tablespaces.
-   class Db_iterator;   ///< Iterates over all databases.
-   class Dbobj_iterator;  ///< Iterates over objects in a database.
+  class Iterator;      ///< Base for all iterators.
+  class Ts_iterator;   ///< Iterates over all tablespaces.
+  class Db_iterator;   ///< Iterates over all databases.
+  class Dbobj_iterator;  ///< Iterates over objects in a database.
 
-   virtual ~Image_info();
+  virtual ~Image_info();
  
-   // info about image (most of it is in the st_bstream_image_header base
+  // info about image (most of it is in the st_bstream_image_header base
 
-   size_t     data_size;      ///< How much of table data is saved in the image.
-   st_bstream_binlog_pos  master_pos; ///< To store master position info.
+  virtual bool is_valid() =0;  ///< Is the structure valid?
 
-   ulong      table_count() const;
-   uint       db_count() const;
-   uint       ts_count() const;
-   ushort     snap_count() const;
+  size_t     data_size;      ///< How much of table data is saved in the image.
+  st_bstream_binlog_pos  master_pos; ///< To store master position info.
 
-   // Examine contents of the catalogue.
+  ulong      table_count() const;
+  uint       db_count() const;
+  uint       ts_count() const;
+  ushort     snap_count() const;
 
-   bool has_db(const String&) const;
+  // Examine contents of the catalogue.
 
-   // Retrieve objects using their coordinates.
+  bool has_db(const String&) const;
 
-   Db*    get_db(uint pos) const;
-   Ts*    get_ts(uint pos) const;
-   Dbobj* get_db_object(uint db_num, ulong pos) const;
-   Table* get_table(ushort snap_num, ulong pos) const;
+  // Retrieve objects using their coordinates.
 
-   // Iterators for enumerating the contents of the archive.
+  Db*    get_db(uint pos) const;
+  Ts*    get_ts(uint pos) const;
+  Dbobj* get_db_object(uint db_num, ulong pos) const;
+  Table* get_table(ushort snap_num, ulong pos) const;
 
-   Db_iterator*     get_dbs() const;
-   Ts_iterator*     get_tablespaces() const;
-   Dbobj_iterator*  get_db_objects(const Db &db) const;
+  // Iterators for enumerating the contents of the archive.
 
-   /**
-     Pointers to @c Snapshot_info objects corresponding to the snapshots
-     present in the image.
-    */ 
-   Snapshot_info *m_snap[MAX_SNAP_COUNT];
+  Db_iterator*     get_dbs() const;
+  Ts_iterator*     get_tablespaces() const;
+  Dbobj_iterator*  get_db_objects(const Db &db) const;
+
+  /**
+    Pointers to @c Snapshot_info objects corresponding to the snapshots
+    present in the image.
+   */ 
+  Snapshot_info *m_snap[MAX_SNAP_COUNT];
    
-   // save timing & binlog info 
+  // save timing & binlog info 
    
-   void save_start_time(const time_t time);   
-   void save_end_time(const time_t time);
-   void save_vp_time(const time_t time);   
+  void save_start_time(const time_t time);   
+  void save_end_time(const time_t time);
+  void save_vp_time(const time_t time);   
+
+  void save_binlog_pos(const ::LOG_INFO&);
+  /// Save the master's binlog position.
+  void save_master_pos(const ::Master_info&);
 
-   void save_binlog_pos(const ::LOG_INFO&);
-   void save_master_pos(const ::Master_info&);
+  /// Return the validity point time.
+  time_t get_vp_time() const;
 
-   time_t get_vp_time() const;
-
- protected: // internal interface
+protected: // internal interface
   
   // Populate the catalogue
   
@@ -140,15 +145,15 @@ public: // public interface
 
  // IMPLEMENTATION
 
- protected:
+protected:
 
   Image_info();
-  uint m_table_count;
+  uint m_table_count;    ///< Number of tables in the image.
   MEM_ROOT  mem_root;    ///< Memory root for storage of catalogue items.
 
   class Tables; ///< Implementation of Table_list interface. 
 
- private:
+private:
 
   Map<uint, Db>   m_dbs; ///< Pointers to Db instances.
   Map<uint, Ts>   m_ts_map; ///< Pointers to Ts instances.
@@ -181,7 +186,7 @@ class Image_info::Tables:
 {
   typedef Map<uint, Image_info::Table> Base;
  
- public:
+public:
 
   Tables(ulong, ulong);
   ulong count() const;
@@ -222,19 +227,17 @@ Image_info::Tables::Tables(ulong init_si
  */
 class Snapshot_info
 {
- public:
+public:
 
+  /// Enumeration for snapshot type
   enum enum_snap_type {
-    /** snapshot created by native backup engine. */
-    NATIVE_SNAPSHOT= BI_NATIVE,
-    /** Snapshot created by built-in, blocking backup engine. */
-    DEFAULT_SNAPSHOT= BI_DEFAULT,
-    /** Snapshot created by built-in CS backup engine. */
-    CS_SNAPSHOT= BI_CS,
-    /** snapshot created by No data backup driver. */
-    NODATA_SNAPSHOT= BI_NODATA
+    NATIVE_SNAPSHOT= BI_NATIVE,   ///< created by native backup engine.
+    DEFAULT_SNAPSHOT= BI_DEFAULT, ///< created by blocking backup engine.
+    CS_SNAPSHOT= BI_CS,           ///< created by CS backup engine.
+    NODATA_SNAPSHOT= BI_NODATA    ///< created by No data backup engine.
   };
 
+  /// Return the type of the snapshot.
   virtual enum_snap_type type() const =0; 
   version_t version() const; ///< Returns version of snapshot's format.
   
@@ -273,14 +276,16 @@ class Snapshot_info
   /// Create restore driver for the image.
   virtual result_t get_restore_driver(Restore_driver*&) =0;
 
+  /// Destructor
   virtual ~Snapshot_info();
 
   Image_info::Table* get_table(ulong pos) const;
 
- protected:
+protected:
  
   version_t m_version; ///< Stores version number of the snapshot's format.
 
+  /// Constructor
   Snapshot_info(const version_t);
 
   // Methods for adding and accessing tables stored in the table list.
@@ -330,7 +335,7 @@ Snapshot_info::~Snapshot_info()
 */
 class Image_info::Obj: public Sql_alloc
 {
- public:
+public:
  
   /* 
     Note: Since we are using Sql_alloc and allocate instances using MEM_ROOT,
@@ -338,6 +343,7 @@ class Image_info::Obj: public Sql_alloc
    */
   virtual ~Obj();
 
+  /// The type of the object
   obj_type type() const;
 
   /**
@@ -346,20 +352,19 @@ class Image_info::Obj: public Sql_alloc
    */ 
   virtual const st_bstream_item_info* info() const =0;
 
-  /**
-    Pointer to the corresponding @c obs::Obj instance, if it is known.
-   */ 
+  /// Pointer to the corresponding @c obs::Obj instance, if it is known.
   obs::Obj  *m_obj_ptr;
-  
-  /**
-    Create corresponding @c obs::Obj instance from a serialization string.
-   */ 
+
+  /// Create corresponding @c obs::Obj instance from a serialization string.
   virtual obs::Obj *materialize(uint ver, const ::String&) =0;
 
+  /// Definition for a table name in a Table_ref object.
   typedef Table_ref::name_buf describe_buf;
+
+  /// Description of object.
   virtual const char* describe(describe_buf&) const =0;
 
- protected:
+protected:
 
   String m_name;  ///< For storing object's name.
 
@@ -389,13 +394,17 @@ class Image_info::Ts
  : public st_bstream_ts_info,
    public Image_info::Obj
 {
- public:
+public:
 
+  /// Constructor
   Ts(const ::String&);
 
+  /// The information about the image.
   const st_bstream_item_info* info() const;
+  /// The information about the tablespace.
   const st_bstream_ts_info* ts_info() const;
   obs::Obj* materialize(uint ver, const ::String &sdata);
+  /// Description of object.
   const char* describe(describe_buf&) const;
 };
 
@@ -418,20 +427,23 @@ class Image_info::Db
 {
   ulong m_obj_count;    ///< Number of non-table objects in the database.
 
- public:
+public:
 
+  /// Constructor
   Db(const ::String&);
 
   const st_bstream_item_info* info() const;
+  /// The database information in the stream.
   const st_bstream_db_info* db_info() const;
   ulong obj_count() const;
   obs::Obj* materialize(uint ver, const ::String &sdata);
   result_t add_obj(Dbobj&, ulong pos);
   Dbobj*   get_obj(ulong pos) const;
   void add_table(Table&);
+  /// Description of object.
   const char* describe(describe_buf&) const;
 
- private:
+private:
  
   Table *first_table; ///< Pointer to the first table in database's table list. 
   Table *last_table;  ///< Pointer to the last table in database's table list.
@@ -470,12 +482,14 @@ class Image_info::Dbobj
 {
   const Db &m_db;     ///< The database to which this obj belongs.
 
- public:
+public:
 
+  /// Constructor
   Dbobj(const Db &db, const obj_type type, const ::String &name);
 
   const st_bstream_item_info* info() const;
   obs::Obj* materialize(uint ver, const ::String &sdata);
+  /// Description of object.
   const char* describe(Obj::describe_buf&) const;
 
   friend class Db;
@@ -506,12 +520,14 @@ class Image_info::Table
   Table  *next_table; ///< Used to crate a linked list of tables in a database.
   TABLE_LIST  *m_table; ///< If not NULL, points at opened table.
 
- public:
+public:
 
+  /// Constructor
   Table(const Db &db, const ::String &name);
 
   const st_bstream_item_info* info() const;
   obs::Obj* materialize(uint ver, const ::String &sdata);
+  /// Description of object.
   const char* describe(Obj::describe_buf&) const;
 
   friend class Db;
@@ -558,8 +574,9 @@ Image_info::Table::Table(const Db &db, c
  */ 
 class Image_info::Iterator
 {
- public:
+public:
 
+  /// Constructor
   Iterator(const Image_info &info);
   virtual ~Iterator();
 
@@ -572,13 +589,15 @@ class Image_info::Iterator
    */
   virtual int init() { return 0; }  
 
+  /// The increment operation.
   Obj* operator++(int);
 
- protected:
+protected:
 
+  /// Reference to the image information object.
   const Image_info &m_info;
 
- private:
+private:
 
   /** 
     Return pointer to the current object of the iterator.
@@ -615,14 +634,15 @@ Image_info::Iterator::~Iterator()
 class Image_info::Ts_iterator
  : public Image_info::Iterator
 {
- public:
+public:
 
+  /// Constructor  
   Ts_iterator(const Image_info&);
 
- protected:
+protected:
 
-  uint pos;
-  Obj* get_ptr() const;
+  uint pos;              ///< position in the iterator
+  Obj* get_ptr() const;  
   bool next();
 };
 
@@ -643,13 +663,14 @@ Image_info::Ts_iterator::Ts_iterator(con
 class Image_info::Db_iterator
  : public Image_info::Iterator
 {
- public:
+public:
 
+  /// Constructor
   Db_iterator(const Image_info&);
 
- protected:
+protected:
 
-  uint pos;
+  uint pos;  ///< Position in the iterator
   Obj* get_ptr() const;
   bool next();
 };
@@ -676,11 +697,12 @@ class Image_info::Dbobj_iterator
   Table *ptr;
   ulong pos;
 
- public:
+public:
 
+  /// Constructor
   Dbobj_iterator(const Image_info&, const Db&);
 
- private:
+private:
 
   Obj* get_ptr() const;
   bool next();
@@ -971,13 +993,13 @@ const st_bstream_item_info* Image_info::
   return &base; 
 }
 
+/// Implementation of Image_info::ts_info virtual method.
 inline
 const st_bstream_ts_info* Image_info::Ts::ts_info() const 
 {
   return this; 
 }
 
-
 /// Implementation of @c Image_info::Obj virtual method.
 inline
 const st_bstream_item_info* Image_info::Table::info() const 
@@ -993,7 +1015,11 @@ const st_bstream_item_info* Image_info::
 }
 
 
-/// Implementation of @c Image_info::Obj virtual method.
+/**
+  Implementation of @c Image_info::Obj virtual method.
+
+  @param[in] buf  The buffer for the description info.
+*/
 inline
 const char* Image_info::Ts::describe(describe_buf &buf) const
 {

=== modified file 'sql/backup/kernel.cc'
--- a/sql/backup/kernel.cc	2009-01-14 10:10:00 +0000
+++ b/sql/backup/kernel.cc	2009-01-21 15:06:10 +0000
@@ -118,10 +118,10 @@ static int send_reply(Backup_restore_ctx
 /**
   Call backup kernel API to execute backup related SQL statement.
 
-  @param[IN] thd        current thread object reference.
-  @param[IN] lex        results of parsing the statement.
-  @param[IN] backupdir  value of the backupdir variable from server.
-  @param[IN] overwrite  whether or not restore should overwrite existing
+  @param[in] thd        current thread object reference.
+  @param[in] lex        results of parsing the statement.
+  @param[in] backupdir  value of the backupdir variable from server.
+  @param[in] overwrite  whether or not restore should overwrite existing
                         DB with same name as in backup image
 
   @note This function sends response to the client (ok, result set or error).
@@ -208,12 +208,6 @@ execute_backup_command(THD *thd, LEX *le
   case SQLCOM_RESTORE:
   {
 
-    /*
-      Restore cannot be run on a slave while connected to a master.
-    */
-    if (obs::is_slave())
-      DBUG_RETURN(send_error(context, ER_RESTORE_ON_SLAVE));
-
     Restore_info *info= context.prepare_for_restore(backupdir, lex->backup_dir, 
                                                     thd->query);
     
@@ -276,8 +270,6 @@ int send_error(Backup_restore_ctx &conte
     va_end(args);
   }
 
-  if (context.backup::Logger::m_state == backup::Logger::RUNNING)
-    context.report_stop(my_time(0), FALSE); // FASLE = no success
   return error_code;
 }
 
@@ -349,7 +341,7 @@ namespace backup {
 */
 class Mem_allocator
 {
- public:
+public:
 
   Mem_allocator();
   ~Mem_allocator();
@@ -357,7 +349,7 @@ class Mem_allocator
   void* alloc(size_t);
   void  free(void*);
 
- private:
+private:
 
   struct node;
   node *first;  ///< Pointer to the first segment in the list.
@@ -382,9 +374,9 @@ pthread_mutex_t Backup_restore_ctx::run_
 
 Backup_restore_ctx::Backup_restore_ctx(THD *thd)
  :Logger(thd), m_state(CREATED), m_thd_options(thd->options),
-  m_error(0), m_remove_loc(FALSE), m_stream(NULL),
+  m_error(0), m_stream(NULL),
   m_catalog(NULL), mem_alloc(NULL), m_tables_locked(FALSE),
-  m_engage_binlog(FALSE)
+  m_engage_binlog(FALSE), m_completed(FALSE)
 {
   /*
     Check for progress tables.
@@ -413,8 +405,8 @@ Backup_restore_ctx::~Backup_restore_ctx(
     2. If orig_loc has a hard path, use it.
     3. If orig_loc has no path, append to backupdir
 
-  @param[IN]  backupdir  The backupdir system variable value.
-  @param[IN]  orig_loc   The path + file name specified in the backup command.
+  @param[in]  backupdir  The backupdir system variable value.
+  @param[in]  orig_loc   The path + file name specified in the backup command.
 
   @returns 0
 */
@@ -489,7 +481,7 @@ int Backup_restore_ctx::prepare_path(::S
 
   @returns 0 on success, error code otherwise.
  */ 
-int Backup_restore_ctx::prepare(String *backupdir, LEX_STRING location)
+int Backup_restore_ctx::prepare(::String *backupdir, LEX_STRING location)
 {
   if (m_error)
     return m_error;
@@ -598,9 +590,14 @@ Backup_restore_ctx::prepare_for_backup(S
 {
   using namespace backup;
   
+  // Do nothing if context is in error state.
   if (m_error)
     return NULL;
   
+  /*
+   Note: Logger must be initialized before any call to report_error() - 
+   otherwise an assertion will fail.
+  */ 
   if (Logger::init(BACKUP, query))      // Logs errors
   {
     fatal_error(ER_BACKUP_LOGGER_INIT);
@@ -631,9 +628,6 @@ Backup_restore_ctx::prepare_for_backup(S
     return NULL;
   }
 
-  // Mark that the file should be removed unless operation completes successfuly
-  m_remove_loc= TRUE;
-
   int my_open_status= s->open();
   if (my_open_status != 0)
   {
@@ -701,15 +695,35 @@ Backup_restore_ctx::prepare_for_restore(
 {
   using namespace backup;  
 
+
+  // Do nothing if context is in error state.
   if (m_error)
     return NULL;
   
+  /*
+   Note: Logger must be initialized before any call to report_error() - 
+   otherwise an assertion will fail.
+  */ 
   if (Logger::init(RESTORE, query))
   {
     fatal_error(ER_BACKUP_LOGGER_INIT);
     return NULL;
   }
 
+  /*
+    Block replication from starting.
+  */
+  obs::block_replication(TRUE, "RESTORE");
+
+  /*
+    Restore cannot be run on a slave while connected to a master.
+  */
+  if (obs::is_slave())
+  {
+    fatal_error(report_error(ER_RESTORE_ON_SLAVE));
+    return NULL;
+  }
+
   time_t when= my_time(0);  
   report_start(when);
 
@@ -840,15 +854,17 @@ Backup_restore_ctx::prepare_for_restore(
 */ 
 int Backup_restore_ctx::lock_tables_for_restore()
 {
-  TABLE_LIST *tables= NULL;
   int ret;
 
   /*
-    Iterate over all tables in all snapshots and create a linked TABLE_LIST
-    for call to open_and_lock_tables(). Store pointers to TABLE_LIST structures
-    in the restore catalogue for later access to opened tables.
-  */ 
+    Iterate over all tables in all snapshots and create a linked
+    TABLE_LIST for call to open_and_lock_tables(). Remember the
+    TABLE_LIST for later use in Backup_restore_ctx::unlock_tables().
+    Store pointers to TABLE_LIST structures in the restore catalogue for
+    later access to opened tables.
+  */
 
+  m_backup_tables= NULL;
   for (uint s= 0; s < m_catalog->snap_count(); ++s)
   {
     backup::Snapshot_info *snap= m_catalog->m_snap[s];
@@ -865,7 +881,8 @@ int Backup_restore_ctx::lock_tables_for_
         return fatal_error(log_error(ER_OUT_OF_RESOURCES));
       }
 
-      tables= backup::link_table_list(*ptr, tables); // Never errors
+      m_backup_tables= backup::link_table_list(*ptr, m_backup_tables);
+      DBUG_ASSERT(m_backup_tables);
       tbl->m_table= ptr;
     }
   }
@@ -879,7 +896,7 @@ int Backup_restore_ctx::lock_tables_for_
     Note 2: Skiping tmp tables is also important because otherwise a tmp table
     can occlude a regular table with the same name (BUG#33574).
   */ 
-  ret= open_and_lock_tables_derived(m_thd, tables,
+  ret= open_and_lock_tables_derived(m_thd, m_backup_tables,
                                     FALSE, /* do not process derived tables */
                                     MYSQL_OPEN_SKIP_TEMPORARY 
                                           /* do not open tmp tables */
@@ -902,6 +919,22 @@ void Backup_restore_ctx::unlock_tables()
 
   DBUG_PRINT("restore",("unlocking tables"));
 
+  /*
+    Refresh tables that have been restored. Some restore drivers might
+    restore a table layout that differs from the version created by
+    materialize(). We need to force a final close after restore with
+    close_cached_tables(). Note that we do this before we unlock the
+    tables. Otherwise other threads could use the still open tables
+    before we refresh them.
+
+    For information about a concrete problem, see the comment in
+    myisam_backup_engine.cc:Table_restore::close().
+
+    Use the restore table list as created by lock_tables_for_restore().
+  */
+  if (m_backup_tables)
+    close_cached_tables(m_thd, m_backup_tables, FALSE, FALSE);
+
   close_thread_tables(m_thd);                   // Never errors
   m_tables_locked= FALSE;
 
@@ -928,19 +961,51 @@ int Backup_restore_ctx::close()
 
   using namespace backup;
 
+  // Move context to error state if the catalog became corrupted.
+  if (m_catalog && !m_catalog->is_valid())
+    fatal_error(m_type == BACKUP ? ER_BACKUP_BACKUP : ER_BACKUP_RESTORE);
+
+  /*
+    Report end of the operation which has started if it has not been done 
+    before (Logger is in RUNNING state). 
+  */ 
+  if (Logger::m_state == RUNNING)
+  {
+    time_t  now= my_time(0);
+    if (m_catalog)
+      m_catalog->save_end_time(now);
+
+    // Report either completion or interruption depending on m_completed flag.
+    if (m_completed)
+      report_completed(now);
+    else
+    {
+      /*
+        If this is restore operation then m_data_changed flag in the 
+        Restore_info object tells if data has been modified or not.
+       */ 
+      const bool data_changed= m_type==RESTORE && m_catalog && 
+                         static_cast<Restore_info*>(m_catalog)->m_data_changed;
+      report_aborted(now, data_changed);
+    }
+  }
+
   /*
     Allow slaves connect after restore is complete.
   */
   obs::disable_slave_connections(FALSE);
 
   /*
+    Allow replication to start after restore is complete.
+  */
+  obs::block_replication(FALSE, "");
+
+  /*
     Turn binlog back on iff it was turned off earlier.
   */
   if (m_engage_binlog)
     obs::engage_binlog(TRUE);
 
-  time_t when= my_time(0);
-
   // unlock tables if they are still locked
   unlock_tables();                              // Never errors
 
@@ -951,7 +1016,7 @@ int Backup_restore_ctx::close()
 
   m_thd->options= m_thd_options;
 
-  // close stream
+  // close stream if not closed already (in which case m_stream is NULL)
 
   if (m_stream && !m_stream->close())
   {
@@ -959,16 +1024,13 @@ int Backup_restore_ctx::close()
     fatal_error(report_error(ER_BACKUP_CLOSE));
   }
 
-  if (m_catalog)
-    m_catalog->save_end_time(when); // Note: no errors.
-
   /* 
-    Remove the location, if asked for.
+    Remove the location if it is BACKUP operation and it has not completed
+    successfully.
     
-    Important: This is done only for backup operation - RESTORE should never
-    remove the specified backup image!
+    Important: RESTORE should never remove the specified backup image!
    */
-  if (m_remove_loc && m_state == PREPARED_FOR_BACKUP)
+  if (m_state == PREPARED_FOR_BACKUP && !m_completed)
   {
     int ret= my_delete(m_path.c_ptr(), MYF(0));
 
@@ -976,17 +1038,8 @@ int Backup_restore_ctx::close()
       Ignore ENOENT error since it is ok if the file doesn't exist.
      */
     if (ret && my_errno != ENOENT)
-      fatal_error(report_error(ER_CANT_DELETE_FILE, m_path.c_ptr(), my_errno));
-  }
-
-  /* We report completion of the operation only if no errors were detected,
-     and logger has been initialized.
-  */
-  if (!m_error)
-  {
-    if (backup::Logger::m_state == backup::Logger::RUNNING)
     {
-      report_stop(when, TRUE);
+      fatal_error(report_error(ER_CANT_DELETE_FILE, m_path.c_ptr(), my_errno));
     }
   }
 
@@ -1065,17 +1118,14 @@ int Backup_restore_ctx::do_backup()
   if (ret)
     DBUG_RETURN(fatal_error(report_error(ER_BACKUP_WRITE_SUMMARY)));
 
-  /*
-    Now backup image has been written. Set m_remove_loc to FALSE, so that the
-    backup file is not removed in Backup_restore_ctx::close().
-  */
-  m_remove_loc= FALSE;
+  DEBUG_SYNC(m_thd, "before_backup_completed");
+  m_completed= TRUE;
   report_stats_post(info);                      // Never errors
 
   DBUG_PRINT("backup",("Backup done."));
   DEBUG_SYNC(m_thd, "before_backup_done");
 
-  DBUG_RETURN(0);
+  DBUG_RETURN(close());
 }
 
 /**
@@ -1100,11 +1150,13 @@ int Backup_restore_ctx::restore_triggers
 
   DBUG_ENTER("restore_triggers_and_events");
 
+  DBUG_ASSERT(m_type == RESTORE);
+  Restore_info *info= static_cast<Restore_info*>(m_catalog);
   Image_info::Obj *obj;
   List<Image_info::Obj> events;
   Image_info::Obj::describe_buf buf;
 
-  Image_info::Iterator *dbit= m_catalog->get_dbs();
+  Image_info::Iterator *dbit= info->get_dbs();
   if (!dbit)
     DBUG_RETURN(fatal_error(report_error(ER_OUT_OF_RESOURCES)));
 
@@ -1113,7 +1165,7 @@ int Backup_restore_ctx::restore_triggers
   while ((obj= (*dbit)++)) 
   {
     Image_info::Iterator *it=
-                    m_catalog->get_db_objects(*static_cast<Image_info::Db*>(obj));
+                       info->get_db_objects(*static_cast<Image_info::Db*>(obj));
     if (!it)
       DBUG_RETURN(fatal_error(report_error(ER_OUT_OF_RESOURCES)));
 
@@ -1131,6 +1183,8 @@ int Backup_restore_ctx::restore_triggers
       
       case BSTREAM_IT_TRIGGER:
         DBUG_ASSERT(obj->m_obj_ptr);
+        // Mark that data is being changed.
+        info->m_data_changed= TRUE;
         if (obj->m_obj_ptr->create(m_thd))
         {
           delete it;
@@ -1155,11 +1209,15 @@ int Backup_restore_ctx::restore_triggers
   Image_info::Obj *ev;
 
   while ((ev= it++)) 
+  {
+    // Mark that data is being changed.
+    info->m_data_changed= TRUE;
     if (ev->m_obj_ptr->create(m_thd))
     {
       int ret= report_error(ER_BACKUP_CANT_RESTORE_EVENT,ev->describe(buf));
       DBUG_RETURN(fatal_error(ret));
     };
+  }
 
   DBUG_RETURN(0);
 }
@@ -1169,7 +1227,7 @@ int Backup_restore_ctx::restore_triggers
 
   @pre @c prepare_for_restore() method was called.
 
-  @param[IN] overwrite whether or not restore should overwrite existing
+  @param[in] overwrite whether or not restore should overwrite existing
                        DB with same name as in backup image
 
   @returns 0 on success, error code otherwise.
@@ -1247,7 +1305,11 @@ int Backup_restore_ctx::do_restore(bool
   if (err)
     DBUG_RETURN(fatal_error(err));
 
-  // Here restore drivers are created to restore table data
+  /* 
+   Here restore drivers are created to restore table data. Data is being
+   (potentially) changed so we set m_data_changed flag.
+  */
+  info.m_data_changed= TRUE;
   err= restore_table_data(m_thd, info, s);      // logs errors
 
   unlock_tables();                              // Never errors
@@ -1277,6 +1339,9 @@ int Backup_restore_ctx::do_restore(bool
 
   DBUG_PRINT("restore",("Done."));
 
+  DEBUG_SYNC(m_thd, "before_restore_completed");
+  m_completed= TRUE;
+
   err= read_summary(info, s);
   if (err)
     DBUG_RETURN(fatal_error(report_error(ER_BACKUP_READ_SUMMARY)));
@@ -1294,7 +1359,7 @@ int Backup_restore_ctx::do_restore(bool
 
   DEBUG_SYNC(m_thd, "before_restore_done");
 
-  DBUG_RETURN(0);
+  DBUG_RETURN(close());
 }
 
 /**
@@ -1342,8 +1407,8 @@ namespace backup {
 /// All allocated memory segments are linked into a list using this structure.
 struct Mem_allocator::node
 {
-  node *prev;
-  node *next;
+  node *prev;   ///< pointer to previous node in list
+  node *next;   ///< pointer to next node in the list
 };
 
 Mem_allocator::Mem_allocator() :first(NULL)
@@ -1462,7 +1527,9 @@ void bstream_free(bstream_byte *ptr)
   (it was read from image's header). Here we create @c Snapshot_info object
   for each of them.
 
-  @rerturns 0 on success, error code otherwise.
+  @param[in]  catalogue  The catalogue to restore.
+
+  @returns 0 on success, error code otherwise.
 */
 extern "C"
 int bcat_reset(st_bstream_image_header *catalogue)
@@ -2025,6 +2092,9 @@ int bcat_create_item(st_bstream_image_he
     }
   }
 
+  // Mark that data is being changed.
+  info->m_data_changed= TRUE;
+
   if (sobj->create(thd))
   {
     log.report_error(create_err, desc);
@@ -2164,6 +2234,15 @@ const char* Table_ref::describe(char *bu
   TODO: remove these functions. Currently they are only used by the myisam 
   native backup engine.
 */
+
+/**
+  Build the table list as a TABLE_LIST.
+
+  @param[in]  tables  The list of tables to convert.
+  @param[in]  lock    The lock type.
+
+  @returns  Pointer to the head of the constructed TABLE_LIST.
+*/
 TABLE_LIST *build_table_list(const Table_list &tables, thr_lock_type lock)
 {
   TABLE_LIST *tl= NULL;
@@ -2182,7 +2261,12 @@ TABLE_LIST *build_table_list(const Table
   return tl;
 }
 
-void free_table_list(TABLE_LIST*)
+/**
+  Free the TABLE_LIST.
+
+  @param[in]  tables  The list of tables to free.
+*/
+void free_table_list(TABLE_LIST* tables)
 {}
 
 } // backup namespace

=== modified file 'sql/backup/logger.cc'
--- a/sql/backup/logger.cc	2008-12-10 15:53:06 +0000
+++ b/sql/backup/logger.cc	2008-12-18 21:46:36 +0000
@@ -118,8 +118,9 @@ int Logger::write_message(log_level::val
 /**
   Output message registered in errmsg.txt database.
 
-  @param level       level of the message (INFO,WARNING,ERROR)
-  @param error_code  code assigned to the message in errmsg.txt
+  @param[in] level       level of the message (INFO,WARNING,ERROR)
+  @param[in] error_code  code assigned to the message in errmsg.txt
+  @param[in] args        list of arguments
 
   If the message contains placeholders, additional arguments provide
   values to be put there.
@@ -206,7 +207,7 @@ void Logger::report_stats_post(const Ima
   backup_log->size(info.data_size);
 }
 
-/*
+/**
  Indicate if reported errors should be pushed on the warning stack.
 
  If @c flag is TRUE, errors will be pushed on the warning stack, otherwise

=== modified file 'sql/backup/logger.h'
--- a/sql/backup/logger.h	2009-01-13 15:26:20 +0000
+++ b/sql/backup/logger.h	2009-01-21 15:00:23 +0000
@@ -12,7 +12,8 @@ namespace backup {
 
 /// Logging levels for messages generated by backup system
 struct log_level {
- enum value {
+  /// Enumeration for logging level (info, warn, error) 
+  enum value {
     INFO=    MYSQL_ERROR::WARN_LEVEL_NOTE,
     WARNING= MYSQL_ERROR::WARN_LEVEL_WARN,
     ERROR=   MYSQL_ERROR::WARN_LEVEL_ERROR
@@ -39,11 +40,14 @@ class Image_info;
  */
 class Logger
 {
- public:
+public:
 
-   enum enum_type { BACKUP = 1, RESTORE } m_type;
-   enum { CREATED, READY, RUNNING, DONE } m_state;
+   /// Enumeration for type of operation
+   enum enum_type { BACKUP = 1, RESTORE } m_type;  ///< type of operation
+   /// Enumeration for log state
+   enum { CREATED, READY, RUNNING, DONE } m_state; ///< state of operation
 
+   /// Constructor
    Logger(THD*);
    ~Logger();
    int init(enum_type type, const char *query);
@@ -55,7 +59,8 @@ class Logger
    int log_error(int error_code, ...);
 
    void report_start(time_t);
-   void report_stop(time_t, bool);
+   void report_completed(time_t);
+   void report_aborted(time_t, bool data_changed);
    void report_state(enum_backup_state);
    void report_vp_time(time_t, bool);
    void report_binlog_pos(const st_bstream_binlog_pos&);
@@ -64,6 +69,8 @@ class Logger
    void report_backup_file(char * path);
    void report_stats_pre(const Image_info&);
    void report_stats_post(const Image_info&);
+
+   /// Return the Backup_id of the current operation
    ulonglong get_op_id() const 
    {
      DBUG_ASSERT(backup_log);
@@ -73,7 +80,7 @@ class Logger
    bool push_errors(bool);
    bool error_reported() const;
 
- protected:
+protected:
 
   /// Thread in which this logger is used.
   THD *m_thd;
@@ -82,7 +89,7 @@ class Logger
   int v_write_message(log_level::value, int, const char*, va_list);
   int write_message(log_level::value level , int error_code, const char *msg);
 
- private:
+private:
 
   // Prevent copying/assigments
   Logger(const Logger&);
@@ -188,26 +195,73 @@ void Logger::report_start(time_t when)
   backup_log->state(BUP_RUNNING);
 }
 
+/** 
+  Report that the operation has completed successfully.
+
+  @param[in] when       the time when operation has completed.
+
+  @note This method can be called only after @c report_start(). It can not be
+  called after end of operation has been logged with either this method or
+  @c report_aborted().
+*/
+inline
+void Logger::report_completed(time_t when)
+{
+   DBUG_ASSERT(m_state == RUNNING);
+   DBUG_ASSERT(backup_log);
+
+   report_error(log_level::INFO, m_type == BACKUP ? ER_BACKUP_BACKUP_DONE
+                                                  : ER_BACKUP_RESTORE_DONE);  
+   report_state(BUP_COMPLETE);
+  
+  // Report stop time to backup logs.
+  backup_log->stop(when);
+  /* 
+    Since the operation has completed, we can now write the backup history log
+    entry describing it.
+  */
+  backup_log->write_history();
+}
+
 /**
-  Report end of the operation.
+  Report that backup/restore operation has been aborted.
+
+  @param[in] when  time when the operation has ended.
+  @param[in] data_changed  tells whether data has already been modified, in
+                           case this is a restore operation.
+
+  This method should be called when backup/restore operation has been aborted
+  before its completion, e.g., because of an error or user interruption.
+  
+  The method will log the stop time and ER_OPERATION_ABORTED warning.
+  However, if a restore operation has been interrupted and @c data_changed flag
+  is true, ER_OPERATION_ABORTED_CORRUPTED warning will be logged, to warn the 
+  user about the possibility of data corruption.
   
-  @param[in] success indicates if the operation ended successfuly
+  @note This method must be called after @c report_start(). It can not be
+  called after end of operation has been logged with either this method or
+  @c report_completed().
  */
 inline
-void Logger::report_stop(time_t when, bool success)
+void Logger::report_aborted(time_t when, bool data_changed)
 {
-  if (m_state == DONE)
-    return;
-
   DBUG_ASSERT(m_state == RUNNING);
   DBUG_ASSERT(backup_log);
 
-  report_error(log_level::INFO, m_type == BACKUP ? ER_BACKUP_BACKUP_DONE
-                                                 : ER_BACKUP_RESTORE_DONE);  
+ if (m_type == RESTORE && data_changed)
+   report_error(log_level::WARNING, ER_OPERATION_ABORTED_CORRUPTED);
+ else
+   report_error(log_level::WARNING, ER_OPERATION_ABORTED);
+
+  // Report stop time to backup logs.
 
   backup_log->stop(when);
-  backup_log->state(success ? BUP_COMPLETE : BUP_ERRORS);
+  /* 
+    Since the operation has ended, we can now write the backup history log
+    entry describing it.
+  */
   backup_log->write_history();
+
   m_state= DONE;
 }
 
@@ -231,8 +285,8 @@ void Logger::report_state(enum_backup_st
 /** 
   Report validity point creation time.
 
-  @param[IN] when   the time of validity point
-  @param[IN] report determines if VP time should be also reported in the
+  @param[in] when   the time of validity point
+  @param[in] report determines if VP time should be also reported in the
                     backup_progress log
 */
 inline
@@ -323,6 +377,11 @@ int Logger::init(enum_type type, const c
   return 0;
 }
 
+/**
+  Error reported
+
+  Returns TRUE if error is reported.
+*/
 inline
 bool Logger::error_reported() const
 {

=== modified file 'sql/backup/restore_info.h'
--- a/sql/backup/restore_info.h	2008-11-25 17:44:19 +0000
+++ b/sql/backup/restore_info.h	2009-01-08 14:57:41 +0000
@@ -31,20 +31,21 @@ int restore_table_data(THD*, Restore_inf
 
 class Restore_info: public backup::Image_info
 {
- public:
+public:
 
-  backup::Logger &m_log;
+  backup::Logger &m_log;  ///< Pointer to logger class
 
   ~Restore_info();
 
-  bool is_valid() const;
+  /// Determine if the information class is valid.
+  bool is_valid();
 
   Image_info::Ts* add_ts(const ::String&, uint);
   Image_info::Db* add_db(const ::String&, uint);
   Image_info::Table* add_table(Image_info::Db&, const ::String&, 
                                backup::Snapshot_info&, ulong);
 
- private:
+private:
 
   /*
     Note: constructor is private because instances of this class are supposed
@@ -58,6 +59,9 @@ class Restore_info: public backup::Image
 
   THD *m_thd;
 
+  /// A flag to indicate that data has been modified during restore operation.
+  bool m_data_changed;
+
   friend int backup::restore_table_data(THD*, Restore_info&, 
                                         backup::Input_stream&);
   friend int ::bcat_add_item(st_bstream_image_header*,
@@ -71,7 +75,7 @@ class Restore_info: public backup::Image
 
 inline
 Restore_info::Restore_info(backup::Logger &log, THD *thd)
-  :m_log(log), m_thd(thd)
+  :m_log(log), m_thd(thd), m_data_changed(FALSE)
 {}
 
 inline
@@ -85,7 +89,7 @@ Restore_info::~Restore_info()
 }
 
 inline
-bool Restore_info::is_valid() const
+bool Restore_info::is_valid()
 {
   return TRUE; 
 }

=== modified file 'sql/backup/stream.cc'
--- a/sql/backup/stream.cc	2008-12-24 10:48:24 +0000
+++ b/sql/backup/stream.cc	2009-01-29 21:17:59 +0000
@@ -247,6 +247,12 @@ int Stream::open()
   return 0;
 }
 
+/**
+  Close stream
+
+  @retval TRUE success 
+  @retval FALSE failure
+*/
 bool Stream::close()
 {
   bool ret= TRUE;
@@ -261,6 +267,11 @@ bool Stream::close()
   return ret;
 }
 
+/**
+  Rewind stream
+
+  @returns result of seeking to the start of the stream
+*/
 bool Stream::rewind()
 {
 #ifdef HAVE_COMPRESS

=== modified file 'sql/backup/stream.h'
--- a/sql/backup/stream.h	2008-12-24 10:48:24 +0000
+++ b/sql/backup/stream.h	2009-01-29 21:17:59 +0000
@@ -33,8 +33,10 @@
 
 namespace backup {
 
+/// Structure for storing stream results.
 struct stream_result
 {
+  /// Enumeration of stream result values.
   enum value {
     OK= BSTREAM_OK,
     EOC= BSTREAM_EOC,
@@ -53,15 +55,16 @@ extern "C" int stream_read(void *instanc
 
  ****************************************************/
 
+/// Structure for storing information about the file stream.
 struct fd_stream: public backup_stream
 {
-  int m_fd;
-  size_t bytes;
-  uchar m_header_buf[10];
-  bool m_with_compression;
+  int m_fd;                 ///< file descriptor
+  size_t bytes;             ///< bytes read
+  uchar m_header_buf[10];   ///< header buffer
+  bool m_with_compression;  ///< switch to use compression
 #ifdef HAVE_COMPRESS
-  z_stream zstream;
-  uchar *zbuf;
+  z_stream zstream;         ///< the compression stream
+  uchar *zbuf;              ///< compression buffer
 #endif
   
   fd_stream() :m_fd(-1), bytes(0) {}
@@ -77,7 +80,7 @@ struct fd_stream: public backup_stream
 */
 class Stream: public fd_stream
 {
- public:
+public:
 
   int open();
   virtual bool close();
@@ -91,17 +94,18 @@ class Stream: public fd_stream
   virtual ~Stream()
   { close(); }
 
- protected:
+protected:
 
+  /// Constructor
   Stream(Logger&, ::String *, int);
 
-  ::String  *m_path;
-  int     m_flags;  ///< flags used when opening the file
-  size_t  m_block_size;
-  Logger&  m_log;
-
-  virtual File get_file() = 0; // Create or open file
+  ::String  *m_path;    ///< path for file
+  int     m_flags;      ///< flags used when opening the file
+  size_t  m_block_size; ///< block size for data stream
+  Logger&  m_log;       ///< reference to logger class
 
+  /// Create or open file
+  virtual File get_file() = 0; 
 
   friend int stream_write(void*, bstream_blob*, bstream_blob);
   friend int stream_read(void*, bstream_blob*, bstream_blob);
@@ -113,34 +117,34 @@ private:
 };
 
 /// Used to write to backup stream.
-class Output_stream:
-  public Stream
+class Output_stream: public Stream
 {
- public:
+public:
 
+  /// Constructor
   Output_stream(Logger&, ::String *, bool);
 
   int open();
   bool close();
   bool rewind();
 
- protected:
+protected:
 
   virtual File get_file();
 
- private:
+private:
 
   int write_magic_and_version();
   bool init();
 };
 
 /// Used to read from backup stream.
-class Input_stream:
-  public Stream
+class Input_stream: public Stream
 {
- public:
+public:
 
-   Input_stream(Logger&, ::String *);
+  /// Constructor
+  Input_stream(Logger&, ::String *);
 
   int open();
   bool close();
@@ -148,11 +152,11 @@ class Input_stream:
 
   int next_chunk();
 
- protected:
+protected:
 
   virtual File get_file();
 
- private:
+private:
 
   int check_magic_and_version();
   bool init();
@@ -163,6 +167,14 @@ class Input_stream:
  Wrappers around backup stream functions which perform necessary type conversions.
 */
 
+/**
+  Write the preamble.
+
+  @param[in]  info  The image info.
+  @param[in]  s     The output stream.
+
+  @retval  ERROR if stream error, OK if no errors.
+*/
 inline
 result_t
 write_preamble(const Image_info &info, Output_stream &s)
@@ -175,6 +187,14 @@ write_preamble(const Image_info &info, O
   return ret == BSTREAM_ERROR ? ERROR : OK;
 }
 
+/**
+  Write the summary.
+
+  @param[in]  info  The image info.
+  @param[in]  s     The output stream.
+
+  @retval  ERROR if stream error, OK if no errors.
+*/
 inline
 result_t
 write_summary(const Image_info &info, Output_stream &s)
@@ -187,6 +207,14 @@ write_summary(const Image_info &info, Ou
   return ret == BSTREAM_ERROR ? ERROR : OK;
 }
 
+/**
+  Read the header.
+
+  @param[in]  info  The image info.
+  @param[in]  s     The input stream.
+
+  @retval  ERROR if stream error, OK if no errors.
+*/
 inline
 result_t
 read_header(Image_info &info, Input_stream &s)
@@ -195,6 +223,14 @@ read_header(Image_info &info, Input_stre
   return ret == BSTREAM_ERROR ? ERROR : OK;
 }
 
+/**
+  Read the catalogue.
+
+  @param[in]  info  The image info.
+  @param[in]  s     The input stream.
+
+  @retval  ERROR if stream error, OK if no errors.
+*/
 inline
 result_t
 read_catalog(Image_info &info, Input_stream &s)
@@ -203,6 +239,14 @@ read_catalog(Image_info &info, Input_str
   return ret == BSTREAM_ERROR ? ERROR : OK;
 }
 
+/**
+  Read the metadata.
+
+  @param[in]  info  The image info.
+  @param[in]  s     The input stream.
+
+  @retval  ERROR if stream error, OK if no errors.
+*/
 inline
 result_t
 read_meta_data(Image_info &info, Input_stream &s)
@@ -211,6 +255,14 @@ read_meta_data(Image_info &info, Input_s
   return ret == BSTREAM_ERROR ? ERROR : OK;
 }
 
+/**
+  Read the summary data.
+
+  @param[in]  info  The image info.
+  @param[in]  s     The input stream.
+
+  @retval  ERROR if stream error, OK if no errors.
+*/
 inline
 result_t
 read_summary(Image_info &info, Input_stream &s)

=== modified file 'sql/backup/stream_v1.c'
--- a/sql/backup/stream_v1.c	2008-11-13 13:02:36 +0000
+++ b/sql/backup/stream_v1.c	2008-12-18 21:46:36 +0000
@@ -19,6 +19,7 @@
 # define ASSERT(X)
 #else
 # include <assert.h>
+/// Macro to map assertion call when debug is off.
 # define ASSERT(X) assert(X)
 #endif
 
@@ -32,14 +33,18 @@
 
 /* local types */
 
-typedef unsigned char bool;
-#define TRUE    1
-#define FALSE   0
+typedef unsigned char bool; ///< definition of bool
+#define TRUE    1  ///< definition of true
+#define FALSE   0  ///< definition of false
 
-typedef bstream_byte byte;
-typedef bstream_blob blob;
+typedef bstream_byte byte;  ///< type definition of bstream byte
+typedef bstream_blob blob;  ///< type definition of bstream blob
 
-/* this is needed for seamless compilation on windows */
+/**
  Macro mapping bzero() to memset().
+  
+  @note This is needed for seamless compilation on windows.
+*/
 #define bzero(A,B)  memset((A),0,(B))
 
 /*
@@ -50,17 +55,20 @@ typedef bstream_blob blob;
   error.
 */
 
+/// Macro to check write result.
 #define CHECK_WR_RES(X) \
   do{\
    if ((ret= X) != BSTREAM_OK) goto wr_error;\
   } while(0)
 
+/// Macro to check read ok return.
 #define CHECK_RD_OK(X) \
  do{\
    if ((ret= X) != BSTREAM_OK)\
     { ret=BSTREAM_ERROR; goto rd_error; }\
  } while(0)
 
+/// Macro to check read result.
 #define CHECK_RD_RES(X) \
  do{\
    if ((ret= X) == BSTREAM_ERROR) goto rd_error;\
@@ -82,6 +90,16 @@ int bstream_write(backup_stream*, bstrea
 int bstream_write_part(backup_stream*, bstream_blob*, bstream_blob);
 int bstream_write_blob(backup_stream*, bstream_blob);
 int bstream_end_chunk(backup_stream*);
+
+/**
+ Flush backup stream`s output buffer to the output stream.
+
+ This empties the output buffer.
+
+ @param[in]  s  The backup stream to flush.
+
+ @returns Status of operation.
+*/
 int bstream_flush(backup_stream*);
 
 int bstream_read(backup_stream*, bstream_blob*);
@@ -108,6 +126,11 @@ byte get_byte_short(short value)
   return (byte)value;
 }
 
+/**
+  Return byte from unsigned short.
+
+  @param[in]  value  Item to be changed to byte type.
+*/
 byte get_byte_ushort(unsigned short value)
 {
   ASSERT(value < 256);
@@ -600,6 +623,7 @@ int bstream_rd_image_info(backup_stream
   @endverbatim
 */
 
+/// Definition of extra data flag bits.
 #define BSTREAM_FLAG_HAS_EXTRA_DATA   0x80
 
 int bstream_wr_db_catalogue(backup_stream*, struct st_bstream_image_header*,
@@ -1394,6 +1418,7 @@ int bstream_rd_meta_data(backup_stream *
   @endverbatim
 */
 
+/// Definition of create statement flag bits.
 #define BSTREAM_FLAG_HAS_CREATE_STMT 0x40
 
 /**
@@ -1441,21 +1466,18 @@ int bstream_wr_meta_item(backup_stream *
   @c (*item). This description is not persistent - next call to this function
   can overwrite it with description of another item.
 
-  @param cat  the catalogue object where items are located
-  @param db   default database for per-db items for which database coordinates
-              are not stored in the entry (i.e., for tables)
-  @param kind  format in which item coordinates are stored
-  @param flags the flags saved in the entry are stored in that location
-  @param item  pointer to a structure describing item found is stored here.
-
+  @param[in] s    the backup stream
+  @param[in] kind  format in which item coordinates are stored
+  @param[in] flags the flags saved in the entry are stored in that location
+  @param[in] item  pointer to a structure describing item found is stored here.
 
   @retval BSTREAM_ERROR  Error while reading
   @retval BSTREAM_OK     Read successful
   @retval BSTREAM_EOC    Read successful and end of chunk has been reached
   @retval BSTREAM_EOS    Read successful and end of stream has been reached
 
-  If function returns BSTREAM_OK and @c (*item) is set to NULL, it means that we
-  are looking at an empty item list.
+  @note If function returns BSTREAM_OK and @c (*item) is set to NULL, it means
+        that we are looking at an empty item list.
 */
 int bstream_rd_meta_item(backup_stream *s,
                          enum enum_bstream_meta_item_kind kind,
@@ -1542,7 +1564,12 @@ int bstream_rd_meta_item(backup_stream *
 /**
   Write entry with given item's meta-data.
 
-  @param kind  determines format in which item's coordinates are saved.
+  @param[in]  s     backup stream.
+  @param[in]  cat   image catalogue.
+  @param[in]  kind  determines format in which item's coordinates are saved.
+  @param[in]  item  stream item to write.
+
+  @returns Status of operation.
 */
 int bstream_wr_item_def(backup_stream *s,
                    struct st_bstream_image_header *cat,

=== modified file 'sql/backup/stream_v1.h'
--- a/sql/backup/stream_v1.h	2008-09-11 09:47:53 +0000
+++ b/sql/backup/stream_v1.h	2008-12-18 21:46:36 +0000
@@ -17,6 +17,7 @@
  *
  *********************************************************************/
 
+/// Definition for backup stream byte type.
 typedef unsigned char bstream_byte;
 
 /**
@@ -34,6 +35,7 @@ struct st_blob
   bstream_byte *end;   /**< one byte after the last byte of the blob */
 };
 
+/// Definition for backup stream blob structure.
 typedef struct st_blob bstream_blob;
 
 /**
@@ -52,6 +54,7 @@ struct st_bstream_time
   unsigned int        year;  /**< years since 1900 */
 };
 
+/// Definition for backup stream time structure.
 typedef struct st_bstream_time bstream_time_t;
 
 /**
@@ -66,7 +69,8 @@ struct st_bstream_binlog_pos
   unsigned long int pos;    /**< position (offset) within the file */
 };
 
-/* struct st_backup_stream is defined below */
+/// Definition for backup stream structure.
+/// @note The struct st_backup_stream is defined below.
 typedef struct st_backup_stream backup_stream;
 
 /** Codes returned by backup stream functions */
@@ -94,10 +98,10 @@ enum enum_bstream_ret_codes {
 */
 struct st_server_version
 {
-  unsigned short int  major;
-  unsigned short int  minor;
-  unsigned short int  release;
-  bstream_blob        extra;
+  unsigned short int  major;   ///< major level
+  unsigned short int  minor;   ///< minor level
+  unsigned short int  release; ///< release level
+  bstream_blob        extra;   ///< extra data about the server
 };
 
 /**
@@ -246,7 +250,7 @@ struct st_bstream_item_info
 */
 struct st_bstream_ts_info
 {
-  struct st_bstream_item_info  base;
+  struct st_bstream_item_info  base;  ///< The base of the info class.
 };
 
 /**
@@ -256,7 +260,7 @@ struct st_bstream_ts_info
 */
 struct st_bstream_db_info
 {
-  struct st_bstream_item_info  base;
+  struct st_bstream_item_info  base;  ///< The base of the info class.
 };
 
 
@@ -297,8 +301,11 @@ struct st_bstream_titem_info
   inside enum_bstream_item_type but defined separately.
 */
 
+/// Definition of backup stream global item index.
 #define BSTREAM_IT_GLOBAL    BSTREAM_IT_LAST
+/// Definition of backup stream per database item index.
 #define BSTREAM_IT_PERDB     (BSTREAM_IT_LAST+1)
+/// Definition of backup stream per table item index.
 #define BSTREAM_IT_PERTABLE  (BSTREAM_IT_LAST+2)
 
 
@@ -334,7 +341,16 @@ int bstream_wr_data_chunk(backup_stream*
                           struct st_bstream_data_chunk*);
 int bstream_wr_summary(backup_stream *s, struct st_bstream_image_header *hdr);
 
-int bstream_flush(backup_stream*);
+/**
+ Flush backup stream's output buffer to the output stream.
+
+ This empties the output buffer.
+
+ @param[in]  s  The backup stream to flush.
+
+ @returns Status of operation.
+*/
+int bstream_flush(backup_stream* s);
 
 /*********************************************************************
  *
@@ -391,8 +407,11 @@ int bstream_next_chunk(backup_stream*);
  */
 struct st_abstract_stream
 {
-  int (*write)(void*, bstream_blob*, bstream_blob);
+  /// Pointer to write method.
+  int (*write)(void*, bstream_blob*, bstream_blob); 
+  /// Pointer to read method.
   int (*read)(void*, bstream_blob*, bstream_blob);
+  /// Pointer to forward method.
   int (*forward)(void*, unsigned long int*);
 };
 
@@ -402,10 +421,10 @@ struct st_abstract_stream
 */
 struct st_bstream_buffer
 {
-  bstream_byte  *begin;
-  bstream_byte  *pos;
-  bstream_byte  *header;
-  bstream_byte  *end;
+  bstream_byte  *begin;   ///< pointer to start of stream buffer
+  bstream_byte  *pos;     ///< current position in buffer
+  bstream_byte  *header;  ///< pointer to header
+  bstream_byte  *end;     ///< pointer to end of stream buffer
 };
 
 /**
@@ -413,19 +432,21 @@ struct st_bstream_buffer
 */
 struct st_backup_stream
 {
-  struct st_abstract_stream stream;
-  unsigned long int block_size;
-  short int init_block_count;
-  enum { CLOSED,         /* stream has been closed */
-         FIRST_BLOCK,    /* reading/writing the first block of a stream */
-         NORMAL,         /* normal operation */
-         LAST_FRAGMENT,  /* reading last fragment of a chunk */
-         EOS,            /* end of stream detected */
-         ERROR } state;
-  enum { READING, WRITING } mode;
-  struct st_bstream_buffer buf;
-  bstream_blob mem;
-  bstream_blob data_buf;
+  struct st_abstract_stream stream; ///< stream metadata
+  unsigned long int block_size;     ///< block size
+  short int init_block_count;       ///< initial block count
+  /// Enumeration for state of the stream
+  enum { CLOSED,         ///< stream has been closed 
+         FIRST_BLOCK,    ///< reading/writing the first block of a stream 
+         NORMAL,         ///< normal operation 
+         LAST_FRAGMENT,  ///< reading last fragment of a chunk 
+         EOS,            ///< end of stream detected 
+         ERROR } state;  ///< state of the stream
+  /// Enumeration for mode of stream (read, write).
+  enum { READING, WRITING } mode; ///< current mode.
+  struct st_bstream_buffer buf; ///< stream buffer
+  bstream_blob mem;             ///< pointer to location in buffer
+  bstream_blob data_buf;        ///< pointer to data buffer
 };
 
 int bstream_open_wr(backup_stream*, unsigned long int, unsigned long int);

=== modified file 'sql/backup/stream_v1_transport.c'
--- a/sql/backup/stream_v1_transport.c	2008-09-16 16:09:18 +0000
+++ b/sql/backup/stream_v1_transport.c	2009-01-20 11:36:32 +0000
@@ -20,6 +20,7 @@
 # define ASSERT(X)
 #else
 # include <assert.h>
+/// Macro mapping ASSERT to the standard assert() call.
 # define ASSERT(X) assert(X)
 #endif
 
@@ -67,23 +68,57 @@
 
 /* Local type definitions. */
 
-#define TRUE    1
-#define FALSE   0
+#define TRUE    1  ///< definition of true
+#define FALSE   0  ///< definition of false
 
-typedef unsigned char bool;
-typedef bstream_byte byte;
-typedef bstream_blob blob;
+typedef unsigned char bool; ///< Type definition of unsigned character to bool.
+typedef bstream_byte byte;  ///< Type definition of bstream_byte to byte.
+typedef bstream_blob blob;  ///< Type definition of bstream_blob to blob.
 
-/* this is needed for seamless compilation on windows */
+/**
+  Macro mapping bzero() to memset().
+
+  @note This is needed for seamless compilation on Windows.
+*/
 #define bzero(A,B)  memset((A),0,(B))
 
 /*
   Helper functions defined in stream_v1.c
 */
+
+/**
+  Return byte from unsigned long int.
+
+  @param[in]  value  Item to be changed to byte type.
+*/
 extern byte get_byte_ulong(unsigned long int value);
+
+/**
+  Return byte from unsigned int.
+
+  @param[in]  value  Item to be changed to byte type.
+*/
 extern byte get_byte_uint(unsigned int value);
+
+/**
+  Return byte from unsigned short.
+
+  @param[in]  value  Item to be changed to byte type.
+*/
 extern byte get_byte_ushort(short value);
+
+/**
+  Return byte from short.
+
+  @param[in]  value  Item to be changed to byte type.
+*/
 extern byte get_byte_short(short value);
+
+/**
+  Return byte from size_t.
+
+  @param[in]  value  Item to be changed to byte type.
+*/
 extern byte get_byte_size_t(size_t value);
 
 /*
@@ -139,20 +174,20 @@ extern byte get_byte_size_t(size_t value
  *
  *************************************************************************/
 
-#define FR_EOC    0x80
-#define FR_EOS    0xC0
-#define FR_MORE   0x00
-#define FR_LAST   0x40
+#define FR_EOC    0x80  ///< bits for EOC fragment
+#define FR_EOS    0xC0  ///< bits for EOS fragment
+#define FR_MORE   0x00  ///< bits for MORE fragment
+#define FR_LAST   0x40  ///< bits for LAST fragment
 
-#define FR_TYPE_MASK  0xC0
-#define FR_LEN_MASK   (~FR_TYPE_MASK)
+#define FR_TYPE_MASK  0xC0  ///< type bits for mask
+#define FR_LEN_MASK   (~FR_TYPE_MASK)  ///< type length for mask
 
 /** biggest size of small fragment */
 #define FR_SMALL_MAX  ((size_t)FR_LEN_MASK)
-#define FR_BIG        0x80        /**< type bits for big fragment */
-#define FR_HUGE       0xC0        /**< type bits for huge fragment */
-#define FR_BIG_SHIFT  6           /**< value shift for big fragment */
-#define FR_HUGE_SHIFT 12          /**< value shift for huge fragment */
+#define FR_BIG        0x80        ///< type bits for big fragment
+#define FR_HUGE       0xC0        ///< type bits for huge fragment
+#define FR_BIG_SHIFT  6           ///< value shift for big fragment 
+#define FR_HUGE_SHIFT 12          ///< value shift for huge fragment 
 /** header for the biggest possible chunk */
 #define FR_HUGE_MAX_HDR (FR_HUGE|FR_LEN_MASK)
 /** size of the biggest possible chunk */
@@ -276,12 +311,15 @@ int read_fragment_header(byte **header)
  (parameter S) to write/read bytes to/from underlying stream.
 */
 
+/// Macro mapping as_write to write method call result.
 #define as_write(S,Data,Env) \
   ((S)->write ? (S)->write((S),(Data),(Env)) : BSTREAM_ERROR)
 
+/// Macro mapping as_read to read method call result.
 #define as_read(S,Buf,Env) \
   ((S)->read ?(S)->read((S),(Buf),(Env)) : BSTREAM_ERROR)
 
+/// Macro mapping as_forward to forward method call result.
 #define as_forward(S,Off) \
   ((S)->forward ? (S)->forward((S),(Off)) : (*(Off)=0, BSTREAM_ERROR))
 
@@ -546,6 +584,7 @@ int close_current_fragment(backup_stream
 
  *************************************************************************/
 
+/// Macro to check the validity of an input buffer.
 #define IBUF_INV(B) \
   ASSERT((B).begin <= (B).pos); \
   ASSERT((B).begin <= (B).header); \
@@ -829,9 +868,10 @@ int load_next_fragment(backup_stream *s)
 /**
   Open backup stream for writing.
 
-  @param block_size   size of output stream blocks
-  @param offset       current position of the output stream inside the
-                      current stream block
+  @param[in] s            backup stream
+  @param[in] block_size   size of output stream blocks
+  @param[in] offset       current position of the output stream inside the
+                          current stream block
 
   @pre The abstract stream methods in @c s should be setup and ready for
   writing.
@@ -868,8 +908,9 @@ int bstream_open_wr(backup_stream *s,
 /**
   Open backup stream for reading.
 
-  @param offset    current position of the input stream inside the
-                   current stream block
+  @param[in] s       backup stream
+  @param[in] offset  current position of the input stream inside the
+                     current stream block
 
   @pre The abstract stream methods in @c s should be setup and ready for
   reading.
@@ -907,7 +948,24 @@ int bstream_open_rd(backup_stream *s, un
   return BSTREAM_OK;
 }
 
+/**
+ Set the end chunk for the output stream.
+
+ @param[in]  s  The backup stream.
+
+ @returns Status of operation.
+*/
 int bstream_end_chunk(backup_stream *s);
+
+/**
+ Flush backup stream's output buffer to the output stream.
+
+ This empties the output buffer.
+
+ @param[in]  s  The backup stream to flush.
+
+ @returns Status of operation.
+*/
 int bstream_flush(backup_stream *s);
 
 /**
@@ -1078,26 +1136,40 @@ int bstream_write_part(backup_stream *s,
   /*
    To avoid copying bytes to the internal output buffer we try to cut a prefix
    of the data to be written which forms a valid fragment and write this
-   fragment to output stream.
-
-   Note: after call to biggest_fragment_prefix() blob fragment contains the
-   bytes which didn't fit into the prefix.
+   prefix to output stream.
   */
   *(s->buf.header)= biggest_fragment_prefix(&fragment);
 
   /*
-    We use this method only if it will actually write enough of the bytes
-    to be written - if it is only few bytes we save them into the output
-    buffer anyway.
+   After the call to biggest_fragment_prefix the situation is as follows:
+
+       output buffer
+               current fragment
+   [ ===== | 0x00 ===============]
+            ^
+            header                       data
+                                 [====================]
+
+                  --------------------[---------------]
+                       prefix              fragment
+
+   Fragment blob describes the data which did not fit into the prefix.
+  */ 
+
+  /*
+    We write the prefix directly to the stream if it includes the whole output
+    buffer and there are enough bytes to be written - if there are only a few
+    bytes we rather keep them in the buffer.
    */
-  if (fragment.end > (s->buf.pos + MIN_WRITE_SIZE))
+  if (fragment.begin > s->buf.pos && 
+      fragment.begin > (s->buf.begin + MIN_WRITE_SIZE))
   {
     /* write contents of the output buffer */
     ret= write_buffer(s);
     if (ret != BSTREAM_OK)
       return BSTREAM_ERROR;
 
-    /* write remainder of the fragment from data blob */
+    /* write remainder of the prefix from data blob */
     saved_end= data->end;
     data->end= data->begin + (fragment.begin - s->buf.pos);
 
@@ -1258,11 +1330,6 @@ int bstream_end_chunk(backup_stream *s)
   return ret;
 }
 
-/**
- Flush backup stream`s output buffer to the output stream.
-
- This empties the output buffer.
-*/
 int bstream_flush(backup_stream *s)
 {
   struct st_bstream_buffer *buf= &s->buf;

=== modified file 'sql/handler.cc'
--- a/sql/handler.cc	2009-01-31 15:53:35 +0000
+++ b/sql/handler.cc	2009-02-03 09:16:53 +0000
@@ -4514,6 +4514,11 @@ int DsMrr_impl::dsmrr_fill_buffer(handle
   while ((rowids_buf_cur < rowids_buf_end) && 
          !(res= h2->handler::multi_range_read_next(&range_info)))
   {
+    KEY_MULTI_RANGE *curr_range= &h2->handler::mrr_cur_range;
+    if (h2->mrr_funcs.skip_index_tuple &&
+        h2->mrr_funcs.skip_index_tuple(h2->mrr_iter, curr_range->ptr))
+      continue;
+    
     /* Put rowid, or {rowid, range_id} pair into the buffer */
     h2->position(table->record[0]);
     memcpy(rowids_buf_cur, h2->ref, h2->ref_length);
@@ -4623,6 +4628,7 @@ ha_rows DsMrr_impl::dsmrr_info(uint keyn
   }
   else
   {
+    /* *flags and *bufsz were set by choose_mrr_impl */
     DBUG_PRINT("info", ("DS-MRR implementation choosen"));
   }
   return 0;
@@ -4664,7 +4670,7 @@ ha_rows DsMrr_impl::dsmrr_info_const(uin
   }
   else
   {
-    *flags &= ~HA_MRR_USE_DEFAULT_IMPL;
+    /* *flags and *bufsz were set by choose_mrr_impl */
     DBUG_PRINT("info", ("DS-MRR implementation choosen"));
   }
   return rows;
@@ -4730,10 +4736,8 @@ bool DsMrr_impl::choose_mrr_impl(uint ke
   COST_VECT dsmrr_cost;
   bool res;
   THD *thd= current_thd;
-  if ((thd->variables.optimizer_use_mrr == 2) || 
-      (*flags & HA_MRR_INDEX_ONLY) || (*flags & HA_MRR_SORTED) ||
-      (keyno == table->s->primary_key && 
-       h->primary_key_is_clustered()) || 
+  if (thd->variables.optimizer_use_mrr == 2 || *flags & HA_MRR_INDEX_ONLY ||
+      (keyno == table->s->primary_key && h->primary_key_is_clustered()) ||
        key_uses_partial_cols(table, keyno))
   {
     /* Use the default implementation */

=== modified file 'sql/handler.h'
--- a/sql/handler.h	2009-01-26 16:03:39 +0000
+++ b/sql/handler.h	2009-01-29 21:17:59 +0000
@@ -1205,7 +1205,19 @@ typedef struct st_range_seq_if
       0 - The record shall be left in the stream
   */ 
   bool (*skip_record) (range_seq_t seq, char *range_info, uchar *rowid);
- 
+
+  /*
+    Check if the record combination matches the index condition
+    SYNOPSIS
+      skip_index_tuple()
+        seq         The value returned by RANGE_SEQ_IF::init()
+        range_info  Information about the next range 
+    
+    RETURN
+      0 - The record combination satisfies the index condition
+      1 - Otherwise
+  */ 
+  bool (*skip_index_tuple) (range_seq_t seq, char *range_info);
 } RANGE_SEQ_IF;
 
 class COST_VECT

=== modified file 'sql/mysql_priv.h'
--- a/sql/mysql_priv.h	2009-01-26 16:03:39 +0000
+++ b/sql/mysql_priv.h	2009-01-29 21:17:59 +0000
@@ -607,7 +607,7 @@ enum open_table_mode
 
 /* Used to check GROUP BY list in the MODE_ONLY_FULL_GROUP_BY mode */
 #define UNDEF_POS (-1)
-#define BACKUP_WAIT_TIMEOUT_DEFAULT 50;
+#define BACKUP_WAIT_TIMEOUT_DEFAULT 50
 
 /* BINLOG_DUMP options */
 
@@ -1952,6 +1952,8 @@ extern ulong slow_launch_threads, slow_l
 extern ulong table_cache_size, table_def_size;
 extern ulong max_connections,max_connect_errors, connect_timeout;
 extern my_bool slave_allow_batching;
+extern my_bool allow_slave_start;
+extern LEX_CSTRING reason_slave_blocked;
 extern ulong slave_net_timeout, slave_trans_retries;
 extern uint max_user_connections;
 extern ulong what_to_log,flush_time;
@@ -2043,7 +2045,7 @@ extern pthread_mutex_t LOCK_mysql_create
        LOCK_error_log, LOCK_delayed_insert, LOCK_uuid_short,
        LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone,
        LOCK_slave_list, LOCK_active_mi, LOCK_manager, LOCK_global_read_lock,
-       LOCK_global_system_variables, LOCK_user_conn,
+       LOCK_global_system_variables, LOCK_user_conn, LOCK_slave_start,
        LOCK_prepared_stmt_count,
        LOCK_connection_count;
 #ifdef HAVE_OPENSSL

=== modified file 'sql/mysqld.cc'
--- a/sql/mysqld.cc	2009-01-26 16:03:39 +0000
+++ b/sql/mysqld.cc	2009-01-29 21:17:59 +0000
@@ -547,6 +547,8 @@ ulong query_buff_size, slow_launch_time,
 ulong open_files_limit, max_binlog_size, max_relay_log_size;
 ulong slave_net_timeout, slave_trans_retries;
 my_bool slave_allow_batching;
+my_bool allow_slave_start= TRUE;
+LEX_CSTRING reason_slave_blocked;
 ulong slave_exec_mode_options;
 const char *slave_exec_mode_str= "STRICT";
 ulong thread_cache_size=0, thread_pool_size= 0;
@@ -698,7 +700,7 @@ pthread_mutex_t LOCK_mysql_create_db, LO
 		LOCK_crypt,
 	        LOCK_global_system_variables,
                 LOCK_user_conn, LOCK_slave_list, LOCK_active_mi,
-                LOCK_connection_count;
+                LOCK_connection_count, LOCK_slave_start;
 
 /**
   The below lock protects access to two global server variables:
@@ -1402,6 +1404,7 @@ void clean_up(bool print_message)
   free_max_user_conn();
 #ifdef HAVE_REPLICATION
   end_slave_list();
+  end_slave_start();
 #endif
   delete binlog_filter;
   delete rpl_filter;
@@ -3942,6 +3945,7 @@ static int init_server_components()
   my_uuid_init((ulong) (my_rnd(&sql_rand))*12345,12345);
 #ifdef HAVE_REPLICATION
   init_slave_list();
+  init_slave_start();
 #endif
 
   /* Setup logs */

=== modified file 'sql/opt_range.cc'
--- a/sql/opt_range.cc	2009-01-26 16:03:39 +0000
+++ b/sql/opt_range.cc	2009-01-29 21:17:59 +0000
@@ -1159,7 +1159,6 @@ QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(T
   DBUG_ENTER("QUICK_RANGE_SELECT::QUICK_RANGE_SELECT");
 
   in_ror_merged_scan= 0;
-  sorted= 0;
   index= key_nr;
   head=  table;
   key_part_info= head->key_info[index].key_part;
@@ -1195,6 +1194,20 @@ QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(T
 }
 
 
+void QUICK_RANGE_SELECT::need_sorted_output()
+{
+  if (!(mrr_flags & HA_MRR_SORTED))
+  {
+    /*
+      Native implementation can't produce sorted output. We'll have to
+      switch to default
+    */
+    mrr_flags |= HA_MRR_USE_DEFAULT_IMPL; 
+  }
+  mrr_flags |= HA_MRR_SORTED;
+}
+
+
 int QUICK_RANGE_SELECT::init()
 {
   DBUG_ENTER("QUICK_RANGE_SELECT::init");
@@ -7518,7 +7531,7 @@ ha_rows check_quick_select(PARAM *param,
                            uint *mrr_flags, uint *bufsize, COST_VECT *cost)
 {
   SEL_ARG_RANGE_SEQ seq;
-  RANGE_SEQ_IF seq_if = {sel_arg_range_seq_init, sel_arg_range_seq_next, 0};
+  RANGE_SEQ_IF seq_if = {sel_arg_range_seq_init, sel_arg_range_seq_next, 0, 0};
   handler *file= param->table->file;
   ha_rows rows;
   uint keynr= param->real_keynr[idx];
@@ -7545,7 +7558,10 @@ ha_rows check_quick_select(PARAM *param,
     param->is_ror_scan= FALSE;
   
   *mrr_flags= param->force_default_mrr? HA_MRR_USE_DEFAULT_IMPL: 0;
-  *mrr_flags|= HA_MRR_NO_ASSOCIATION;
+  /*
+    Pass HA_MRR_SORTED to see if MRR implementation can handle sorting.
+  */
+  *mrr_flags|= HA_MRR_NO_ASSOCIATION | HA_MRR_SORTED;
 
   bool pk_is_clustered= file->primary_key_is_clustered();
   if (index_only && 
@@ -8434,9 +8450,7 @@ int QUICK_RANGE_SELECT::reset()
   if (!mrr_buf_desc)
     empty_buf.buffer= empty_buf.buffer_end= empty_buf.end_of_used_area= NULL;
  
-  if (sorted)
-     mrr_flags |= HA_MRR_SORTED;
-  RANGE_SEQ_IF seq_funcs= {quick_range_seq_init, quick_range_seq_next, 0};
+  RANGE_SEQ_IF seq_funcs= {quick_range_seq_init, quick_range_seq_next, 0, 0};
   error= file->multi_range_read_init(&seq_funcs, (void*)this, ranges.elements,
                                      mrr_flags, mrr_buf_desc? mrr_buf_desc: 
                                                               &empty_buf);
@@ -8628,7 +8642,7 @@ int QUICK_RANGE_SELECT::get_next_prefix(
     result= file->read_range_first(last_range->min_keypart_map ? &start_key : 0,
 				   last_range->max_keypart_map ? &end_key : 0,
                                    test(last_range->flag & EQ_RANGE),
-				   sorted);
+				   TRUE);
     if (last_range->flag == (UNIQUE_RANGE | EQ_RANGE))
       last_range= 0;			// Stop searching
 

=== modified file 'sql/opt_range.h'
--- a/sql/opt_range.h	2009-01-08 19:06:44 +0000
+++ b/sql/opt_range.h	2009-01-29 21:17:59 +0000
@@ -118,7 +118,6 @@ class QUICK_RANGE :public Sql_alloc {
 class QUICK_SELECT_I
 {
 public:
-  bool sorted;
   ha_rows records;  /* estimate of # of records to be retrieved */
   double  read_time; /* time to perform this retrieval          */
   TABLE   *head;
@@ -190,7 +189,13 @@ public:
 
   virtual bool reverse_sorted() = 0;
   virtual bool unique_key_range() { return false; }
-
+  
+  /*
+    Request that this quick select produces sorted output. Not all quick
+    selects can do it, the caller is responsible for calling this function
+    only for those quick selects that can.
+  */
+  virtual void need_sorted_output() = 0;
   enum {
     QS_TYPE_RANGE = 0,
     QS_TYPE_INDEX_MERGE = 1,
@@ -328,7 +333,8 @@ public:
   QUICK_RANGE_SELECT(THD *thd, TABLE *table,uint index_arg,bool no_alloc,
                      MEM_ROOT *parent_alloc, bool *create_err);
   ~QUICK_RANGE_SELECT();
-
+  
+  void need_sorted_output();
   int init();
   int reset(void);
   int get_next();
@@ -453,6 +459,7 @@ public:
   ~QUICK_INDEX_MERGE_SELECT();
 
   int  init();
+  void need_sorted_output() { DBUG_ASSERT(0); /* Can't do it */ }
   int  reset(void);
   int  get_next();
   bool reverse_sorted() { return false; }
@@ -512,6 +519,7 @@ public:
   ~QUICK_ROR_INTERSECT_SELECT();
 
   int  init();
+  void need_sorted_output() { DBUG_ASSERT(0); /* Can't do it */ }
   int  reset(void);
   int  get_next();
   bool reverse_sorted() { return false; }
@@ -566,6 +574,7 @@ public:
   ~QUICK_ROR_UNION_SELECT();
 
   int  init();
+  void need_sorted_output() { DBUG_ASSERT(0); /* Can't do it */ }
   int  reset(void);
   int  get_next();
   bool reverse_sorted() { return false; }
@@ -685,6 +694,7 @@ public:
   void adjust_prefix_ranges();
   bool alloc_buffers();
   int init();
+  void need_sorted_output() { /* always do it */ }
   int reset();
   int get_next();
   bool reverse_sorted() { return false; }

=== modified file 'sql/set_var.cc'
--- a/sql/set_var.cc	2009-01-26 16:03:39 +0000
+++ b/sql/set_var.cc	2009-01-29 21:17:59 +0000
@@ -3115,40 +3115,60 @@ bool sys_var_insert_id::update(THD *thd,
 
 
 /**
-  Get value.
+  Get value of backup_wait_timeout.
 
   Returns the value for the backup_wait_timeout session variable.
 
+  The variable is of type SHOW_LONG.
+
   @param[IN] thd    Thread object
-  @param[IN] type   Type of variable
-  @param[IN] base   Not used 
+  @param[IN] type   Type of variable (unused)
+  @param[IN] base   base name (unused)
 
-  @returns value of variable
+  @returns value of variable as address to ulong
 */
-uchar *sys_var_backup_wait_timeout::value_ptr(THD *thd, enum_var_type type,
-				   LEX_STRING *base)
+
+uchar *sys_var_backup_wait_timeout::value_ptr(THD *thd, enum_var_type type
+                                              __attribute__((unused)),
+                                              LEX_STRING *base
+                                              __attribute__((unused)))
 {
-  thd->sys_var_tmp.ulong_value= thd->backup_wait_timeout;
-  return (uchar*) &thd->sys_var_tmp.ulonglong_value;
+  return (uchar*) &thd->backup_wait_timeout;
 }
 
 
 /**
-  Update value.
+  Update value of backup_wait_timeout.
 
   Set the backup_wait_timeout variable.
 
+  The variable is of type SHOW_LONG.
+
   @param[IN] thd    Thread object
   @param[IN] var    Pointer to value from command.
 
   @returns 0
 */
+
 bool sys_var_backup_wait_timeout::update(THD *thd, set_var *var)
 {
-  if (var->save_result.ulong_value > (LONG_MAX/1000))
-    thd->backup_wait_timeout= LONG_MAX/1000;
+  /*
+    The default sys_var::check() method sets ulonglong_value.
+    This can corrupt other values on some platforms.
+    Since we don't redefine check() for backup_wait_timeout,
+    we need to use ulonglong_value. Since we assign to an ulong
+    variable, we better check the value and limit it.
+  */
+  if (var->save_result.ulonglong_value > ULONG_MAX)
+    thd->backup_wait_timeout= ULONG_MAX;
   else
-    thd->backup_wait_timeout= var->save_result.ulong_value;
+  {
+    /*
+      This cast is required for the Windows compiler. The assignment is
+      safe because we checked the range of the value above.
+    */
+    thd->backup_wait_timeout= (ulong) var->save_result.ulonglong_value;
+  }
   return 0;
 }
 
@@ -3156,16 +3176,16 @@ bool sys_var_backup_wait_timeout::update
 /**
   Set default value.
 
-  Set the backup_wait_timeout variable to the default value.
+  Set the backup_wait_timeout variable to its default value.
 
   @param[IN] thd    Thread object
-  @param[IN] type   Type of variable
-
-  @returns 0
+  @param[IN] type   Type of variable (unused)
 */
-void sys_var_backup_wait_timeout::set_default(THD *thd, enum_var_type type)
-{ 
-  thd->backup_wait_timeout= BACKUP_WAIT_TIMEOUT_DEFAULT; 
+
+void sys_var_backup_wait_timeout::set_default(THD *thd, enum_var_type type
+                                              __attribute__((unused)))
+{
+  thd->backup_wait_timeout= BACKUP_WAIT_TIMEOUT_DEFAULT;
 }
 
 
@@ -3762,7 +3782,7 @@ int set_var_init()
   uint count= 0;
   DBUG_ENTER("set_var_init");
   
-  for (sys_var *var=vars.first; var; var= var->next, count++);
+  for (sys_var *var=vars.first; var; var= var->next, count++) {}
 
   if (hash_init(&system_variable_hash, system_charset_info, count, 0,
                 0, (hash_get_key) get_sys_var_length, 0, HASH_UNIQUE))
@@ -4530,10 +4550,10 @@ bool sys_var_opt_readonly::update(THD *t
     can cause to wait on a read lock, it's required for the client application
     to unlock everything, and acceptable for the server to wait on all locks.
   */
-  if (result= close_cached_tables(thd, NULL, FALSE, TRUE))
+  if ((result= close_cached_tables(thd, NULL, FALSE, TRUE)))
     goto end_with_read_lock;
 
-  if (result= make_global_read_lock_block_commit(thd))
+  if ((result= make_global_read_lock_block_commit(thd)))
     goto end_with_read_lock;
 
   /* Change the opt_readonly system variable, safe because the lock is held */

=== modified file 'sql/share/errmsg.txt'
--- a/sql/share/errmsg.txt	2009-01-26 16:03:39 +0000
+++ b/sql/share/errmsg.txt	2009-01-29 21:17:59 +0000
@@ -6452,3 +6452,12 @@ ER_BACKUP_RESTORE_DBS
 
 ER_WARN_ENGINE_TRANSACTION_ROLLBACK
   eng "Storage engine %s does not support rollback for this statement. Transaction rolled back and must be restarted"
+
+ER_BACKUP_SYNCHRONIZE
+  eng "Backup failed to synchronize table images."
+ER_RESTORE_CANNOT_START_SLAVE
+  eng "Cannot start slave. SLAVE START is blocked by %-.64s."
+ER_OPERATION_ABORTED
+  eng "Operation aborted"
+ER_OPERATION_ABORTED_CORRUPTED
+  eng "Operation aborted - data might be corrupted"

=== modified file 'sql/si_objects.cc'
--- a/sql/si_objects.cc	2008-12-13 19:55:44 +0000
+++ b/sql/si_objects.cc	2009-01-29 21:17:59 +0000
@@ -3137,6 +3137,28 @@ int disable_slave_connections(bool disab
 }
 
 /**
+  Set state where replication is blocked from starting.
+
+  This method tells the server that a process requires replication
+  to be turned off while the operation is in progress.
+  This is used to prohibit slaves from starting.
+
+  @param[in] block  TRUE = block slave start, FALSE = do not block
+  @param[in] reason  Reason for the block
+*/
+void block_replication(bool block, const char *reason)
+{
+  pthread_mutex_lock(&LOCK_slave_start);
+  allow_slave_start= !block;
+  if (block)
+  {
+    reason_slave_blocked.length= strlen(reason);
+    reason_slave_blocked.str= (char *)reason;
+  }
+  pthread_mutex_unlock(&LOCK_slave_start);
+}
+
+/**
   Write an incident event in the binary log.
 
   This method can be used to issue an incident event to inform the slave

=== modified file 'sql/si_objects.h'
--- a/sql/si_objects.h	2008-12-04 23:14:30 +0000
+++ b/sql/si_objects.h	2009-01-21 15:06:10 +0000
@@ -509,6 +509,12 @@ int num_slaves_attached();
 */
 int disable_slave_connections(bool disable);
 
+/*
+  Set state where replication is blocked (TRUE) or not blocked (FALSE)
+  from starting. Include reason for feedback to user.
+*/
+void block_replication(bool block, const char *reason);
+
 /**
   Enumeration of the incidents that can occur on the master.
 */

=== modified file 'sql/sql_class.cc'
--- a/sql/sql_class.cc	2009-01-31 15:53:35 +0000
+++ b/sql/sql_class.cc	2009-02-03 09:16:53 +0000
@@ -421,7 +421,7 @@ THD::THD()
           when the DDL blocker is engaged.
   */
    DDL_exception(FALSE),
-   backup_wait_timeout(50),
+   backup_wait_timeout(BACKUP_WAIT_TIMEOUT_DEFAULT),
 #if defined(ENABLED_DEBUG_SYNC)
    debug_sync_control(0),
 #endif /* defined(ENABLED_DEBUG_SYNC) */

=== modified file 'sql/sql_join_cache.cc'
--- a/sql/sql_join_cache.cc	2009-01-16 14:28:04 +0000
+++ b/sql/sql_join_cache.cc	2009-01-26 15:07:22 +0000
@@ -810,6 +810,82 @@ uint JOIN_CACHE_BKA::aux_buffer_incr()
   return incr; 
 }
 
+
+/*
+  Check if the record combination matches the index condition
+
+  SYNOPSIS
+    JOIN_CACHE_BKA::skip_index_tuple()
+      rseq             Value returned by bka_range_seq_init()
+      range_info       MRR range association data
+    
+  DESCRIPTION
+    This function is invoked from MRR implementation to check if an index
+    tuple matches the index condition. It is used in the case where the index
+    condition actually depends on both columns of the used index and columns
+    from previous tables.
+    
+    Accessing columns of the previous tables requires special handling with
+    BKA. The idea of BKA is to collect record combinations in a buffer and 
+    then do a batch of ref access lookups, i.e. by the time we're doing a
+    lookup its previous-records-combination is not in prev_table->record[0]
+    but somewhere in the join buffer.
+    
+    We need to get it from there back into prev_table(s)->record[0] before we
+    can evaluate the index condition, and that's why we need this function
+    instead of regular IndexConditionPushdown.
+
+  NOTE
+    Possible optimization:
+    Before we unpack the record from a previous table
+    check if this table is used in the condition.
+    If so then unpack the record otherwise skip the unpacking.
+    This should be done by a special virtual method
+    get_partial_record_by_pos().
+
+  RETURN
+    0    The record combination satisfies the index condition
+    1    Otherwise
+*/
+
+bool JOIN_CACHE_BKA::skip_index_tuple(range_seq_t rseq, char *range_info)
+{
+  DBUG_ENTER("JOIN_CACHE_BKA::skip_index_tuple");
+  JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) rseq;
+  cache->get_record_by_pos((uchar*)range_info);
+  DBUG_RETURN(!join_tab->cache_idx_cond->val_int());
+}
+
+
+/*
+  Check if the record combination matches the index condition
+
+  SYNOPSIS
+    bka_skip_index_tuple()
+      rseq             Value returned by bka_range_seq_init()
+      range_info       MRR range association data
+    
+  DESCRIPTION
+    This is wrapper for JOIN_CACHE_BKA::skip_index_tuple method,
+    see comments there.
+
+  NOTE
+    This function is used as a RANGE_SEQ_IF::skip_index_tuple callback.
+ 
+  RETURN
+    0    The record combination satisfies the index condition
+    1    Otherwise
+*/
+
+static 
+bool bka_skip_index_tuple(range_seq_t rseq, char *range_info)
+{
+  DBUG_ENTER("bka_skip_index_tuple");
+  JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) rseq;
+  DBUG_RETURN(cache->skip_index_tuple(rseq, range_info));
+}
+
+
 /* 
   Write record fields and their required offsets into the join cache buffer
 
@@ -1910,10 +1986,16 @@ inline bool JOIN_CACHE::check_match(ucha
 
 enum_nested_loop_state JOIN_CACHE::join_null_complements(bool skip_last)
 {
+  uint cnt; 
   enum_nested_loop_state rc= NESTED_LOOP_OK;
   bool is_first_inner= join_tab == join_tab->first_unmatched;
-  bool is_last_inner= join_tab == join_tab->first_unmatched->last_inner; 
-  uint cnt= records - (is_key_access() ? 0 : test(skip_last));
+  bool is_last_inner= join_tab == join_tab->first_unmatched->last_inner;
+ 
+  /* Return at once if there are no records in the join buffer */
+  if (!records)
+    return NESTED_LOOP_OK;
+  
+  cnt= records - (is_key_access() ? 0 : test(skip_last));
 
   /* This function may be called only for inner tables of outer joins */ 
   DBUG_ASSERT(join_tab->first_inner);
@@ -1965,6 +2047,16 @@ enum_nested_loop_state JOIN_CACHE::join_
   }
 
 finish:
+  if (is_first_inner)
+  {
+    /* 
+      Restore the values of the fields of the last record put into join buffer.
+      The value of the fields of the last record in the buffer must be restored
+      since at the null complementing pass fields of the records with matches
+      are skipped and their fields are not read into the record buffers at all. 
+    */
+    get_record_by_pos(last_rec_pos);
+  }
   return rc;
 }
 
@@ -2140,7 +2232,9 @@ enum_nested_loop_state JOIN_CACHE_BKA::j
   RANGE_SEQ_IF seq_funcs= { bka_range_seq_init, 
                             bka_range_seq_next,
                             check_only_first_match ?
-                              bka_range_seq_skip_record : 0 };
+                              bka_range_seq_skip_record : 0,
+                            join_tab->cache_idx_cond ?
+                              bka_skip_index_tuple : 0 };
 
   /* The value of skip_last must be always FALSE when this function is called */
   DBUG_ASSERT(!skip_last);
@@ -2880,6 +2974,86 @@ bool bka_unique_range_seq_skip_record(ra
   DBUG_RETURN(res);
 }
 
+ 
+/*
+  Check if the record combination matches the index condition
+
+  SYNOPSIS
+    JOIN_CACHE_BKA_UNIQUE::skip_index_tuple()
+      rseq             Value returned by bka_range_seq_init()
+      range_info       MRR range association data
+    
+  DESCRIPTION
+    See JOIN_CACHE_BKA::skip_index_tuple().
+    This function is the variant for use with
+    JOIN_CACHE_BKA_UNIQUE. The difference from JOIN_CACHE_BKA case is that
+    there may be multiple previous table record combinations that share the
+    same key, i.e. they map to the same MRR range.
+    As a consequence, we need to loop through all previous table record
+    combinations that match the given MRR range key range_info until we find
+    one that satisfies the index condition.
+
+  NOTE
+    Possible optimization:
+    Before we unpack the record from a previous table
+    check if this table is used in the condition.
+    If so then unpack the record otherwise skip the unpacking.
+    This should be done by a special virtual method
+    get_partial_record_by_pos().
+
+  RETURN
+    0    The record combination satisfies the index condition
+    1    Otherwise
+
+
+*/
+
+bool JOIN_CACHE_BKA_UNIQUE::skip_index_tuple(range_seq_t rseq, char *range_info)
+{
+  DBUG_ENTER("JOIN_CACHE_BKA_UNIQUE::skip_index_tuple");
+  JOIN_CACHE_BKA_UNIQUE *cache= (JOIN_CACHE_BKA_UNIQUE *) rseq;
+  uchar *last_rec_ref_ptr=  cache->get_next_rec_ref((uchar*) range_info);
+  uchar *next_rec_ref_ptr= last_rec_ref_ptr;
+  do
+  {
+    next_rec_ref_ptr= cache->get_next_rec_ref(next_rec_ref_ptr);
+    uchar *rec_ptr= next_rec_ref_ptr + cache->rec_fields_offset;
+    cache->get_record_by_pos(rec_ptr);
+    if (join_tab->cache_idx_cond->val_int())
+      DBUG_RETURN(FALSE);
+  } while(next_rec_ref_ptr != last_rec_ref_ptr);
+  DBUG_RETURN(TRUE);
+}
+
+
+/*
+  Check if the record combination matches the index condition
+
+  SYNOPSIS
+    bka_unique_skip_index_tuple()
+      rseq             Value returned by bka_range_seq_init()
+      range_info       MRR range association data
+    
+  DESCRIPTION
+    This is a wrapper for the JOIN_CACHE_BKA_UNIQUE::skip_index_tuple method,
+    see comments there.
+
+  NOTE
+    This function is used as a RANGE_SEQ_IF::skip_index_tuple callback.
+ 
+  RETURN
+    0    The record combination satisfies the index condition
+    1    Otherwise
+*/
+
+static 
+bool bka_unique_skip_index_tuple(range_seq_t rseq, char *range_info)
+{
+  DBUG_ENTER("bka_unique_skip_index_tuple");
+  JOIN_CACHE_BKA_UNIQUE *cache= (JOIN_CACHE_BKA_UNIQUE *) rseq;
+  DBUG_RETURN(cache->skip_index_tuple(rseq, range_info));
+}
+
 
 /*
   Using BKA_UNIQUE find matches from the next table for records from join buffer   
@@ -2922,11 +3096,12 @@ JOIN_CACHE_BKA_UNIQUE::join_matching_rec
   bool no_association= test(mrr_mode &  HA_MRR_NO_ASSOCIATION);
 
   /* Set functions to iterate over keys in the join buffer */
-
   RANGE_SEQ_IF seq_funcs= { bka_unique_range_seq_init,
                             bka_unique_range_seq_next,
                             check_only_first_match && !no_association ?
-			      bka_unique_range_seq_skip_record : 0 };
+                              bka_unique_range_seq_skip_record : 0,
+                            join_tab->cache_idx_cond ?
+                              bka_unique_skip_index_tuple : 0  };
 
   /* The value of skip_last must be always FALSE when this function is called */
   DBUG_ASSERT(!skip_last);

=== modified file 'sql/sql_lex.cc'
--- a/sql/sql_lex.cc	2009-01-26 16:03:39 +0000
+++ b/sql/sql_lex.cc	2009-01-29 21:17:59 +0000
@@ -1652,7 +1652,6 @@ void st_select_lex::init_select()
   select_limit= 0;      /* denotes the default limit = HA_POS_ERROR */
   offset_limit= 0;      /* denotes the default offset = 0 */
   with_sum_func= 0;
-  is_correlated= 0;
   cur_pos_in_select_list= UNDEF_POS;
   non_agg_fields.empty();
   cond_value= having_value= Item::COND_UNDEF;
@@ -1860,7 +1859,6 @@ void st_select_lex::mark_as_dependent(st
           sl->uncacheable|= UNCACHEABLE_UNITED;
       }
     }
-    s->is_correlated= TRUE;
     Item_subselect *subquery_predicate= s->master_unit()->item;
     if (subquery_predicate)
       subquery_predicate->is_correlated= TRUE;

=== modified file 'sql/sql_lex.h'
--- a/sql/sql_lex.h	2009-01-26 16:03:39 +0000
+++ b/sql/sql_lex.h	2009-01-29 21:17:59 +0000
@@ -673,8 +673,6 @@ public:
     query processing end even if we use temporary table
   */
   bool subquery_in_having;
-  /* TRUE <=> this SELECT is correlated w.r.t. some ancestor select */
-  bool is_correlated;
   /*
     This variable is required to ensure proper work of subqueries and
     stored procedures. Generally, one should use the states of

=== modified file 'sql/sql_parse.cc'
--- a/sql/sql_parse.cc	2009-01-31 15:53:35 +0000
+++ b/sql/sql_parse.cc	2009-02-03 09:16:53 +0000
@@ -2303,6 +2303,14 @@ mysql_execute_command(THD *thd)
     goto error;
 #else
   {
+    /* 
+       Reset warnings for BACKUP and RESTORE commands. Note: this will
+       cause problems if BACKUP/RESTORE is allowed inside stored
+       routines and events. In that case, warnings should not be
+       cleared.
+    */
+    thd->warning_info->opt_clear_warning_info(thd->query_id);
+
     /*
       Create a string from the backupdir system variable and pass
       to backup system.

=== modified file 'sql/sql_repl.cc'
--- a/sql/sql_repl.cc	2009-01-26 16:32:29 +0000
+++ b/sql/sql_repl.cc	2009-01-29 21:17:59 +0000
@@ -1017,6 +1017,31 @@ err:
 
 
 /**
+  Initialize mutex for slave start variable.
+*/
+
+void init_slave_start()
+{
+  pthread_mutex_init(&LOCK_slave_start, MY_MUTEX_INIT_FAST);
+  pthread_mutex_lock(&LOCK_slave_start);
+  allow_slave_start= TRUE;
+  reason_slave_blocked.length= 0;
+  reason_slave_blocked.str= (char *)"";
+  pthread_mutex_unlock(&LOCK_slave_start);
+}
+
+
+/**
+  Destroy mutex for slave start variable.
+*/
+
+void end_slave_start()
+{
+  pthread_mutex_destroy(&LOCK_slave_start);
+}
+
+
+/**
   Execute a START SLAVE statement.
 
   @param thd Pointer to THD object for the client thread executing the
@@ -1037,6 +1062,25 @@ int start_slave(THD* thd , Master_info*
 
   if (check_access(thd, SUPER_ACL, any_db,0,0,0,0))
     DBUG_RETURN(1);
+
+
+  /*
+    Ensure there are no restores running on the server.
+  */
+  pthread_mutex_lock(&LOCK_slave_start);
+  bool proceed= allow_slave_start;
+  bool success= TRUE;
+  if (!proceed)
+  {
+    slave_errno= ER_RESTORE_CANNOT_START_SLAVE;
+    if (net_report)
+      my_error(slave_errno, MYF(0), reason_slave_blocked);
+    success= FALSE;
+  }
+  pthread_mutex_unlock(&LOCK_slave_start);
+  if (!success)
+    DBUG_RETURN(1);
+
   lock_slave_threads(mi);  // this allows us to cleanly read slave_running
   // Get a mask of _stopped_ threads
   init_thread_mask(&thread_mask,mi,1 /* inverse */);

=== modified file 'sql/sql_repl.h'
--- a/sql/sql_repl.h	2008-05-09 10:27:23 +0000
+++ b/sql/sql_repl.h	2008-12-16 20:54:07 +0000
@@ -52,6 +52,8 @@ bool show_binlogs(THD* thd);
 extern int init_master_info(Master_info* mi);
 void kill_zombie_dump_threads(uint32 slave_server_id);
 int check_binlog_magic(IO_CACHE* log, const char** errmsg);
+void init_slave_start();
+void end_slave_start();
 
 typedef struct st_load_file_info
 {

=== modified file 'sql/sql_select.cc'
--- a/sql/sql_select.cc	2009-01-30 14:13:39 +0000
+++ b/sql/sql_select.cc	2009-02-03 09:16:53 +0000
@@ -136,7 +136,8 @@ static void restore_prev_sj_state(const
 
 static COND *optimize_cond(JOIN *join, COND *conds,
                            List<TABLE_LIST> *join_list,
-			   Item::cond_result *cond_value);
+			   bool build_equalities,
+                           Item::cond_result *cond_value);
 static bool const_expression_in_where(COND *conds,Item *item, Item **comp_item);
 static bool open_tmp_table(TABLE *table);
 static bool create_internal_tmp_table(TABLE *table, KEY *keyinfo, 
@@ -1487,7 +1488,7 @@ JOIN::optimize()
       thd->restore_active_arena(arena, &backup);
   }
 
-  conds= optimize_cond(this, conds, join_list, &cond_value);   
+  conds= optimize_cond(this, conds, join_list, TRUE, &cond_value);
   if (thd->is_error())
   {
     error= 1;
@@ -1496,7 +1497,7 @@ JOIN::optimize()
   }
 
   {
-    having= optimize_cond(this, having, join_list, &having_value);
+    having= optimize_cond(this, having, join_list, FALSE, &having_value);
     if (thd->is_error())
     {
       error= 1;
@@ -3549,7 +3550,10 @@ bool JOIN::flatten_subqueries()
       DBUG_RETURN(TRUE);
   }
 skip_conversion:
-  /* 3. Finalize those we didn't convert */
+  /* 
+    3. Finalize (perform IN->EXISTS rewrite) the subqueries that we didn't
+    convert:
+  */
   for (; in_subq!= in_subq_end; in_subq++)
   {
     JOIN *child_join= (*in_subq)->unit->first_select()->join;
@@ -3721,13 +3725,12 @@ bool find_eq_ref_candidate(TABLE *table,
      
     PRECONDITIONS
     When this function is called, the join may have several semi-join nests
-    (possibly within different semi-join nests), but it is guaranteed that
-    one semi-join nest does not contain another.
+    but it is guaranteed that one semi-join nest does not contain another.
    
     ACTION
     A table can be pulled out of the semi-join nest if
-     - It is a constant table
-     - It is accessed 
+     - It is a constant table, or
+     - It is accessed via eq_ref(outer_tables)
 
     POSTCONDITIONS
      * Tables that were pulled out have JOIN_TAB::emb_sj_nest == NULL
@@ -6233,7 +6236,8 @@ best_access_path(JOIN      *join,
                 in ReuseRangeEstimateForRef-3.
               */
               if (table->quick_keys.is_set(key) &&
-                  const_part & (1 << table->quick_key_parts[key]) &&
+                  (const_part & ((1 << table->quick_key_parts[key])-1)) ==
+                  (((key_part_map)1 << table->quick_key_parts[key])-1) &&
                   table->quick_n_ranges[key] == 1 &&
                   records > (double) table->quick_rows[key])
               {
@@ -6652,45 +6656,6 @@ choose_plan(JOIN *join, table_map join_t
             jtab_sort_func, (void*)join->emb_sjm_nest);
   join->cur_sj_inner_tables= 0;
 
-#if 0
-  if (!join->emb_sjm_nest && straight_join)
-  {
-    /* Put all sj-inner tables right after their last outer table table.  */
-    uint inner;
-
-    /* Find the first inner table (inner tables follow outer) */
-    for (inner= join->const_tables;
-         inner < join->tables && !join->best_ref[inner]->emb_sj_nest;
-         inner++);
-
-    while (inner < join->tables) /* for each group of inner tables... */
-    {
-      TABLE_LIST *emb_sj_nest= join->best_ref[inner]->emb_sj_nest;
-      uint n_tables= my_count_bits(emb_sj_nest->sj_inner_tables);
-      table_map cur_map= join->const_table_map;
-      table_map needed_map= emb_sj_nest->nested_join->sj_depends_on |
-                            emb_sj_nest->nested_join->sj_corr_tables;
-      /* Locate the last outer table with which this semi-join is correlated */
-      uint last_outer;
-      for (last_outer= join->const_tables; last_outer < inner; last_outer++)
-      {
-        cur_map |= join->best_ref[last_outer]->table->map;
-        if (!(needed_map & ~cur_map))
-          break;
-      }
-      /* Move the inner tables to here */
-      JOIN_TAB *tmp[MAX_TABLES];
-      memcpy(tmp, join->best_ref + inner, n_tables*sizeof(JOIN_TAB));
-      for (uint i= inner - 1; i > last_outer; i--)
-      {
-        join->best_ref[i + n_tables]= join->best_ref[i]; 
-      }
-      memcpy(join->best_ref + last_outer + 1, tmp, n_tables*sizeof(JOIN_TAB));
-      inner += n_tables;
-    }
-  }
-#endif
-
   if (straight_join)
   {
     optimize_straight_join(join, join_tables);
@@ -9037,7 +9002,12 @@ bool uses_index_fields_only(Item *item,
     }
   case Item::COND_ITEM:
     {
-      /* This is a function, apply condition recursively to arguments */
+      /*
+        This is an AND/OR condition. Regular AND/OR clauses are handled by
+        make_cond_for_index() which will chop off the part that can be
+        checked with index. This code is for handling non-top-level AND/ORs,
+        e.g. func(x AND y).
+      */
       List_iterator<Item> li(*((Item_cond*)item)->argument_list());
       Item *item;
       while ((item=li++))
@@ -9052,7 +9022,13 @@ bool uses_index_fields_only(Item *item,
       Item_field *item_field= (Item_field*)item;
       if (item_field->field->table != tbl) 
         return TRUE;
-      return item_field->field->part_of_key.is_set(keyno);
+      /*
+        The below is probably a repetition - the first part checks the
+        other two, but let's play it safe:
+      */
+      return item_field->field->part_of_key.is_set(keyno) &&
+             item_field->field->type() != MYSQL_TYPE_GEOMETRY &&
+             item_field->field->type() != MYSQL_TYPE_BLOB;
     }
   case Item::REF_ITEM:
     return uses_index_fields_only(item->real_item(), tbl, keyno,
@@ -9103,6 +9079,7 @@ Item *make_cond_for_index(Item *cond, TA
     uint n_marked= 0;
     if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
     {
+      table_map used_tables= 0;
       Item_cond_and *new_cond=new Item_cond_and;
       if (!new_cond)
 	return (COND*) 0;
@@ -9112,7 +9089,10 @@ Item *make_cond_for_index(Item *cond, TA
       {
 	Item *fix= make_cond_for_index(item, table, keyno, other_tbls_ok);
 	if (fix)
+        {
 	  new_cond->argument_list()->push_back(fix);
+          used_tables|= fix->used_tables();
+        }
         n_marked += test(item->marker == ICP_COND_USES_INDEX_ONLY);
       }
       if (n_marked ==((Item_cond*)cond)->argument_list()->elements)
@@ -9121,9 +9101,11 @@ Item *make_cond_for_index(Item *cond, TA
       case 0:
 	return (COND*) 0;
       case 1:
+        new_cond->used_tables_cache= used_tables;
 	return new_cond->argument_list()->head();
       default:
 	new_cond->quick_fix_field();
+        new_cond->used_tables_cache= used_tables;
 	return new_cond;
       }
     }
@@ -9145,6 +9127,7 @@ Item *make_cond_for_index(Item *cond, TA
       if (n_marked ==((Item_cond*)cond)->argument_list()->elements)
         cond->marker= ICP_COND_USES_INDEX_ONLY;
       new_cond->quick_fix_field();
+      new_cond->used_tables_cache= ((Item_cond_or*) cond)->used_tables_cache;
       new_cond->top_level_item();
       return new_cond;
     }
@@ -9250,9 +9233,28 @@ static void push_index_cond(JOIN_TAB *ta
 
     if (idx_cond)
     {
+      Item *idx_remainder_cond= 0;
       tab->pre_idx_push_select_cond= tab->select_cond;
-      Item *idx_remainder_cond= 
-        tab->table->file->idx_cond_push(keyno, idx_cond);
+
+      /*
+        For BKA cache we store condition to special BKA cache field
+        because evaluation of the condition requires additional operations
+        before the evaluation. This condition is used in 
+        JOIN_CACHE_BKA[_UNIQUE]::skip_index_tuple() functions.
+      */
+      if (tab->use_join_cache &&
+          /*
+            if cache is used then the value is TRUE only 
+            for BKA[_UNIQUE] cache (see check_join_cache_usage func).
+            In this case other_tbls_ok is an equivalent of
+            cache->is_key_access().
+          */
+          other_tbls_ok &&
+          (idx_cond->used_tables() &
+           ~(tab->table->map | tab->join->const_table_map)))
+        tab->cache_idx_cond= idx_cond;
+      else
+        idx_remainder_cond= tab->table->file->idx_cond_push(keyno, idx_cond);
 
       /*
         Disable eq_ref's "lookup cache" if we've pushed down an index
@@ -9730,15 +9732,45 @@ bool setup_sj_materialization(JOIN_TAB *
       bool dummy;
       Item_equal *item_eq;
       Field *copy_to=((Item_field*)it++)->field; 
-      Item *head;
+      /*
+        Tricks with Item_equal are due to the following: suppose we have a
+        query:
+        
+        ... WHERE cond(ot.col) AND ot.col IN (SELECT it2.col FROM it1,it2
+                                               WHERE it1.col= it2.col)
+         then equality propagation will create an 
+         
+           Item_equal(it1.col, it2.col, ot.col) 
+         
+         then substitute_for_best_equal_field() will change the conditions
+         according to the join order:
+
+           it1
+           it2    it1.col=it2.col
+           ot     cond(it1.col)
+
+         although we've originally had "SELECT it2.col", conditions attached 
+         to subsequent outer tables will refer to it1.col, so SJM-Scan will
+         need to unpack data to there. 
+         That is, if an element from subquery's select list participates in 
+         equality propagation, then we need to unpack it to the first
+         equality propagation member that refers to a table that is
+         within the subquery.
+      */
       item_eq= find_item_equal(tab->join->cond_equal, copy_to, &dummy);
 
-      if (!item_eq->const_item && 
-          (head= item_eq->fields.head())->used_tables() &
-          emb_sj_nest->sj_inner_tables)
+      if (item_eq)
       {
-        DBUG_ASSERT(head->type() == Item::FIELD_ITEM);
-        copy_to= ((Item_field*)head)->field;
+        List_iterator<Item_field> it(item_eq->fields);
+        Item_field *item;
+        while ((item= it++))
+        {
+          if (!(item->used_tables() & ~emb_sj_nest->sj_inner_tables))
+          {
+            copy_to= item->field;
+            break;
+          }
+        }
       }
       sjm->copy_field[i].set(copy_to, sjm->table->field[i], FALSE);
     }
@@ -9752,10 +9784,12 @@ bool setup_sj_materialization(JOIN_TAB *
 
   SYNOPSIS
     check_join_cache_usage()
-      tab            joined table to check join buffer usage for 
-      join           join for which the check is performed 
-      options        options of the join 
-      no_jbuf_after  don't use join buffering after table with this number
+      tab                 joined table to check join buffer usage for
+      join                join for which the check is performed
+      options             options of the join
+      no_jbuf_after       don't use join buffering after table with this number
+      icp_other_tables_ok OUT TRUE if condition pushdown supports
+                          other tables presence
 
   DESCRIPTION
     The function finds out whether the table 'tab' can be joined using a join
@@ -9830,14 +9864,15 @@ bool setup_sj_materialization(JOIN_TAB *
 #endif
 
   RETURN
-    TRUE   if a join buffer can be employed to join the table 'tab'
-    FALSE  otherwise 
+
+    cache level if cache is used, otherwise returns 0
 */
 
 static
-bool check_join_cache_usage(JOIN_TAB *tab,
+uint check_join_cache_usage(JOIN_TAB *tab,
                             JOIN *join, ulonglong options,
-                            uint no_jbuf_after)
+                            uint no_jbuf_after,
+                            bool *icp_other_tables_ok)
 {
   uint flags;
   COST_VECT cost;
@@ -9847,9 +9882,10 @@ bool check_join_cache_usage(JOIN_TAB *ta
   uint cache_level= join->thd->variables.join_cache_level;
   bool force_unlinked_cache= test(cache_level & 1);
   uint i= tab - join->join_tab;
-  
+
+  *icp_other_tables_ok= TRUE;
   if (cache_level == 0 || i == join->const_tables)
-    return FALSE;
+    return 0;
 
   if (options & SELECT_NO_JOIN_CACHE)
     goto no_join_cache;
@@ -9916,17 +9952,19 @@ bool check_join_cache_usage(JOIN_TAB *ta
     if (cache_level <= 2 && (tab->first_inner || tab->first_sj_inner_tab))
       goto no_join_cache;
     if ((options & SELECT_DESCRIBE) ||
-        (tab->cache || 
-         (tab->cache= new JOIN_CACHE_BNL(join, tab, prev_cache))) &&
+        ((tab->cache= new JOIN_CACHE_BNL(join, tab, prev_cache))) &&
         !tab->cache->init())
-      return TRUE;
+    {
+      *icp_other_tables_ok= FALSE;
+      return cache_level;
+    }
     goto no_join_cache;
   case JT_SYSTEM:
   case JT_CONST:
   case JT_REF:
   case JT_EQ_REF:
     if (cache_level <= 4)
-      return FALSE;
+      return 0;
     flags= HA_MRR_NO_NULL_ENDPOINTS;
     if (tab->table->covering_keys.is_set(tab->ref.key))
       flags|= HA_MRR_INDEX_ONLY;
@@ -9935,14 +9973,12 @@ bool check_join_cache_usage(JOIN_TAB *ta
     if ((rows != HA_POS_ERROR) && !(flags & HA_MRR_USE_DEFAULT_IMPL) &&
         (!(flags & HA_MRR_NO_ASSOCIATION) || cache_level > 6) &&
         ((options & SELECT_DESCRIBE) ||
-         (tab->cache ||
-          cache_level <= 6 && 
+         (cache_level <= 6 && 
           (tab->cache= new JOIN_CACHE_BKA(join, tab, flags, prev_cache)) ||
 	  cache_level > 6 &&  
           (tab->cache= new JOIN_CACHE_BKA_UNIQUE(join, tab, flags, prev_cache))
-         ) &&
-	 !tab->cache->init()))
-      return TRUE;
+          ) && !tab->cache->init()))
+      return cache_level;
     goto no_join_cache;
   default : ;
   }
@@ -9950,7 +9986,7 @@ bool check_join_cache_usage(JOIN_TAB *ta
 no_join_cache:
   if (cache_level>2)
     revise_cache_usage(tab); 
-  return FALSE;          
+  return 0;
 }
 
 
@@ -9994,10 +10030,12 @@ make_join_readinfo(JOIN *join, ulonglong
   {
     JOIN_TAB *tab=join->join_tab+i;
     TABLE *table=tab->table;
-    bool using_join_cache;
+    bool icp_other_tables_ok;
     tab->read_record.table= table;
     tab->read_record.file=table->file;
     tab->next_select=sub_select;		/* normal select */
+    tab->use_join_cache= FALSE;
+    tab->cache_idx_cond= 0;
     /* 
       TODO: don't always instruct first table's ref/range access method to 
       produce sorted output.
@@ -10032,13 +10070,12 @@ make_join_readinfo(JOIN *join, ulonglong
       tab->read_first_record= tab->type == JT_SYSTEM ?
 	                        join_read_system :join_read_const;
       tab->read_record.read_record= join_no_more_records;
-      using_join_cache= FALSE;
-      if (check_join_cache_usage(tab, join, options, no_jbuf_after))
+      if (check_join_cache_usage(tab, join, options, no_jbuf_after,
+                                 &icp_other_tables_ok))
       {
-        using_join_cache= TRUE;
+        tab->use_join_cache= TRUE;
 	tab[-1].next_select=sub_select_cache;
       }
-      tab->use_join_cache= using_join_cache;        
       if (table->covering_keys.is_set(tab->ref.key) &&
           !table->no_keyread)
       {
@@ -10046,7 +10083,7 @@ make_join_readinfo(JOIN *join, ulonglong
         table->file->extra(HA_EXTRA_KEYREAD);
       }
       else
-        push_index_cond(tab, tab->ref.key, !using_join_cache);
+        push_index_cond(tab, tab->ref.key, icp_other_tables_ok);
       break;
     case JT_EQ_REF:
       table->status=STATUS_NO_RECORD;
@@ -10059,13 +10096,12 @@ make_join_readinfo(JOIN *join, ulonglong
       tab->quick=0;
       tab->read_first_record= join_read_key;
       tab->read_record.read_record= join_no_more_records;
-      using_join_cache= FALSE;
-      if (check_join_cache_usage(tab, join, options, no_jbuf_after))
+      if (check_join_cache_usage(tab, join, options, no_jbuf_after,
+                                 &icp_other_tables_ok))
       {
-        using_join_cache= TRUE;
+        tab->use_join_cache= TRUE;
 	tab[-1].next_select=sub_select_cache;
       }
-      tab->use_join_cache= using_join_cache;        
       if (table->covering_keys.is_set(tab->ref.key) &&
 	  !table->no_keyread)
       {
@@ -10073,7 +10109,7 @@ make_join_readinfo(JOIN *join, ulonglong
 	table->file->extra(HA_EXTRA_KEYREAD);
       }
       else
-        push_index_cond(tab, tab->ref.key, !using_join_cache);
+        push_index_cond(tab, tab->ref.key, icp_other_tables_ok);
       break;
     case JT_REF_OR_NULL:
     case JT_REF:
@@ -10085,11 +10121,11 @@ make_join_readinfo(JOIN *join, ulonglong
       }
       delete tab->quick;
       tab->quick=0;
-      using_join_cache= FALSE;
-      if (check_join_cache_usage(tab, join, options, no_jbuf_after))
+      if (check_join_cache_usage(tab, join, options, no_jbuf_after,
+                                 &icp_other_tables_ok))
       {
-        using_join_cache= TRUE;
-	tab[-1].next_select=sub_select_cache;
+        tab->use_join_cache= TRUE;
+        tab[-1].next_select=sub_select_cache;
       } 
       if (tab->type == JT_REF)
       {
@@ -10101,7 +10137,6 @@ make_join_readinfo(JOIN *join, ulonglong
 	tab->read_first_record= join_read_always_key_or_null;
 	tab->read_record.read_record= join_read_next_same_or_null;
       }
-      tab->use_join_cache= using_join_cache;
       if (table->covering_keys.is_set(tab->ref.key) &&
 	  !table->no_keyread)
       {
@@ -10109,7 +10144,7 @@ make_join_readinfo(JOIN *join, ulonglong
 	table->file->extra(HA_EXTRA_KEYREAD);
       }
       else
-        push_index_cond(tab, tab->ref.key, !using_join_cache);
+        push_index_cond(tab, tab->ref.key, icp_other_tables_ok);
       break;
     case JT_FT:
       table->status=STATUS_NO_RECORD;
@@ -10124,11 +10159,11 @@ make_join_readinfo(JOIN *join, ulonglong
           materialization nest.
       */
       table->status=STATUS_NO_RECORD;
-      using_join_cache= FALSE;
-      if (check_join_cache_usage(tab, join, options, no_jbuf_after))
+      if (check_join_cache_usage(tab, join, options, no_jbuf_after,
+                                 &icp_other_tables_ok))
       {
-          using_join_cache= TRUE;
-	  tab[-1].next_select=sub_select_cache;
+        tab->use_join_cache= TRUE;
+        tab[-1].next_select=sub_select_cache;
       }
       /* These init changes read_record */
       if (tab->use_quick == 2)
@@ -10200,10 +10235,9 @@ make_join_readinfo(JOIN *join, ulonglong
 	    tab->type=JT_NEXT;		// Read with index_first / index_next
 	  }
 	}
-        tab->use_join_cache= using_join_cache;
         if (tab->select && tab->select->quick &&
             tab->select->quick->index != MAX_KEY && ! tab->table->key_read)
-          push_index_cond(tab, tab->select->quick->index, !using_join_cache);
+          push_index_cond(tab, tab->select->quick->index, icp_other_tables_ok);
       }
       break;
     default:
@@ -13214,7 +13248,7 @@ static void restore_prev_sj_state(const
 
 static COND *
 optimize_cond(JOIN *join, COND *conds, List<TABLE_LIST> *join_list,
-              Item::cond_result *cond_value)
+              bool build_equalities, Item::cond_result *cond_value)
 {
   THD *thd= join->thd;
   DBUG_ENTER("optimize_cond");
@@ -13232,10 +13266,12 @@ optimize_cond(JOIN *join, COND *conds, L
       multiple equality contains a constant.
     */ 
     DBUG_EXECUTE("where", print_where(conds, "original", QT_ORDINARY););
-    conds= build_equal_items(join->thd, conds, NULL, join_list,
-                             &join->cond_equal);
-    DBUG_EXECUTE("where",print_where(conds,"after equal_items", QT_ORDINARY););
-
+    if (build_equalities)
+    {
+      conds= build_equal_items(join->thd, conds, NULL, join_list,
+                               &join->cond_equal);
+      DBUG_EXECUTE("where",print_where(conds,"after equal_items", QT_ORDINARY););
+    }
     /* change field = field to field = const for each found field = const */
     propagate_cond_constants(thd, (I_List<COND_CMP> *) 0, conds, conds);
     /*
@@ -18851,7 +18887,7 @@ check_reverse_order:
     }
   }
   else if (select && select->quick)
-    select->quick->sorted= 1;
+    select->quick->need_sorted_output();
   DBUG_RETURN(1);
 use_filesort:
   table->file->extra(HA_EXTRA_NO_ORDERBY_LIMIT);
@@ -21526,6 +21562,8 @@ void select_describe(JOIN *join, bool ne
         if (keyno != MAX_KEY && keyno == table->file->pushed_idx_cond_keyno &&
             table->file->pushed_idx_cond)
           extra.append(STRING_WITH_LEN("; Using index condition"));
+        else if (tab->cache_idx_cond)
+          extra.append(STRING_WITH_LEN("; Using index condition(BKA)"));
 
         if (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || 
             quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT ||

=== modified file 'sql/sql_select.h'
--- a/sql/sql_select.h	2009-01-16 14:28:04 +0000
+++ b/sql/sql_select.h	2009-01-26 15:07:22 +0000
@@ -219,6 +219,10 @@ typedef struct st_join_table
   TABLE_REF	ref;
   bool          use_join_cache;
   JOIN_CACHE	*cache;
+  /*
+    Index condition for BKA access join
+  */
+  Item          *cache_idx_cond;
   SQL_SELECT    *cache_select;
   JOIN		*join;
   /* SemiJoinDuplicateElimination variables: */
@@ -833,7 +837,7 @@ public:
     join= j;
     join_tab= tab;
     prev_cache= next_cache= 0;
-    mrr_mode= flags;    
+    mrr_mode= flags;
   }
 
   /* 
@@ -860,8 +864,10 @@ public:
   bool is_key_access() { return TRUE; }
 
   /* Shall get the key built over the next record from the join buffer */
-  virtual uint get_next_key(uchar **key);    
+  virtual uint get_next_key(uchar **key);
 
+  /* Check if the record combination matches the index condition */
+  bool skip_index_tuple(range_seq_t rseq, char *range_info);
 };
 
 /*
@@ -1150,7 +1156,9 @@ public:
     return get_next_rec_ref(curr_key_entry+key_entry_length-
                             get_size_of_rec_offset());
   }
-
+  
+  /* Check if the record combination matches the index condition */
+  bool skip_index_tuple(range_seq_t rseq, char *range_info);
 };
 
 

=== modified file 'storage/falcon/ha_falcon.cpp'
--- a/storage/falcon/ha_falcon.cpp	2009-01-20 14:42:38 +0000
+++ b/storage/falcon/ha_falcon.cpp	2009-02-03 09:16:53 +0000
@@ -1848,7 +1848,7 @@ ha_rows StorageInterface::multi_range_re
 											   flags, cost);
 	if ((res != HA_POS_ERROR) && !native_requested)
 		{
-		*flags &= ~HA_MRR_USE_DEFAULT_IMPL;
+		*flags &= ~(HA_MRR_USE_DEFAULT_IMPL | HA_MRR_SORTED);
 		/* We'll be returning records without telling which range they are contained in */
 		*flags |= HA_MRR_NO_ASSOCIATION;
 		/* We'll use our own internal buffer so we won't need any buffer space from the SQL layer */
@@ -1869,7 +1869,7 @@ ha_rows StorageInterface::multi_range_re
 										 cost);
 	if ((res != HA_POS_ERROR) && !native_requested)
 		{
-		*flags &= ~HA_MRR_USE_DEFAULT_IMPL;
+		*flags &= ~(HA_MRR_USE_DEFAULT_IMPL | HA_MRR_SORTED);
 		/* See _info_const() function for explanation of these: */
 		*flags |= HA_MRR_NO_ASSOCIATION;
 		*bufsz = 0;

=== modified file 'storage/maria/ha_maria.h'
--- a/storage/maria/ha_maria.h	2008-12-14 11:36:15 +0000
+++ b/storage/maria/ha_maria.h	2009-01-12 17:50:30 +0000
@@ -185,6 +185,5 @@ public:
   Item *idx_cond_push(uint keyno, Item* idx_cond);
 private:
   DsMrr_impl ds_mrr;
-  key_map keys_with_parts;
   friend my_bool index_cond_func_maria(void *arg);
 };

=== modified file 'storage/maria/ma_key.c'
--- a/storage/maria/ma_key.c	2008-12-14 11:36:15 +0000
+++ b/storage/maria/ma_key.c	2009-01-25 16:59:07 +0000
@@ -26,7 +26,8 @@
 
 #define CHECK_KEYS                              /* Enable safety checks */
 
-static int _ma_put_key_in_record(MARIA_HA *info,uint keynr,uchar *record);
+static int _ma_put_key_in_record(MARIA_HA *info, uint keynr,
+                                 my_bool unpack_blobs, uchar *record);
 
 #define FIX_LENGTH(cs, pos, length, char_length)                            \
             do {                                                            \
@@ -476,6 +477,9 @@ void _ma_copy_key(MARIA_KEY *to, const M
     _ma_put_key_in_record()
     info		MARIA handler
     keynr		Key number that was used
+    unpack_blobs        TRUE  <=> Unpack blob columns
+                        FALSE <=> Skip them. This is used by index condition 
+                                  pushdown check function
     record 		Store key here
 
     Last read key is in info->lastkey
@@ -489,7 +493,7 @@ void _ma_copy_key(MARIA_KEY *to, const M
 */
 
 static int _ma_put_key_in_record(register MARIA_HA *info, uint keynr,
-				 uchar *record)
+				 my_bool unpack_blobs, uchar *record)
 {
   reg2 uchar *key;
   uchar *pos,*key_end;
@@ -582,16 +586,19 @@ static int _ma_put_key_in_record(registe
       if (length > keyseg->length || key+length > key_end)
 	goto err;
 #endif
-      memcpy(record+keyseg->start+keyseg->bit_start,
-	     (char*) &blob_ptr,sizeof(char*));
-      memcpy(blob_ptr,key,length);
-      blob_ptr+=length;
+      if (unpack_blobs)
+      {
+        memcpy(record+keyseg->start+keyseg->bit_start,
+               (char*) &blob_ptr,sizeof(char*));
+        memcpy(blob_ptr,key,length);
+        blob_ptr+=length;
 
-      /* The above changed info->lastkey2. Inform maria_rnext_same(). */
-      info->update&= ~HA_STATE_RNEXT_SAME;
+        /* The above changed info->lastkey2. Inform maria_rnext_same(). */
+        info->update&= ~HA_STATE_RNEXT_SAME;
 
-      _ma_store_blob_length(record+keyseg->start,
-			    (uint) keyseg->bit_start,length);
+        _ma_store_blob_length(record+keyseg->start,
+                              (uint) keyseg->bit_start,length);
+      }
       key+=length;
     }
     else if (keyseg->flag & HA_SWAP_KEY)
@@ -634,7 +641,7 @@ int _ma_read_key_record(MARIA_HA *info,
   {
     if (info->lastinx >= 0)
     {				/* Read only key */
-      if (_ma_put_key_in_record(info,(uint) info->lastinx,buf))
+      if (_ma_put_key_in_record(info, (uint)info->lastinx, TRUE, buf))
       {
         maria_print_error(info->s, HA_ERR_CRASHED);
 	my_errno=HA_ERR_CRASHED;
@@ -671,7 +678,7 @@ int ma_check_index_cond(register MARIA_H
 {
   if (info->index_cond_func)
   {
-    if (_ma_put_key_in_record(info, keynr, record))
+    if (_ma_put_key_in_record(info, keynr, FALSE, record))
     {
       maria_print_error(info->s, HA_ERR_CRASHED);
       my_errno=HA_ERR_CRASHED;

=== modified file 'storage/myisam/ha_myisam.cc'
--- a/storage/myisam/ha_myisam.cc	2009-01-26 16:03:39 +0000
+++ b/storage/myisam/ha_myisam.cc	2009-01-29 21:17:59 +0000
@@ -658,8 +658,6 @@ int ha_myisam::open(const char *name, in
       int_table_flags|= HA_HAS_OLD_CHECKSUM;
   }
   
-  keys_with_parts.clear_all();
-
   for (i= 0; i < table->s->keys; i++)
   {
     plugin_ref parser= table->key_info[i].parser;
@@ -667,17 +665,6 @@ int ha_myisam::open(const char *name, in
       file->s->keyinfo[i].parser=
         (struct st_mysql_ftparser *)plugin_decl(parser)->info;
     table->key_info[i].block_size= file->s->keyinfo[i].block_length;
-
-    KEY_PART_INFO *kp= table->key_info[i].key_part;
-    KEY_PART_INFO *kp_end= kp + table->key_info[i].key_parts;
-    for (; kp != kp_end; kp++)
-    {
-      if (!kp->field->part_of_key.is_set(i))
-      {
-        keys_with_parts.set_bit(i);
-        break;
-      }
-    }
   }
   my_errno= 0;
   goto end;

=== modified file 'storage/myisam/ha_myisam.h'
--- a/storage/myisam/ha_myisam.h	2009-01-09 16:38:52 +0000
+++ b/storage/myisam/ha_myisam.h	2009-01-12 17:50:30 +0000
@@ -62,8 +62,7 @@ class ha_myisam: public handler
   {
     return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
             0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
-            HA_READ_ORDER | HA_KEYREAD_ONLY | 
-            (keys_with_parts.is_set(inx)?0:HA_DO_INDEX_COND_PUSHDOWN));
+            HA_READ_ORDER | HA_KEYREAD_ONLY | HA_DO_INDEX_COND_PUSHDOWN);
   }
   uint max_supported_keys()          const { return MI_MAX_KEY; }
   uint max_supported_key_length()    const { return HA_MAX_KEY_LENGTH; }
@@ -168,7 +167,6 @@ public:
   Item *idx_cond_push(uint keyno, Item* idx_cond);
 private:
   DsMrr_impl ds_mrr;
-  key_map keys_with_parts;
   friend my_bool index_cond_func_myisam(void *arg);
 };
 

=== modified file 'storage/myisam/mi_close.c'
--- a/storage/myisam/mi_close.c	2008-07-09 07:12:43 +0000
+++ b/storage/myisam/mi_close.c	2008-12-30 12:13:31 +0000
@@ -30,6 +30,7 @@ int mi_close(register MI_INFO *info)
   DBUG_PRINT("enter",("base: %p  reopen: %u  locks: %u",
 		      info, (uint) share->reopen,
                       (uint) share->tot_locks));
+  DBUG_PRINT("myisam", ("close '%s'", share->unresolv_file_name));
 
   pthread_mutex_lock(&THR_LOCK_myisam);
   if (info->lock_type == F_EXTRA_LCK)
@@ -65,6 +66,7 @@ int mi_close(register MI_INFO *info)
   my_free(mi_get_rec_buff_ptr(info, info->rec_buff), MYF(MY_ALLOW_ZERO_PTR));
   if (flag)
   {
+    DBUG_PRINT("myisam", ("close share '%s'", share->unresolv_file_name));
     if (share->kfile >= 0 &&
 	flush_key_blocks(share->key_cache, share->kfile,
 			 share->temporary ? FLUSH_IGNORE_CHANGED :

=== modified file 'storage/myisam/mi_key.c'
--- a/storage/myisam/mi_key.c	2008-07-22 14:16:22 +0000
+++ b/storage/myisam/mi_key.c	2009-01-29 21:17:59 +0000
@@ -31,7 +31,8 @@
               set_if_smaller(char_length,length);                           \
             } while(0)
 
-static int _mi_put_key_in_record(MI_INFO *info,uint keynr,uchar *record);
+static int _mi_put_key_in_record(MI_INFO *info, uint keynr, 
+                                 my_bool unpack_blobs, uchar *record);
 
 /*
   Make a intern key from a record
@@ -311,6 +312,9 @@ uint _mi_pack_key(register MI_INFO *info
     _mi_put_key_in_record()
     info		MyISAM handler
     keynr		Key number that was used
+    unpack_blobs        TRUE  <=> Unpack blob columns
+                        FALSE <=> Skip them. This is used by index condition 
+                                  pushdown check function
     record 		Store key here
 
     Last read key is in info->lastkey
@@ -323,8 +327,8 @@ uint _mi_pack_key(register MI_INFO *info
    1   error
 */
 
-static int _mi_put_key_in_record(register MI_INFO *info, uint keynr,
-				 uchar *record)
+static int _mi_put_key_in_record(register MI_INFO *info, uint keynr, 
+                                 my_bool unpack_blobs, uchar *record)
 {
   reg2 uchar *key;
   uchar *pos,*key_end;
@@ -417,16 +421,17 @@ static int _mi_put_key_in_record(registe
       if (length > keyseg->length || key+length > key_end)
 	goto err;
 #endif
-      memcpy(record+keyseg->start+keyseg->bit_start,
-	     (char*) &blob_ptr,sizeof(char*));
-      memcpy(blob_ptr,key,length);
-      blob_ptr+=length;
-
-      /* The above changed info->lastkey2. Inform mi_rnext_same(). */
-      info->update&= ~HA_STATE_RNEXT_SAME;
-
-      _mi_store_blob_length(record+keyseg->start,
-			    (uint) keyseg->bit_start,length);
+      if (unpack_blobs)
+      {
+        memcpy(record+keyseg->start+keyseg->bit_start,
+               (char*) &blob_ptr,sizeof(char*));
+        memcpy(blob_ptr,key,length);
+        blob_ptr+=length;
+        /* The above changed info->lastkey2. Inform mi_rnext_same(). */
+        info->update&= ~HA_STATE_RNEXT_SAME;
+        _mi_store_blob_length(record+keyseg->start,
+                              (uint) keyseg->bit_start,length);
+      }
       key+=length;
     }
     else if (keyseg->flag & HA_SWAP_KEY)
@@ -470,7 +475,7 @@ int _mi_read_key_record(MI_INFO *info, m
   {
     if (info->lastinx >= 0)
     {				/* Read only key */
-      if (_mi_put_key_in_record(info,(uint) info->lastinx,buf))
+      if (_mi_put_key_in_record(info, (uint)info->lastinx, TRUE, buf))
       {
         mi_print_error(info->s, HA_ERR_CRASHED);
 	my_errno=HA_ERR_CRASHED;
@@ -504,7 +509,7 @@ int _mi_read_key_record(MI_INFO *info, m
 
 int mi_check_index_cond(register MI_INFO *info, uint keynr, uchar *record)
 {
-  if (_mi_put_key_in_record(info, keynr, record))
+  if (_mi_put_key_in_record(info, keynr, FALSE, record))
   {
     mi_print_error(info->s, HA_ERR_CRASHED);
     my_errno=HA_ERR_CRASHED;

=== modified file 'storage/myisam/mi_open.c'
--- a/storage/myisam/mi_open.c	2009-01-30 14:13:39 +0000
+++ b/storage/myisam/mi_open.c	2009-02-03 09:16:53 +0000
@@ -89,6 +89,7 @@ MI_INFO *mi_open(const char *name, int m
   my_off_t key_root[HA_MAX_POSSIBLE_KEY],key_del[MI_MAX_KEY_BLOCK_SIZE];
   ulonglong max_key_file_length, max_data_file_length;
   DBUG_ENTER("mi_open");
+  DBUG_PRINT("myisam", ("open '%s'", name));
 
   LINT_INIT(m_info);
   kfile= -1;
@@ -115,6 +116,7 @@ MI_INFO *mi_open(const char *name, int m
   pthread_mutex_lock(&THR_LOCK_myisam);
   if (!(old_info=test_if_reopen(name_buff)))
   {
+    DBUG_PRINT("myisam", ("open share '%s'", name));
     share= &share_buff;
     bzero((uchar*) &share_buff,sizeof(share_buff));
     share_buff.state.rec_per_key_part=rec_per_key_part;

=== modified file 'storage/myisam/myisam_backup_engine.cc'
--- a/storage/myisam/myisam_backup_engine.cc	2009-01-13 15:26:20 +0000
+++ b/storage/myisam/myisam_backup_engine.cc	2009-01-21 15:00:23 +0000
@@ -1758,6 +1758,25 @@ result_t Table_restore::close()
     But since the share does now cache the new values from the
     index file, the backup kernel's close writes the correct
     information back to the file.
+
+    This used to work until a brave soul tried to backup and restore
+    compressed tables. Now we know, that replacing the state info is
+    insufficient. The table is always re-created as a non-compressed
+    table. The setup of the share is pretty different between normal and
+    compressed tables. We could try to replace all relevant information.
+    But that would make quite some code duplication with mi_open().
+    Changes there might be forgotten here. And it might still be
+    insufficient. The table instance MI_OPEN might have some setup
+    differences too. Perhaps even the handler ha_myisam. In theory it
+    might even happen that we create fixed length records, while the
+    restored MYI has dynamic records or vice versa. Or we restore a
+    table that had been created by a former MySQL version and has
+    different field types, e.g. varchar.
+
+    So the only practical solution seems to be to re-open the table
+    after restore. But this must be done in the server. The fix here is
+    still required to defeat writing of wrong share data at close as
+    described above.
   */
   {
     MI_INFO      *mi_info;

Thread
bzr commit into mysql-6.0-bugteam branch (Matthias.Leich:3005) Matthias Leich3 Feb