List: Commits
From: Tor Didriksen
Date: October 5, 2011, 8:23am
Subject: bzr push into mysql-trunk branch (tor.didriksen:3393 to 3396)
 3396 Tor Didriksen	2011-10-05 [merge]
      Merge trunk => opt-team

    modified:
      mysql-test/r/join_outer_bka_nixbnl.result
      mysql-test/r/subquery_nomat_nosj_bka_nixbnl.result
      mysql-test/r/subquery_none_bka_nixbnl.result
 3395 Tor Didriksen	2011-10-05 [merge]
      empty merge trunk => opt-team

 3394 Tor Didriksen	2011-10-05 [merge]
      Merge trunk => opt-team

    removed:
      mysql-test/extra/rpl_tests/rpl_insert_duplicate.test
      mysql-test/suite/rpl/r/rpl_insert_duplicate.result
      mysql-test/suite/rpl/r/rpl_insert_select.result
      mysql-test/suite/rpl/t/rpl_insert_duplicate.test
      mysql-test/suite/rpl/t/rpl_insert_select.test
    added:
      mysql-test/suite/rpl/r/rpl_replicate_rewrite_db.result
      mysql-test/suite/rpl/t/rpl_replicate_rewrite_db.test
    modified:
      client/mysqltest.cc
      cmake/create_initial_db.cmake.in
      libmysql/authentication_win/handshake_client.cc
      mysql-test/extra/rpl_tests/rpl_insert_id.test
      mysql-test/extra/rpl_tests/rpl_insert_ignore.test
      mysql-test/include/commit.inc
      mysql-test/include/subquery.inc
      mysql-test/r/commit_1innodb.result
      mysql-test/r/group_by.result
      mysql-test/r/having.result
      mysql-test/r/join_outer.result
      mysql-test/r/join_outer_bka.result
      mysql-test/r/partition.result
      mysql-test/r/subquery_nomat_nosj.result
      mysql-test/r/subquery_nomat_nosj_bka.result
      mysql-test/r/subquery_none.result
      mysql-test/r/subquery_none_bka.result
      mysql-test/suite/binlog/r/binlog_stm_blackhole.result
      mysql-test/suite/binlog/r/binlog_unsafe.result
      mysql-test/suite/binlog/t/binlog_unsafe.test
      mysql-test/suite/rpl/r/rpl_checksum.result
      mysql-test/suite/rpl/r/rpl_insert_ignore.result
      mysql-test/suite/rpl/r/rpl_known_bugs_detection.result
      mysql-test/suite/rpl/r/rpl_log_pos.result
      mysql-test/suite/rpl/r/rpl_manual_change_index_file.result
      mysql-test/suite/rpl/r/rpl_packet.result
      mysql-test/suite/rpl/r/rpl_row_event_max_size.result
      mysql-test/suite/rpl/t/rpl_known_bugs_detection.test
      mysql-test/suite/rpl/t/rpl_manual_change_index_file.test
      mysql-test/suite/rpl/t/rpl_packet.test
      mysql-test/suite/rpl/t/rpl_row_event_max_size.test
      mysql-test/t/join_outer.test
      mysql-test/t/partition.test
      sql/item_subselect.cc
      sql/item_subselect.h
      sql/mysqld.cc
      sql/opt_explain.cc
      sql/opt_range.cc
      sql/rpl_master.cc
      sql/share/errmsg-utf8.txt
      sql/sql_lex.cc
      sql/sql_lex.h
      sql/sql_parse.cc
      sql/sql_select.cc
      sql/sql_select.h
      storage/innobase/buf/buf0buf.c
      storage/innobase/buf/buf0lru.c
      storage/innobase/dict/dict0stats.c
      storage/innobase/handler/i_s.cc
      storage/innobase/include/btr0cur.h
      storage/innobase/include/btr0cur.ic
      storage/innobase/include/buf0buf.h
      storage/innobase/include/buf0buf.ic
      storage/innobase/include/buf0types.h
      storage/innobase/rem/rem0rec.c
      storage/innobase/srv/srv0start.c
      unittest/mysys/my_atomic-t.c
 3393 Tor Didriksen	2011-10-04 [merge]
      merge opt-backporting => opt-team

    modified:
      mysql-test/collections/default.experimental
=== modified file 'client/mysqltest.cc'
--- a/client/mysqltest.cc	2011-09-26 08:48:20 +0000
+++ b/client/mysqltest.cc	2011-09-30 13:27:12 +0000
@@ -1361,7 +1361,14 @@ static void cleanup_and_exit(int exit_co
     }
   }
 
+  /* exit() appears to be not 100% reliable on Windows under some conditions */
+#ifdef __WIN__
+  fflush(stdout);
+  fflush(stderr);
+  _exit(exit_code);
+#else
   exit(exit_code);
+#endif
 }
 
 void print_file_stack()
@@ -5340,6 +5347,7 @@ do_handle_error:
 
   var_set_errno(0);
   handle_no_error(command);
+  revert_properties();
   return 1; /* Connected */
 }
 
@@ -7307,6 +7315,7 @@ void run_query_normal(struct st_connecti
 
   /* If we come here the query is both executed and read successfully */
   handle_no_error(command);
+  revert_properties();
 
 end:
 
@@ -7502,8 +7511,6 @@ void handle_no_error(struct st_command *
     die("query '%s' succeeded - should have failed with sqlstate %s...",
         command->query, command->expected_errors.err[0].code.sqlstate);
   }
-
-  revert_properties();
   DBUG_VOID_RETURN;
 }
 
@@ -7534,9 +7541,6 @@ void run_query_stmt(MYSQL *mysql, struct
   DBUG_ENTER("run_query_stmt");
   DBUG_PRINT("query", ("'%-.60s'", query));
 
-  /* Remember disable_result_log since handle_no_error() may reset it */
-  my_bool dis_res= disable_result_log;
-  
   /*
     Init a new stmt if it's not already one created for this connection
   */
@@ -7632,7 +7636,7 @@ void run_query_stmt(MYSQL *mysql, struct
 
   /* If we got here the statement was both executed and read successfully */
   handle_no_error(command);
-  if (!dis_res)
+  if (!disable_result_log)
   {
     /*
       Not all statements creates a result set. If there is one we can
@@ -7708,7 +7712,7 @@ end:
     dynstr_free(&ds_prepare_warnings);
     dynstr_free(&ds_execute_warnings);
   }
-
+  revert_properties();
 
   /* Close the statement if - no reconnect, need new prepare */
   if (mysql->reconnect)
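
As a side note, the cleanup_and_exit() hunk above documents a portability quirk: plain exit() has been observed to misbehave on Windows under some conditions, so the change flushes stdio explicitly and terminates with _exit() instead, skipping atexit handlers and static destructors. A minimal standalone sketch of the same pattern follows; it is an illustration only, not part of the commit, and the helper name terminate_process and the generic _WIN32 guard (the commit itself uses MySQL's __WIN__ macro) are hypothetical.

  #include <cstdio>    // fflush, stdout, stderr
  #include <cstdlib>   // exit; MSVC also declares _exit here

  static void terminate_process(int exit_code)
  {
  #ifdef _WIN32
    /* Push any buffered output to the console or redirected files first ... */
    fflush(stdout);
    fflush(stderr);
    /* ... then terminate immediately, bypassing atexit()/destructor machinery. */
    _exit(exit_code);
  #else
    exit(exit_code);
  #endif
  }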

=== modified file 'cmake/create_initial_db.cmake.in'
--- a/cmake/create_initial_db.cmake.in	2011-07-25 15:13:06 +0000
+++ b/cmake/create_initial_db.cmake.in	2011-09-30 15:56:18 +0000
@@ -59,7 +59,6 @@ SET(BOOTSTRAP_COMMAND 
   --datadir=.
   --default-storage-engine=MyISAM
   --default-tmp-storage-engine=MyISAM
-  --loose-skip-innodb
   --loose-skip-ndbcluster
   --max_allowed_packet=8M
   --net_buffer_length=16K

=== modified file 'libmysql/authentication_win/handshake_client.cc'
--- a/libmysql/authentication_win/handshake_client.cc	2011-09-21 11:01:41 +0000
+++ b/libmysql/authentication_win/handshake_client.cc	2011-09-30 12:27:08 +0000
@@ -161,6 +161,21 @@ int Handshake_client::write_packet(Blob 
       keep all the data.
     */
     unsigned block_count= data.len()/512 + ((data.len() % 512) ? 1 : 0);
+
+#if !defined(DBUG_OFF) && defined(WINAUTH_USE_DBUG_LIB)
+
+    /*
+      For testing purposes, use wrong block count to see how server
+      handles this.
+    */
+    DBUG_EXECUTE_IF("winauth_first_packet_test",{
+      block_count= data.len() == 601 ? 0 :
+                   data.len() == 602 ? 1 : 
+                   block_count;
+    });
+
+#endif
+
     DBUG_ASSERT(block_count < (unsigned)0x100);
     saved_byte= data[254];
     data[254] = block_count;
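
The write_packet() hunk above does two things: it computes how many 512-byte blocks the handshake payload occupies (a ceiling division whose result must fit into a single byte, hence the block_count < 0x100 assertion), and, in debug builds, wires in a DBUG_EXECUTE_IF hook that deliberately falsifies that count for 601- and 602-byte payloads so the server's handling of a bad block count can be exercised. A small standalone sketch of the arithmetic, for illustration only (the function name block_count_for is hypothetical):

  #include <cassert>

  /* Number of 512-byte blocks needed to hold len bytes (ceiling division). */
  static unsigned block_count_for(unsigned len)
  {
    unsigned count= len / 512 + ((len % 512) ? 1 : 0);
    assert(count < 0x100u);   /* the count is stored in a single byte */
    return count;
  }

For example, block_count_for(512) is 1, block_count_for(513) is 2, and block_count_for(601) is 2 -- the value the debug hook then overrides with 0 to provoke the server-side error path.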

=== removed file 'mysql-test/extra/rpl_tests/rpl_insert_duplicate.test'
--- a/mysql-test/extra/rpl_tests/rpl_insert_duplicate.test	2011-01-31 13:11:05 +0000
+++ b/mysql-test/extra/rpl_tests/rpl_insert_duplicate.test	1970-01-01 00:00:00 +0000
@@ -1,59 +0,0 @@
-# BUG#59338 Inconsistency in binlog for statements that don't change any rows STATEMENT SBR
-# In SBR, if a statement does not fail, it is always written to the binary log,
-# regardless if rows are changed or not. If there is a failure, a statement is
-# only written to the binary log if a non-transactional (.e.g. MyIsam) engine
-# is updated. INSERT ON DUPLICATE KEY UPDATE was not following the rule above
-# and was not written to the binary log, if then engine was Innodb.
-#
-# In this test case, we check if INSERT ON DUPLICATE KEY UPDATE that does not
-# change anything is still written to the binary log.
-
-# Prepare environment
---connection master
-
-eval CREATE TABLE t1 (
- a INT UNSIGNED NOT NULL PRIMARY KEY
-) ENGINE=$engine_type;
-
-eval CREATE TABLE t2 (
- a INT UNSIGNED
-) ENGINE=$engine_type;
-
-INSERT INTO t1 VALUES (1);
-INSERT INTO t2 VALUES (1);
-
-# An insert duplicate that does not update anything must be written to the binary
-# log in SBR and MIXED modes. We check this property by summing a before and after
-# the update and comparing the binlog positions. The sum should be the same at both
-# points and the statement should be in the binary log.
---let $binlog_file= query_get_value("SHOW MASTER STATUS", File, 1)
---let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1)
---let $statement_file=INSERT INTO t1 SELECT t2.a FROM t2 ORDER BY t2.a ON DUPLICATE KEY UPDATE t1.a= t1.a
---eval $statement_file
-
---let $assert_cond= SUM(a) = 1 FROM t1
---let $assert_text= Sum of elements in t1 should be 1.
---source include/assert.inc
-
-if (`SELECT @@BINLOG_FORMAT = 'ROW'`)
-{
-  --let $binlog_position_cmp= =
-  --let $assert_cond= [SHOW MASTER STATUS, Position, 1] $binlog_position_cmp $binlog_start
-  --let $assert_text= In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.
-}
-if (`SELECT @@BINLOG_FORMAT != 'ROW'`)
-{
-  --let $assert_cond= \'[\'SHOW BINLOG EVENTS IN "$binlog_file" FROM $binlog_start LIMIT 1, 1\', Info, 1]\' LIKE \'%$statement_file\'
-  --let $assert_text= In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.
-}
---source include/assert.inc
-
-# Compare master and slave
---sync_slave_with_master
---let $diff_tables= master:test.t1 , slave:test.t1
---source include/diff_tables.inc
-
-# Clean up
---connection master
-drop table t1, t2;
---sync_slave_with_master

=== modified file 'mysql-test/extra/rpl_tests/rpl_insert_id.test'
--- a/mysql-test/extra/rpl_tests/rpl_insert_id.test	2010-12-19 17:15:12 +0000
+++ b/mysql-test/extra/rpl_tests/rpl_insert_id.test	2011-09-29 09:17:27 +0000
@@ -77,6 +77,7 @@ eval create table t2(b int auto_incremen
 insert into t1 values (10);
 insert into t1 values (null),(null),(null);
 insert into t2 values (5,0);
+--disable_warnings ONCE
 insert into t2 (c) select * from t1 ORDER BY a;
 select * from t2 ORDER BY b;
 sync_slave_with_master;
@@ -113,8 +114,10 @@ set @@session.sql_auto_is_null=1;
 eval create table t1(a int auto_increment, key(a)) engine=$engine_type;
 eval create table t2(a int) engine=$engine_type;
 insert into t1 (a) values (null);
+--disable_warnings
 insert into t2 (a) select a from t1 where a is null;
 insert into t2 (a) select a from t1 where a is null;
+--enable_warnings
 select * from t2;
 sync_slave_with_master;
 connection slave;
@@ -172,17 +175,15 @@ begin
 end|
 delimiter ;|
 
---disable_warnings
+--disable_warnings ONCE
 insert into t1 (last_id) values (0);
---enable_warnings
 
 drop trigger t1_bi;
 
 # Check that nested call doesn't affect outer context.
 select last_insert_id();
---disable_warnings
+--disable_warnings ONCE
 select bug15728_insert();
---enable_warnings
 select last_insert_id();
 insert into t1 (last_id) values (bug15728());
 # This should be exactly one greater than in the previous call.
@@ -190,9 +191,8 @@ select last_insert_id();
 
 # BUG#20339 - stored procedure using LAST_INSERT_ID() does not
 # replicate statement-based
---disable_warnings
+--disable_warnings ONCE
 drop procedure if exists foo;
---enable_warnings
 delimiter |;
 create procedure foo()
 begin
@@ -252,6 +252,7 @@ select * from t1 order by n;
 # table's counter, the counter for next row is bigger than the
 # after-value of the updated row.
 connection master;
+--disable_warnings ONCE
 insert into t1 values (NULL,400),(3,500),(NULL,600) on duplicate key UPDATE n=1000;
 select * from t1 order by n;
 sync_slave_with_master;
@@ -270,6 +271,7 @@ delete from t1 where b <> 100;
 select * from t1 order by n;
 
 connection master;
+--disable_warnings ONCE
 insert into t1 values(null,100),(null,350) on duplicate key update n=2;
 select * from t1 order by n;
 sync_slave_with_master;
@@ -287,6 +289,7 @@ connection master;
 # testcase with INSERT VALUES
 eval CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b INT,
                       UNIQUE(b)) ENGINE=$engine_type;
+--disable_warnings ONCE
 INSERT INTO t1(b) VALUES(1),(1),(2) ON DUPLICATE KEY UPDATE t1.b=10;
 SELECT * FROM t1 ORDER BY a;
 sync_slave_with_master;
@@ -314,19 +317,23 @@ INSERT INTO t2 (field_a, field_b, field_
 INSERT INTO t2 (field_a, field_b, field_c) VALUES (4, 'd', '4d');
 INSERT INTO t2 (field_a, field_b, field_c) VALUES (5, 'e', '5e');
 # Updating table t1 based on values from table t2
+--disable_warnings
 INSERT INTO t1 (field_1, field_2, field_3)
 SELECT t2.field_a, t2.field_b, t2.field_c
 FROM t2
 ON DUPLICATE KEY UPDATE
 t1.field_3 = t2.field_c;
+--enable_warnings
 # Inserting new record into t2
 INSERT INTO t2 (field_a, field_b, field_c) VALUES (6, 'f', '6f');
 # Updating t1 again
+--disable_warnings
 INSERT INTO t1 (field_1, field_2, field_3)
 SELECT t2.field_a, t2.field_b, t2.field_c
 FROM t2
 ON DUPLICATE KEY UPDATE
 t1.field_3 = t2.field_c;
+--enable_warnings
 SELECT * FROM t1 ORDER BY id;
 sync_slave_with_master;
 SELECT * FROM t1 ORDER BY id;
@@ -433,9 +440,8 @@ delimiter ;|
 
 INSERT INTO t1 VALUES (NULL, -1);
 CALL p1();
---disable_warnings
+--disable_warnings ONCE
 SELECT f1();
---enable_warnings
 INSERT INTO t1 VALUES (NULL, f2()), (NULL, LAST_INSERT_ID()),
                       (NULL, LAST_INSERT_ID()), (NULL, f2()), (NULL, f2());
 INSERT INTO t1 VALUES (NULL, f2());
@@ -504,16 +510,14 @@ insert into t2 (id) values(1),(2),(3);
 delete from t2;
 set sql_log_bin=1;
 #inside SELECT, then inside INSERT
---disable_warnings
+--disable_warnings ONCE
 select insid();
---enable_warnings
 set sql_log_bin=0;
 insert into t2 (id) values(5),(6),(7);
 delete from t2 where id>=5;
 set sql_log_bin=1;
---disable_warnings
+--disable_warnings ONCE
 insert into t1 select insid();
---enable_warnings
 select * from t1 order by id;
 select * from t2 order by id;
 
@@ -537,6 +541,7 @@ begin
   insert into t2 values(null,3);
 end|
 delimiter ;|
+--disable_warnings ONCE
 call foo();
 select * from t1 order by n;
 select * from t2 order by id;

=== modified file 'mysql-test/extra/rpl_tests/rpl_insert_ignore.test'
--- a/mysql-test/extra/rpl_tests/rpl_insert_ignore.test	2011-01-31 13:11:05 +0000
+++ b/mysql-test/extra/rpl_tests/rpl_insert_ignore.test	2011-09-29 09:17:27 +0000
@@ -31,7 +31,7 @@ INSERT INTO t2 VALUES (3, 5);
 INSERT INTO t2 VALUES (4, 3);
 INSERT INTO t2 VALUES (5, 4);
 INSERT INTO t2 VALUES (6, 6);
-
+--disable_warnings ONCE
 INSERT IGNORE INTO t1 SELECT NULL, t2.b FROM t2 ORDER BY t2.a;
 --let $assert_cond= COUNT(*) = 6 FROM t1
 --let $assert_text= Count of elements in t1 should be 6.
@@ -51,25 +51,70 @@ INSERT IGNORE INTO t1 SELECT NULL, t2.b 
 --let $binlog_file= query_get_value("SHOW MASTER STATUS", File, 1)
 --let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1)
 --let $statement_file=INSERT IGNORE INTO t1 SELECT NULL, t2.b FROM t2 ORDER BY t2.a
+--disable_warnings ONCE
 --eval $statement_file
 
 --let $assert_cond= COUNT(*) = 6 FROM t1
 --let $assert_text= Count of elements in t1 should be 6.
 --source include/assert.inc
 
-if (`SELECT @@BINLOG_FORMAT = 'ROW'`)
+if (`SELECT @@BINLOG_FORMAT != 'STATEMENT'`)
 {
   --let $binlog_position_cmp= =
   --let $assert_cond= [SHOW MASTER STATUS, Position, 1] $binlog_position_cmp $binlog_start
   --let $assert_text= In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.
 }
-if (`SELECT @@BINLOG_FORMAT != 'ROW'`)
+if (`SELECT @@BINLOG_FORMAT = 'STATEMENT'`)
 {
   --let $assert_cond= \'[\'SHOW BINLOG EVENTS IN "$binlog_file" FROM $binlog_start LIMIT 2, 1\', Info, 1]\' LIKE \'%$statement_file\'
   --let $assert_text= In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.
 }
+
 --source include/assert.inc
 
+# An insert duplicate that does not update anything must be written to the binary
+# log in SBR and MIXED modes. We check this property by summing a before and after
+# the update and comparing the binlog positions. The sum should be the same at both
+# points and the statement should be in the binary log.
+--disable_warnings
+DROP TABLE t1;
+DROP TABLE t2;
+--enable_warnings
+eval CREATE TABLE t1 (
+ a INT UNSIGNED NOT NULL PRIMARY KEY
+) ENGINE=$engine_type;
+
+eval CREATE TABLE t2 (
+ a INT UNSIGNED
+) ENGINE=$engine_type;
+
+INSERT INTO t1 VALUES (1);
+INSERT INTO t2 VALUES (1);
+
+--let $binlog_file= query_get_value("SHOW MASTER STATUS", File, 1)
+--let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1)
+--let $statement_file=INSERT INTO t1 SELECT t2.a FROM t2 ORDER BY t2.a ON DUPLICATE KEY UPDATE t1.a= t1.a
+--disable_warnings ONCE
+--eval $statement_file
+
+--let $assert_cond= SUM(a) = 1 FROM t1
+--let $assert_text= Sum of elements in t1 should be 1.
+--source include/assert.inc
+
+if (`SELECT @@BINLOG_FORMAT != 'STATEMENT'`)
+{
+  --let $binlog_position_cmp= =
+  --let $assert_cond= [SHOW MASTER STATUS, Position, 1] $binlog_position_cmp $binlog_start
+  --let $assert_text= In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.
+}
+if (`SELECT @@BINLOG_FORMAT = 'STATEMENT'`)
+{
+  --let $assert_cond= \'[\'SHOW BINLOG EVENTS IN "$binlog_file" FROM $binlog_start LIMIT 1, 1\', Info, 1]\' LIKE \'%$statement_file\'
+  --let $assert_text= In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.
+}
+--source include/assert.inc
+
+
 # Clean up
 --connection master
 drop table t1, t2;

=== modified file 'mysql-test/include/commit.inc'
--- a/mysql-test/include/commit.inc	2011-01-31 13:44:38 +0000
+++ b/mysql-test/include/commit.inc	2011-10-03 10:45:54 +0000
@@ -521,10 +521,34 @@ commit;
 call p_verify_status_increment(2, 2, 2, 2);
 --echo # 15. Read-write statement: UPDATE IGNORE, change 0 rows. 
 --echo #
+--disable_warnings
 update ignore t1 set a=2 where a=1;
-call p_verify_status_increment(2, 2, 1, 0);
-commit;
-call p_verify_status_increment(2, 2, 1, 0);
+--enable_warnings
+if (`select @@binlog_format = 'STATEMENT'`)
+{
+  --disable_query_log
+  call p_verify_status_increment(2, 2, 1, 0);
+  --enable_query_log
+}
+if (`select @@binlog_format != 'STATEMENT'`)
+{
+  --disable_query_log
+  call p_verify_status_increment(1, 0, 1, 0);
+  --enable_query_log
+}
+commit;
+if (`select @@binlog_format = 'STATEMENT'`)
+{
+  --disable_query_log
+  call p_verify_status_increment(2, 2, 1, 0);
+  --enable_query_log
+}
+if (`select @@binlog_format != 'STATEMENT'`)
+{
+  --disable_query_log
+  call p_verify_status_increment(1, 0, 1, 0);
+  --enable_query_log
+}
 --echo #
 --echo # Create a stored function that modifies a
 --echo # non-transactional table. Demonstrate that changes in
@@ -603,7 +627,9 @@ call p_verify_status_increment(2, 0, 1, 
 
 --echo # 21. Read-write statement: UPDATE, change 0 (transactional) rows. 
 --echo #
+--disable_warnings
 update t1 set a=2 where a=f1()+10;
+--enable_warnings
 call p_verify_status_increment(2, 0, 1, 0);
 commit;
 call p_verify_status_increment(2, 0, 1, 0);
@@ -703,7 +729,9 @@ call p_verify_status_increment(4, 4, 4, 
 --echo # 
 insert into t2 select a from t1;
 commit;
+--disable_warnings
 replace into t2 select a from t1;
+--enable_warnings
 commit;
 call p_verify_status_increment(8, 8, 8, 8);
 #

=== modified file 'mysql-test/include/subquery.inc'
--- a/mysql-test/include/subquery.inc	2011-08-26 13:26:33 +0000
+++ b/mysql-test/include/subquery.inc	2011-09-29 12:47:32 +0000
@@ -5561,4 +5561,85 @@ WHERE t1.k < ALL(
 DROP TABLE t1, t2, t3; 
 
 --echo #
+--echo # Bug#12838171: 51VS56: TRANSFORMED IN()+SUBQ QUERY 
+--echo #               PRODUCES EMPTY RESULT ON 5.6, 1 ROW ON 5.1
+--echo #
+CREATE TABLE ot (
+  col_int_nokey int(11), 
+  col_varchar_nokey varchar(1)
+) ;
+
+INSERT INTO ot VALUES (1,'x');
+
+CREATE TABLE it (
+  col_int_key int(11), 
+  col_varchar_key varchar(1), 
+  KEY idx_cvk_cik (col_varchar_key,col_int_key)
+) ;
+
+INSERT INTO it VALUES (NULL,'x'), (NULL,'f');
+
+--echo
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+   (SELECT col_varchar_key
+    FROM it 
+    WHERE col_int_key IS NULL);
+
+--echo
+EXPLAIN EXTENDED
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+   (SELECT col_varchar_key
+    FROM it 
+    WHERE col_int_key IS NULL);
+
+--echo
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+   (SELECT col_varchar_key
+    FROM it 
+    WHERE coalesce(col_int_nokey, 1) );
+
+--echo
+EXPLAIN EXTENDED
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+   (SELECT col_varchar_key
+    FROM it 
+    WHERE coalesce(col_int_nokey, 1) );
+
+DROP TABLE it;
+
+CREATE TABLE it (
+   col_int_key int(11),
+   col_varchar_key varchar(1),
+   col_varchar_key2 varchar(1),
+   KEY idx_cvk_cvk2_cik (col_varchar_key, col_varchar_key2, col_int_key),
+   KEY idx_cvk_cik (col_varchar_key, col_int_key)
+);
+
+INSERT INTO it VALUES (NULL,'x','x'), (NULL,'f','f');
+
+SELECT col_int_nokey
+FROM ot
+WHERE (col_varchar_nokey, 'x') IN
+    (SELECT col_varchar_key, col_varchar_key2
+     FROM it
+     WHERE col_int_key IS NULL);
+
+--echo
+EXPLAIN EXTENDED
+SELECT col_int_nokey
+FROM ot
+WHERE (col_varchar_nokey, 'x') IN
+    (SELECT col_varchar_key, col_varchar_key2
+     FROM it
+     WHERE col_int_key IS NULL);
 
+--echo
+DROP TABLE it, ot;

=== modified file 'mysql-test/r/commit_1innodb.result'
--- a/mysql-test/r/commit_1innodb.result	2011-01-31 13:44:38 +0000
+++ b/mysql-test/r/commit_1innodb.result	2011-10-03 10:45:54 +0000
@@ -549,11 +549,9 @@ SUCCESS
 # 15. Read-write statement: UPDATE IGNORE, change 0 rows. 
 #
 update ignore t1 set a=2 where a=1;
-call p_verify_status_increment(2, 2, 1, 0);
 SUCCESS
 
 commit;
-call p_verify_status_increment(2, 2, 1, 0);
 SUCCESS
 
 #

=== modified file 'mysql-test/r/group_by.result'
--- a/mysql-test/r/group_by.result	2011-10-04 05:55:38 +0000
+++ b/mysql-test/r/group_by.result	2011-10-05 08:04:47 +0000
@@ -1676,7 +1676,7 @@ c	(SELECT a FROM t1 WHERE b = c)
 SELECT b c, (SELECT a FROM t1 WHERE b = c)
 FROM t1 
 HAVING b = 10;
-ERROR 42000: non-grouping field 'b' is used in HAVING clause
+ERROR 42000: Non-grouping field 'b' is used in HAVING clause
 SELECT MAX(b) c, (SELECT a FROM t1 WHERE b = c)
 FROM t1 
 HAVING b = 10;

=== modified file 'mysql-test/r/having.result'
--- a/mysql-test/r/having.result	2011-07-19 15:11:15 +0000
+++ b/mysql-test/r/having.result	2011-09-29 12:55:06 +0000
@@ -419,7 +419,7 @@ select f1 from t1 group by f1 having max
 f1
 set session sql_mode='ONLY_FULL_GROUP_BY';
 select f1 from t1 having max(f1)=f1;
-ERROR 42000: non-grouping field 'f1' is used in HAVING clause
+ERROR 42000: Non-grouping field 'f1' is used in HAVING clause
 select f1 from t1 group by f1 having max(f1)=f1;
 f1
 set session sql_mode='';

=== modified file 'mysql-test/r/join_outer.result'
--- a/mysql-test/r/join_outer.result	2011-08-29 11:57:44 +0000
+++ b/mysql-test/r/join_outer.result	2011-09-30 12:20:04 +0000
@@ -1740,3 +1740,40 @@ execute prep_stmt_9846;
 field1
 deallocate prepare prep_stmt_9846;
 drop table t1;
+#
+# Bug#13040136 - ASSERT IN PLAN_CHANGE_WATCHDOG::~PLAN_CHANGE_WATCHDOG()
+#
+CREATE TABLE t1 (
+col_varchar_10 VARCHAR(10),
+col_int_key INTEGER,
+col_varchar_10_key VARCHAR(10),
+pk INTEGER NOT NULL,
+PRIMARY KEY (pk),
+KEY (col_int_key),
+KEY (col_varchar_10_key)
+);
+INSERT INTO t1 VALUES ('q',NULL,'o',1);
+CREATE TABLE t2 (
+pk INTEGER NOT NULL AUTO_INCREMENT,
+col_varchar_10_key VARCHAR(10),
+col_int_key INTEGER,
+col_varchar_10 VARCHAR(10),
+PRIMARY KEY (pk),
+KEY (col_varchar_10_key),
+KEY col_int_key (col_int_key)
+);
+INSERT INTO t2 VALUES
+(1,'r',NULL,'would'),(2,'tell',-655032320,'t'),
+(3,'d',9,'a'),(4,'gvafasdkiy',6,'ugvafasdki'),
+(5,'that\'s',NULL,'she'),(6,'bwftwugvaf',7,'cbwftwugva'),
+(7,'f',-700055552,'mkacbwftwu'),(8,'a',9,'be'),
+(9,'d',NULL,'u'),(10,'ckiixcsxmk',NULL,'o');
+SELECT DISTINCT t2.col_int_key 
+FROM
+t1
+LEFT JOIN t2
+ON t1.col_varchar_10 = t2.col_varchar_10_key 
+WHERE t2.pk
+ORDER BY t2.col_int_key;
+col_int_key
+DROP TABLE t1,t2;

=== modified file 'mysql-test/r/join_outer_bka.result'
--- a/mysql-test/r/join_outer_bka.result	2011-08-18 09:21:45 +0000
+++ b/mysql-test/r/join_outer_bka.result	2011-09-30 12:20:04 +0000
@@ -1741,4 +1741,41 @@ execute prep_stmt_9846;
 field1
 deallocate prepare prep_stmt_9846;
 drop table t1;
+#
+# Bug#13040136 - ASSERT IN PLAN_CHANGE_WATCHDOG::~PLAN_CHANGE_WATCHDOG()
+#
+CREATE TABLE t1 (
+col_varchar_10 VARCHAR(10),
+col_int_key INTEGER,
+col_varchar_10_key VARCHAR(10),
+pk INTEGER NOT NULL,
+PRIMARY KEY (pk),
+KEY (col_int_key),
+KEY (col_varchar_10_key)
+);
+INSERT INTO t1 VALUES ('q',NULL,'o',1);
+CREATE TABLE t2 (
+pk INTEGER NOT NULL AUTO_INCREMENT,
+col_varchar_10_key VARCHAR(10),
+col_int_key INTEGER,
+col_varchar_10 VARCHAR(10),
+PRIMARY KEY (pk),
+KEY (col_varchar_10_key),
+KEY col_int_key (col_int_key)
+);
+INSERT INTO t2 VALUES
+(1,'r',NULL,'would'),(2,'tell',-655032320,'t'),
+(3,'d',9,'a'),(4,'gvafasdkiy',6,'ugvafasdki'),
+(5,'that\'s',NULL,'she'),(6,'bwftwugvaf',7,'cbwftwugva'),
+(7,'f',-700055552,'mkacbwftwu'),(8,'a',9,'be'),
+(9,'d',NULL,'u'),(10,'ckiixcsxmk',NULL,'o');
+SELECT DISTINCT t2.col_int_key 
+FROM
+t1
+LEFT JOIN t2
+ON t1.col_varchar_10 = t2.col_varchar_10_key 
+WHERE t2.pk
+ORDER BY t2.col_int_key;
+col_int_key
+DROP TABLE t1,t2;
 set optimizer_switch=default;

=== modified file 'mysql-test/r/join_outer_bka_nixbnl.result'
--- a/mysql-test/r/join_outer_bka_nixbnl.result	2011-10-04 06:35:35 +0000
+++ b/mysql-test/r/join_outer_bka_nixbnl.result	2011-10-04 14:05:31 +0000
@@ -1741,4 +1741,41 @@ execute prep_stmt_9846;
 field1
 deallocate prepare prep_stmt_9846;
 drop table t1;
+#
+# Bug#13040136 - ASSERT IN PLAN_CHANGE_WATCHDOG::~PLAN_CHANGE_WATCHDOG()
+#
+CREATE TABLE t1 (
+col_varchar_10 VARCHAR(10),
+col_int_key INTEGER,
+col_varchar_10_key VARCHAR(10),
+pk INTEGER NOT NULL,
+PRIMARY KEY (pk),
+KEY (col_int_key),
+KEY (col_varchar_10_key)
+);
+INSERT INTO t1 VALUES ('q',NULL,'o',1);
+CREATE TABLE t2 (
+pk INTEGER NOT NULL AUTO_INCREMENT,
+col_varchar_10_key VARCHAR(10),
+col_int_key INTEGER,
+col_varchar_10 VARCHAR(10),
+PRIMARY KEY (pk),
+KEY (col_varchar_10_key),
+KEY col_int_key (col_int_key)
+);
+INSERT INTO t2 VALUES
+(1,'r',NULL,'would'),(2,'tell',-655032320,'t'),
+(3,'d',9,'a'),(4,'gvafasdkiy',6,'ugvafasdki'),
+(5,'that\'s',NULL,'she'),(6,'bwftwugvaf',7,'cbwftwugva'),
+(7,'f',-700055552,'mkacbwftwu'),(8,'a',9,'be'),
+(9,'d',NULL,'u'),(10,'ckiixcsxmk',NULL,'o');
+SELECT DISTINCT t2.col_int_key 
+FROM
+t1
+LEFT JOIN t2
+ON t1.col_varchar_10 = t2.col_varchar_10_key 
+WHERE t2.pk
+ORDER BY t2.col_int_key;
+col_int_key
+DROP TABLE t1,t2;
 set optimizer_switch=default;

=== modified file 'mysql-test/r/partition.result'
--- a/mysql-test/r/partition.result	2011-07-19 15:11:15 +0000
+++ b/mysql-test/r/partition.result	2011-10-03 12:22:21 +0000
@@ -2422,3 +2422,10 @@ alter table t1 add column j int;
 unlock tables;
 drop table t1;
 # End of 5.5 tests
+CREATE TABLE t1(a INT PRIMARY KEY) PARTITION BY LINEAR KEY (a);
+CREATE ALGORITHM=TEMPTABLE VIEW vtmp AS
+SELECT 1 FROM t1 AS t1_0 JOIN t1 ON t1_0.a LIKE (SELECT 1 FROM t1);
+SELECT * FROM vtmp;
+1
+DROP VIEW vtmp;
+DROP TABLE t1;

=== modified file 'mysql-test/r/subquery_nomat_nosj.result'
--- a/mysql-test/r/subquery_nomat_nosj.result	2011-09-13 07:22:49 +0000
+++ b/mysql-test/r/subquery_nomat_nosj.result	2011-09-29 12:47:32 +0000
@@ -6733,4 +6733,95 @@ COUNT(*)
 6
 DROP TABLE t1, t2, t3;
 #
+# Bug#12838171: 51VS56: TRANSFORMED IN()+SUBQ QUERY 
+#               PRODUCES EMPTY RESULT ON 5.6, 1 ROW ON 5.1
+#
+CREATE TABLE ot (
+col_int_nokey int(11), 
+col_varchar_nokey varchar(1)
+) ;
+INSERT INTO ot VALUES (1,'x');
+CREATE TABLE it (
+col_int_key int(11), 
+col_varchar_key varchar(1), 
+KEY idx_cvk_cik (col_varchar_key,col_int_key)
+) ;
+INSERT INTO it VALUES (NULL,'x'), (NULL,'f');
+
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE col_int_key IS NULL);
+col_int_nokey
+1
+
+EXPLAIN EXTENDED
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE col_int_key IS NULL);
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	ot	system	NULL	NULL	NULL	NULL	1	100.00	
+2	DEPENDENT SUBQUERY	it	index_subquery	idx_cvk_cik	idx_cvk_cik	9	func,const	2	100.00	Using index; Using where
+Warnings:
+Note	1003	/* select#1 */ select '1' AS `col_int_nokey` from dual where <in_optimizer>('x',<exists>(<index_lookup>(<cache>('x') in it on idx_cvk_cik where (isnull(`test`.`it`.`col_int_key`) and (<cache>('x') = `test`.`it`.`col_varchar_key`)))))
+
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE coalesce(col_int_nokey, 1) );
+col_int_nokey
+1
+
+EXPLAIN EXTENDED
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE coalesce(col_int_nokey, 1) );
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	ot	system	NULL	NULL	NULL	NULL	1	100.00	
+2	DEPENDENT SUBQUERY	it	index_subquery	idx_cvk_cik	idx_cvk_cik	4	func	1	100.00	Using index
+Warnings:
+Note	1276	Field or reference 'test.ot.col_int_nokey' of SELECT #2 was resolved in SELECT #1
+Note	1003	/* select#1 */ select '1' AS `col_int_nokey` from dual where <in_optimizer>('x',<exists>(<index_lookup>(<cache>('x') in it on idx_cvk_cik)))
+DROP TABLE it;
+CREATE TABLE it (
+col_int_key int(11),
+col_varchar_key varchar(1),
+col_varchar_key2 varchar(1),
+KEY idx_cvk_cvk2_cik (col_varchar_key, col_varchar_key2, col_int_key),
+KEY idx_cvk_cik (col_varchar_key, col_int_key)
+);
+INSERT INTO it VALUES (NULL,'x','x'), (NULL,'f','f');
+SELECT col_int_nokey
+FROM ot
+WHERE (col_varchar_nokey, 'x') IN
+(SELECT col_varchar_key, col_varchar_key2
+FROM it
+WHERE col_int_key IS NULL);
+col_int_nokey
+1
+
+EXPLAIN EXTENDED
+SELECT col_int_nokey
+FROM ot
+WHERE (col_varchar_nokey, 'x') IN
+(SELECT col_varchar_key, col_varchar_key2
+FROM it
+WHERE col_int_key IS NULL);
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	ot	system	NULL	NULL	NULL	NULL	1	100.00	
+2	DEPENDENT SUBQUERY	it	index_subquery	idx_cvk_cvk2_cik,idx_cvk_cik	idx_cvk_cvk2_cik	13	func,const,const	2	100.00	Using index; Using where
+Warnings:
+Note	1003	/* select#1 */ select '1' AS `col_int_nokey` from dual where <in_optimizer>(('x','x'),<exists>(<index_lookup>(<cache>('x') in it on idx_cvk_cvk2_cik where (isnull(`test`.`it`.`col_int_key`) and (<cache>('x') = `test`.`it`.`col_varchar_key`) and (<cache>('x') = `test`.`it`.`col_varchar_key2`)))))
+
+DROP TABLE it, ot;
 set optimizer_switch=default;

=== modified file 'mysql-test/r/subquery_nomat_nosj_bka.result'
--- a/mysql-test/r/subquery_nomat_nosj_bka.result	2011-08-26 13:26:33 +0000
+++ b/mysql-test/r/subquery_nomat_nosj_bka.result	2011-09-29 12:47:32 +0000
@@ -6734,5 +6734,96 @@ COUNT(*)
 6
 DROP TABLE t1, t2, t3;
 #
+# Bug#12838171: 51VS56: TRANSFORMED IN()+SUBQ QUERY 
+#               PRODUCES EMPTY RESULT ON 5.6, 1 ROW ON 5.1
+#
+CREATE TABLE ot (
+col_int_nokey int(11), 
+col_varchar_nokey varchar(1)
+) ;
+INSERT INTO ot VALUES (1,'x');
+CREATE TABLE it (
+col_int_key int(11), 
+col_varchar_key varchar(1), 
+KEY idx_cvk_cik (col_varchar_key,col_int_key)
+) ;
+INSERT INTO it VALUES (NULL,'x'), (NULL,'f');
+
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE col_int_key IS NULL);
+col_int_nokey
+1
+
+EXPLAIN EXTENDED
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE col_int_key IS NULL);
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	ot	system	NULL	NULL	NULL	NULL	1	100.00	
+2	DEPENDENT SUBQUERY	it	index_subquery	idx_cvk_cik	idx_cvk_cik	9	func,const	2	100.00	Using index; Using where
+Warnings:
+Note	1003	/* select#1 */ select '1' AS `col_int_nokey` from dual where <in_optimizer>('x',<exists>(<index_lookup>(<cache>('x') in it on idx_cvk_cik where (isnull(`test`.`it`.`col_int_key`) and (<cache>('x') = `test`.`it`.`col_varchar_key`)))))
+
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE coalesce(col_int_nokey, 1) );
+col_int_nokey
+1
+
+EXPLAIN EXTENDED
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE coalesce(col_int_nokey, 1) );
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	ot	system	NULL	NULL	NULL	NULL	1	100.00	
+2	DEPENDENT SUBQUERY	it	index_subquery	idx_cvk_cik	idx_cvk_cik	4	func	1	100.00	Using index
+Warnings:
+Note	1276	Field or reference 'test.ot.col_int_nokey' of SELECT #2 was resolved in SELECT #1
+Note	1003	/* select#1 */ select '1' AS `col_int_nokey` from dual where <in_optimizer>('x',<exists>(<index_lookup>(<cache>('x') in it on idx_cvk_cik)))
+DROP TABLE it;
+CREATE TABLE it (
+col_int_key int(11),
+col_varchar_key varchar(1),
+col_varchar_key2 varchar(1),
+KEY idx_cvk_cvk2_cik (col_varchar_key, col_varchar_key2, col_int_key),
+KEY idx_cvk_cik (col_varchar_key, col_int_key)
+);
+INSERT INTO it VALUES (NULL,'x','x'), (NULL,'f','f');
+SELECT col_int_nokey
+FROM ot
+WHERE (col_varchar_nokey, 'x') IN
+(SELECT col_varchar_key, col_varchar_key2
+FROM it
+WHERE col_int_key IS NULL);
+col_int_nokey
+1
+
+EXPLAIN EXTENDED
+SELECT col_int_nokey
+FROM ot
+WHERE (col_varchar_nokey, 'x') IN
+(SELECT col_varchar_key, col_varchar_key2
+FROM it
+WHERE col_int_key IS NULL);
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	ot	system	NULL	NULL	NULL	NULL	1	100.00	
+2	DEPENDENT SUBQUERY	it	index_subquery	idx_cvk_cvk2_cik,idx_cvk_cik	idx_cvk_cvk2_cik	13	func,const,const	2	100.00	Using index; Using where
+Warnings:
+Note	1003	/* select#1 */ select '1' AS `col_int_nokey` from dual where <in_optimizer>(('x','x'),<exists>(<index_lookup>(<cache>('x') in it on idx_cvk_cvk2_cik where (isnull(`test`.`it`.`col_int_key`) and (<cache>('x') = `test`.`it`.`col_varchar_key`) and (<cache>('x') = `test`.`it`.`col_varchar_key2`)))))
+
+DROP TABLE it, ot;
 set optimizer_switch=default;
 set optimizer_switch=default;

=== modified file 'mysql-test/r/subquery_nomat_nosj_bka_nixbnl.result'
--- a/mysql-test/r/subquery_nomat_nosj_bka_nixbnl.result	2011-10-04 06:35:35 +0000
+++ b/mysql-test/r/subquery_nomat_nosj_bka_nixbnl.result	2011-10-04 14:05:31 +0000
@@ -6734,5 +6734,96 @@ COUNT(*)
 6
 DROP TABLE t1, t2, t3;
 #
+# Bug#12838171: 51VS56: TRANSFORMED IN()+SUBQ QUERY 
+#               PRODUCES EMPTY RESULT ON 5.6, 1 ROW ON 5.1
+#
+CREATE TABLE ot (
+col_int_nokey int(11), 
+col_varchar_nokey varchar(1)
+) ;
+INSERT INTO ot VALUES (1,'x');
+CREATE TABLE it (
+col_int_key int(11), 
+col_varchar_key varchar(1), 
+KEY idx_cvk_cik (col_varchar_key,col_int_key)
+) ;
+INSERT INTO it VALUES (NULL,'x'), (NULL,'f');
+
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE col_int_key IS NULL);
+col_int_nokey
+1
+
+EXPLAIN EXTENDED
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE col_int_key IS NULL);
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	ot	system	NULL	NULL	NULL	NULL	1	100.00	
+2	DEPENDENT SUBQUERY	it	index_subquery	idx_cvk_cik	idx_cvk_cik	9	func,const	2	100.00	Using index; Using where
+Warnings:
+Note	1003	/* select#1 */ select '1' AS `col_int_nokey` from dual where <in_optimizer>('x',<exists>(<index_lookup>(<cache>('x') in it on idx_cvk_cik where (isnull(`test`.`it`.`col_int_key`) and (<cache>('x') = `test`.`it`.`col_varchar_key`)))))
+
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE coalesce(col_int_nokey, 1) );
+col_int_nokey
+1
+
+EXPLAIN EXTENDED
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE coalesce(col_int_nokey, 1) );
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	ot	system	NULL	NULL	NULL	NULL	1	100.00	
+2	DEPENDENT SUBQUERY	it	index_subquery	idx_cvk_cik	idx_cvk_cik	4	func	1	100.00	Using index
+Warnings:
+Note	1276	Field or reference 'test.ot.col_int_nokey' of SELECT #2 was resolved in SELECT #1
+Note	1003	/* select#1 */ select '1' AS `col_int_nokey` from dual where <in_optimizer>('x',<exists>(<index_lookup>(<cache>('x') in it on idx_cvk_cik)))
+DROP TABLE it;
+CREATE TABLE it (
+col_int_key int(11),
+col_varchar_key varchar(1),
+col_varchar_key2 varchar(1),
+KEY idx_cvk_cvk2_cik (col_varchar_key, col_varchar_key2, col_int_key),
+KEY idx_cvk_cik (col_varchar_key, col_int_key)
+);
+INSERT INTO it VALUES (NULL,'x','x'), (NULL,'f','f');
+SELECT col_int_nokey
+FROM ot
+WHERE (col_varchar_nokey, 'x') IN
+(SELECT col_varchar_key, col_varchar_key2
+FROM it
+WHERE col_int_key IS NULL);
+col_int_nokey
+1
+
+EXPLAIN EXTENDED
+SELECT col_int_nokey
+FROM ot
+WHERE (col_varchar_nokey, 'x') IN
+(SELECT col_varchar_key, col_varchar_key2
+FROM it
+WHERE col_int_key IS NULL);
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	ot	system	NULL	NULL	NULL	NULL	1	100.00	
+2	DEPENDENT SUBQUERY	it	index_subquery	idx_cvk_cvk2_cik,idx_cvk_cik	idx_cvk_cvk2_cik	13	func,const,const	2	100.00	Using index; Using where
+Warnings:
+Note	1003	/* select#1 */ select '1' AS `col_int_nokey` from dual where <in_optimizer>(('x','x'),<exists>(<index_lookup>(<cache>('x') in it on idx_cvk_cvk2_cik where (isnull(`test`.`it`.`col_int_key`) and (<cache>('x') = `test`.`it`.`col_varchar_key`) and (<cache>('x') = `test`.`it`.`col_varchar_key2`)))))
+
+DROP TABLE it, ot;
 set optimizer_switch=default;
 set optimizer_switch=default;

=== modified file 'mysql-test/r/subquery_none.result'
--- a/mysql-test/r/subquery_none.result	2011-09-13 07:22:49 +0000
+++ b/mysql-test/r/subquery_none.result	2011-09-29 12:47:32 +0000
@@ -6732,4 +6732,95 @@ COUNT(*)
 6
 DROP TABLE t1, t2, t3;
 #
+# Bug#12838171: 51VS56: TRANSFORMED IN()+SUBQ QUERY 
+#               PRODUCES EMPTY RESULT ON 5.6, 1 ROW ON 5.1
+#
+CREATE TABLE ot (
+col_int_nokey int(11), 
+col_varchar_nokey varchar(1)
+) ;
+INSERT INTO ot VALUES (1,'x');
+CREATE TABLE it (
+col_int_key int(11), 
+col_varchar_key varchar(1), 
+KEY idx_cvk_cik (col_varchar_key,col_int_key)
+) ;
+INSERT INTO it VALUES (NULL,'x'), (NULL,'f');
+
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE col_int_key IS NULL);
+col_int_nokey
+1
+
+EXPLAIN EXTENDED
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE col_int_key IS NULL);
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	ot	system	NULL	NULL	NULL	NULL	1	100.00	
+2	DEPENDENT SUBQUERY	it	index_subquery	idx_cvk_cik	idx_cvk_cik	9	func,const	2	100.00	Using index; Using where
+Warnings:
+Note	1003	/* select#1 */ select '1' AS `col_int_nokey` from dual where <in_optimizer>('x',<exists>(<index_lookup>(<cache>('x') in it on idx_cvk_cik where (isnull(`test`.`it`.`col_int_key`) and (<cache>('x') = `test`.`it`.`col_varchar_key`)))))
+
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE coalesce(col_int_nokey, 1) );
+col_int_nokey
+1
+
+EXPLAIN EXTENDED
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE coalesce(col_int_nokey, 1) );
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	ot	system	NULL	NULL	NULL	NULL	1	100.00	
+2	DEPENDENT SUBQUERY	it	index_subquery	idx_cvk_cik	idx_cvk_cik	4	func	1	100.00	Using index
+Warnings:
+Note	1276	Field or reference 'test.ot.col_int_nokey' of SELECT #2 was resolved in SELECT #1
+Note	1003	/* select#1 */ select '1' AS `col_int_nokey` from dual where <in_optimizer>('x',<exists>(<index_lookup>(<cache>('x') in it on idx_cvk_cik)))
+DROP TABLE it;
+CREATE TABLE it (
+col_int_key int(11),
+col_varchar_key varchar(1),
+col_varchar_key2 varchar(1),
+KEY idx_cvk_cvk2_cik (col_varchar_key, col_varchar_key2, col_int_key),
+KEY idx_cvk_cik (col_varchar_key, col_int_key)
+);
+INSERT INTO it VALUES (NULL,'x','x'), (NULL,'f','f');
+SELECT col_int_nokey
+FROM ot
+WHERE (col_varchar_nokey, 'x') IN
+(SELECT col_varchar_key, col_varchar_key2
+FROM it
+WHERE col_int_key IS NULL);
+col_int_nokey
+1
+
+EXPLAIN EXTENDED
+SELECT col_int_nokey
+FROM ot
+WHERE (col_varchar_nokey, 'x') IN
+(SELECT col_varchar_key, col_varchar_key2
+FROM it
+WHERE col_int_key IS NULL);
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	ot	system	NULL	NULL	NULL	NULL	1	100.00	
+2	DEPENDENT SUBQUERY	it	index_subquery	idx_cvk_cvk2_cik,idx_cvk_cik	idx_cvk_cvk2_cik	13	func,const,const	2	100.00	Using index; Using where
+Warnings:
+Note	1003	/* select#1 */ select '1' AS `col_int_nokey` from dual where <in_optimizer>(('x','x'),<exists>(<index_lookup>(<cache>('x') in it on idx_cvk_cvk2_cik where (isnull(`test`.`it`.`col_int_key`) and (<cache>('x') = `test`.`it`.`col_varchar_key`) and (<cache>('x') = `test`.`it`.`col_varchar_key2`)))))
+
+DROP TABLE it, ot;
 set optimizer_switch=default;

=== modified file 'mysql-test/r/subquery_none_bka.result'
--- a/mysql-test/r/subquery_none_bka.result	2011-08-26 13:26:33 +0000
+++ b/mysql-test/r/subquery_none_bka.result	2011-09-29 12:47:32 +0000
@@ -6733,5 +6733,96 @@ COUNT(*)
 6
 DROP TABLE t1, t2, t3;
 #
+# Bug#12838171: 51VS56: TRANSFORMED IN()+SUBQ QUERY 
+#               PRODUCES EMPTY RESULT ON 5.6, 1 ROW ON 5.1
+#
+CREATE TABLE ot (
+col_int_nokey int(11), 
+col_varchar_nokey varchar(1)
+) ;
+INSERT INTO ot VALUES (1,'x');
+CREATE TABLE it (
+col_int_key int(11), 
+col_varchar_key varchar(1), 
+KEY idx_cvk_cik (col_varchar_key,col_int_key)
+) ;
+INSERT INTO it VALUES (NULL,'x'), (NULL,'f');
+
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE col_int_key IS NULL);
+col_int_nokey
+1
+
+EXPLAIN EXTENDED
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE col_int_key IS NULL);
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	ot	system	NULL	NULL	NULL	NULL	1	100.00	
+2	DEPENDENT SUBQUERY	it	index_subquery	idx_cvk_cik	idx_cvk_cik	9	func,const	2	100.00	Using index; Using where
+Warnings:
+Note	1003	/* select#1 */ select '1' AS `col_int_nokey` from dual where <in_optimizer>('x',<exists>(<index_lookup>(<cache>('x') in it on idx_cvk_cik where (isnull(`test`.`it`.`col_int_key`) and (<cache>('x') = `test`.`it`.`col_varchar_key`)))))
+
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE coalesce(col_int_nokey, 1) );
+col_int_nokey
+1
+
+EXPLAIN EXTENDED
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE coalesce(col_int_nokey, 1) );
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	ot	system	NULL	NULL	NULL	NULL	1	100.00	
+2	DEPENDENT SUBQUERY	it	index_subquery	idx_cvk_cik	idx_cvk_cik	4	func	1	100.00	Using index
+Warnings:
+Note	1276	Field or reference 'test.ot.col_int_nokey' of SELECT #2 was resolved in SELECT #1
+Note	1003	/* select#1 */ select '1' AS `col_int_nokey` from dual where <in_optimizer>('x',<exists>(<index_lookup>(<cache>('x') in it on idx_cvk_cik)))
+DROP TABLE it;
+CREATE TABLE it (
+col_int_key int(11),
+col_varchar_key varchar(1),
+col_varchar_key2 varchar(1),
+KEY idx_cvk_cvk2_cik (col_varchar_key, col_varchar_key2, col_int_key),
+KEY idx_cvk_cik (col_varchar_key, col_int_key)
+);
+INSERT INTO it VALUES (NULL,'x','x'), (NULL,'f','f');
+SELECT col_int_nokey
+FROM ot
+WHERE (col_varchar_nokey, 'x') IN
+(SELECT col_varchar_key, col_varchar_key2
+FROM it
+WHERE col_int_key IS NULL);
+col_int_nokey
+1
+
+EXPLAIN EXTENDED
+SELECT col_int_nokey
+FROM ot
+WHERE (col_varchar_nokey, 'x') IN
+(SELECT col_varchar_key, col_varchar_key2
+FROM it
+WHERE col_int_key IS NULL);
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	ot	system	NULL	NULL	NULL	NULL	1	100.00	
+2	DEPENDENT SUBQUERY	it	index_subquery	idx_cvk_cvk2_cik,idx_cvk_cik	idx_cvk_cvk2_cik	13	func,const,const	2	100.00	Using index; Using where
+Warnings:
+Note	1003	/* select#1 */ select '1' AS `col_int_nokey` from dual where <in_optimizer>(('x','x'),<exists>(<index_lookup>(<cache>('x') in it on idx_cvk_cvk2_cik where (isnull(`test`.`it`.`col_int_key`) and (<cache>('x') = `test`.`it`.`col_varchar_key`) and (<cache>('x') = `test`.`it`.`col_varchar_key2`)))))
+
+DROP TABLE it, ot;
 set optimizer_switch=default;
 set optimizer_switch=default;

=== modified file 'mysql-test/r/subquery_none_bka_nixbnl.result'
--- a/mysql-test/r/subquery_none_bka_nixbnl.result	2011-10-04 06:35:35 +0000
+++ b/mysql-test/r/subquery_none_bka_nixbnl.result	2011-10-04 14:05:31 +0000
@@ -6733,5 +6733,96 @@ COUNT(*)
 6
 DROP TABLE t1, t2, t3;
 #
+# Bug#12838171: 51VS56: TRANSFORMED IN()+SUBQ QUERY 
+#               PRODUCES EMPTY RESULT ON 5.6, 1 ROW ON 5.1
+#
+CREATE TABLE ot (
+col_int_nokey int(11), 
+col_varchar_nokey varchar(1)
+) ;
+INSERT INTO ot VALUES (1,'x');
+CREATE TABLE it (
+col_int_key int(11), 
+col_varchar_key varchar(1), 
+KEY idx_cvk_cik (col_varchar_key,col_int_key)
+) ;
+INSERT INTO it VALUES (NULL,'x'), (NULL,'f');
+
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE col_int_key IS NULL);
+col_int_nokey
+1
+
+EXPLAIN EXTENDED
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE col_int_key IS NULL);
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	ot	system	NULL	NULL	NULL	NULL	1	100.00	
+2	DEPENDENT SUBQUERY	it	index_subquery	idx_cvk_cik	idx_cvk_cik	9	func,const	2	100.00	Using index; Using where
+Warnings:
+Note	1003	/* select#1 */ select '1' AS `col_int_nokey` from dual where <in_optimizer>('x',<exists>(<index_lookup>(<cache>('x') in it on idx_cvk_cik where (isnull(`test`.`it`.`col_int_key`) and (<cache>('x') = `test`.`it`.`col_varchar_key`)))))
+
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE coalesce(col_int_nokey, 1) );
+col_int_nokey
+1
+
+EXPLAIN EXTENDED
+SELECT col_int_nokey 
+FROM ot 
+WHERE col_varchar_nokey IN
+(SELECT col_varchar_key
+FROM it 
+WHERE coalesce(col_int_nokey, 1) );
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	ot	system	NULL	NULL	NULL	NULL	1	100.00	
+2	DEPENDENT SUBQUERY	it	index_subquery	idx_cvk_cik	idx_cvk_cik	4	func	1	100.00	Using index
+Warnings:
+Note	1276	Field or reference 'test.ot.col_int_nokey' of SELECT #2 was resolved in SELECT #1
+Note	1003	/* select#1 */ select '1' AS `col_int_nokey` from dual where <in_optimizer>('x',<exists>(<index_lookup>(<cache>('x') in it on idx_cvk_cik)))
+DROP TABLE it;
+CREATE TABLE it (
+col_int_key int(11),
+col_varchar_key varchar(1),
+col_varchar_key2 varchar(1),
+KEY idx_cvk_cvk2_cik (col_varchar_key, col_varchar_key2, col_int_key),
+KEY idx_cvk_cik (col_varchar_key, col_int_key)
+);
+INSERT INTO it VALUES (NULL,'x','x'), (NULL,'f','f');
+SELECT col_int_nokey
+FROM ot
+WHERE (col_varchar_nokey, 'x') IN
+(SELECT col_varchar_key, col_varchar_key2
+FROM it
+WHERE col_int_key IS NULL);
+col_int_nokey
+1
+
+EXPLAIN EXTENDED
+SELECT col_int_nokey
+FROM ot
+WHERE (col_varchar_nokey, 'x') IN
+(SELECT col_varchar_key, col_varchar_key2
+FROM it
+WHERE col_int_key IS NULL);
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	ot	system	NULL	NULL	NULL	NULL	1	100.00	
+2	DEPENDENT SUBQUERY	it	index_subquery	idx_cvk_cvk2_cik,idx_cvk_cik	idx_cvk_cvk2_cik	13	func,const,const	2	100.00	Using index; Using where
+Warnings:
+Note	1003	/* select#1 */ select '1' AS `col_int_nokey` from dual where <in_optimizer>(('x','x'),<exists>(<index_lookup>(<cache>('x') in it on idx_cvk_cvk2_cik where (isnull(`test`.`it`.`col_int_key`) and (<cache>('x') = `test`.`it`.`col_varchar_key`) and (<cache>('x') = `test`.`it`.`col_varchar_key2`)))))
+
+DROP TABLE it, ot;
 set optimizer_switch=default;
 set optimizer_switch=default;

=== modified file 'mysql-test/suite/binlog/r/binlog_stm_blackhole.result'
--- a/mysql-test/suite/binlog/r/binlog_stm_blackhole.result	2011-07-19 15:11:15 +0000
+++ b/mysql-test/suite/binlog/r/binlog_stm_blackhole.result	2011-09-29 10:42:53 +0000
@@ -99,6 +99,8 @@ alter table t1 drop b;
 create table t3 like t1;
 insert into t1 select * from t3;
 replace into t1 select * from t3;
+Warnings:
+Note	1592	Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. REPLACE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave.
 select * from t1;
 a
 select * from t2;

=== modified file 'mysql-test/suite/binlog/r/binlog_unsafe.result'
--- a/mysql-test/suite/binlog/r/binlog_unsafe.result	2010-12-23 11:41:50 +0000
+++ b/mysql-test/suite/binlog/r/binlog_unsafe.result	2011-09-29 10:42:53 +0000
@@ -2352,6 +2352,7 @@ Note	1592	Unsafe statement written to th
 REPLACE INTO t1 SELECT * FROM t1 LIMIT 1;
 Warnings:
 Note	1592	Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. The statement is unsafe because it uses a LIMIT clause. This is unsafe because the set of rows included cannot be predicted.
+Note	1592	Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. REPLACE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave.
 UPDATE t1 SET a=1 LIMIT 1;
 Warnings:
 Note	1592	Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. The statement is unsafe because it uses a LIMIT clause. This is unsafe because the set of rows included cannot be predicted.
@@ -2368,6 +2369,7 @@ END|
 CALL p1();
 Warnings:
 Note	1592	Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. The statement is unsafe because it uses a LIMIT clause. This is unsafe because the set of rows included cannot be predicted.
+Note	1592	Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. REPLACE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave.
 DROP PROCEDURE p1;
 DROP TABLE t1;
 DROP TABLE IF EXISTS t1;
@@ -2651,4 +2653,40 @@ a
 13:46:40
 1970-01-12 13:46:40
 DROP TABLE t1;
+CREATE TABLE filler_table (a INT, b INT);
+INSERT INTO filler_table values (1,1),(1,2);
+CREATE TABLE insert_table (a INT, b INT, PRIMARY KEY(a));
+CREATE TABLE replace_table (a INT, b INT, PRIMARY KEY(a));
+INSERT INTO replace_table values (1,1),(2,2);
+CREATE TABLE update_table (a INT, b INT, PRIMARY KEY(a));
+INSERT INTO update_table values (1,1),(2,2);
+INSERT IGNORE INTO insert_table SELECT * FROM filler_table;
+Warnings:
+Note	1592	Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. INSERT IGNORE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave.
+TRUNCATE TABLE insert_table;
+INSERT INTO insert_table SELECT * FROM filler_table ON DUPLICATE KEY UPDATE a = 1;
+Warnings:
+Note	1592	Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. INSERT... SELECT... ON DUPLICATE KEY UPDATE is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are updated. This order cannot be predicted and may differ on master and the slave.
+TRUNCATE TABLE insert_table;
+REPLACE INTO replace_table SELECT * FROM filler_table;
+Warnings:
+Note	1592	Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. REPLACE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave.
+UPDATE IGNORE update_table SET a=2;
+Warnings:
+Note	1592	Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. UPDATE IGNORE is unsafe because the order in which rows are updated determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave.
+CREATE TABLE create_ignore_test (a INT, b INT, PRIMARY KEY(b)) IGNORE SELECT * FROM filler_table;
+Warnings:
+Note	1592	Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. CREATE... IGNORE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave.
+CREATE TABLE create_replace_test (a INT, b INT, PRIMARY KEY(b)) REPLACE SELECT * FROM filler_table;
+Warnings:
+Note	1592	Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. CREATE... REPLACE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave.
+CREATE TEMPORARY TABLE temp1 (a INT, b INT, PRIMARY KEY(b)) REPLACE SELECT * FROM filler_table;
+Warnings:
+Note	1592	Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. CREATE... REPLACE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave.
+DROP TABLE filler_table;
+DROP TABLE insert_table;
+DROP TABLE update_table;
+DROP TABLE replace_table;
+DROP TABLE create_ignore_test;
+DROP TABLE create_replace_test;
 "End of tests"

=== modified file 'mysql-test/suite/binlog/t/binlog_unsafe.test'
--- a/mysql-test/suite/binlog/t/binlog_unsafe.test	2010-12-23 11:41:50 +0000
+++ b/mysql-test/suite/binlog/t/binlog_unsafe.test	2011-09-29 10:42:53 +0000
@@ -12,6 +12,11 @@
 #  - insert into two autoinc columns;
 #  - statements using UDF's.
 #  - statements reading from log tables in the mysql database.
+#  - INSERT ... SELECT ... ON DUPLICATE KEY UPDATE
+#  - REPLACE ... SELECT
+#  - CREATE TABLE [IGNORE/REPLACE] SELECT
+#  - INSERT IGNORE...SELECT
+#  - UPDATE IGNORE
 #
 # Note that statements that use stored functions, stored procedures,
 # triggers, views, or prepared statements that invoke unsafe
@@ -79,6 +84,7 @@
 # BUG#45785: LIMIT in SP does not cause RBL if binlog_format=MIXED
 # BUG#47995: Mark user functions as unsafe
 # BUG#49222: Mark RAND() unsafe
+# BUG#11758262: MARK INSERT...SEL...ON DUP KEY UPD,REPLACE...SEL,CREATE...[IGN|REPL] SEL
 #
 # ==== Related test cases ====
 #
@@ -699,5 +705,47 @@ INSERT INTO t1 VALUES
 SELECT * FROM t1;
 
 DROP TABLE t1;
+#
+#BUG#11758262-50439: MARK INSERT...SEL...ON DUP KEY UPD,REPLACE..
+#The following statement may be unsafe when logged in statement format.
+#INSERT IGNORE...SELECT 
+#INSERT ... SELECT ... ON DUPLICATE KEY UPDATE 
+#REPLACE ... SELECT 
+#UPDATE IGNORE 
+#CREATE TABLE... IGNORE SELECT 
+#CREATE TABLE... REPLACE SELECT
+
+#setup tables
+CREATE TABLE filler_table (a INT, b INT);
+INSERT INTO filler_table values (1,1),(1,2);
+CREATE TABLE insert_table (a INT, b INT, PRIMARY KEY(a));
+CREATE TABLE replace_table (a INT, b INT, PRIMARY KEY(a));
+INSERT INTO replace_table values (1,1),(2,2);
+CREATE TABLE update_table (a INT, b INT, PRIMARY KEY(a));
+INSERT INTO update_table values (1,1),(2,2);
+
+#INSERT IGNORE... SELECT
+INSERT IGNORE INTO insert_table SELECT * FROM filler_table;
+TRUNCATE TABLE insert_table;
+#INSERT ... SELECT ... ON DUPLICATE KEY UPDATE 
+INSERT INTO insert_table SELECT * FROM filler_table ON DUPLICATE KEY UPDATE a = 1;
+TRUNCATE TABLE insert_table;
+#REPLACE...SELECT
+REPLACE INTO replace_table SELECT * FROM filler_table;
+#UPDATE IGNORE
+UPDATE IGNORE update_table SET a=2;
+#CREATE TABLE [IGNORE/REPLACE] SELECT
+CREATE TABLE create_ignore_test (a INT, b INT, PRIMARY KEY(b)) IGNORE SELECT * FROM filler_table;
+CREATE TABLE create_replace_test (a INT, b INT, PRIMARY KEY(b)) REPLACE SELECT * FROM filler_table;
+#temporary tables should not throw the warning.
+CREATE TEMPORARY TABLE temp1 (a INT, b INT, PRIMARY KEY(b)) REPLACE SELECT * FROM filler_table;
+
+###clean up
+DROP TABLE filler_table;
+DROP TABLE insert_table;
+DROP TABLE update_table;
+DROP TABLE replace_table;
+DROP TABLE create_ignore_test;
+DROP TABLE create_replace_test;
 
 --echo "End of tests"

=== modified file 'mysql-test/suite/rpl/r/rpl_checksum.result'
--- a/mysql-test/suite/rpl/r/rpl_checksum.result	2011-05-17 22:52:04 +0000
+++ b/mysql-test/suite/rpl/r/rpl_checksum.result	2011-09-30 13:14:37 +0000
@@ -64,7 +64,7 @@ insert into t1 values (1) /* will not be
 set @@global.debug='d,simulate_slave_unaware_checksum';
 start slave;
 include/wait_for_slave_io_error.inc [errno=1236]
-Last_IO_Error = 'Got fatal error 1236 from master when reading data from binary log: 'Slave can not handle replication events with the checksum that master is configured to log''
+Last_IO_Error = 'Got fatal error 1236 from master when reading data from binary log: 'Slave can not handle replication events with the checksum that master is configured to log; the last event was read from 'master-bin.000010' at 114, the last byte read was read from 'master-bin.000010' at 114.''
 select count(*) as zero from t1;
 zero
 0

=== removed file 'mysql-test/suite/rpl/r/rpl_insert_duplicate.result'
--- a/mysql-test/suite/rpl/r/rpl_insert_duplicate.result	2011-01-28 12:09:15 +0000
+++ b/mysql-test/suite/rpl/r/rpl_insert_duplicate.result	1970-01-01 00:00:00 +0000
@@ -1,29 +0,0 @@
-include/master-slave.inc
-[connection master]
-CREATE TABLE t1 (
-a INT UNSIGNED NOT NULL PRIMARY KEY
-) ENGINE=innodb;
-CREATE TABLE t2 (
-a INT UNSIGNED
-) ENGINE=innodb;
-INSERT INTO t1 VALUES (1);
-INSERT INTO t2 VALUES (1);
-INSERT INTO t1 SELECT t2.a FROM t2 ORDER BY t2.a ON DUPLICATE KEY UPDATE t1.a= t1.a;
-include/assert.inc [Sum of elements in t1 should be 1.]
-include/assert.inc [In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.]
-include/diff_tables.inc [master:test.t1 , slave:test.t1]
-drop table t1, t2;
-CREATE TABLE t1 (
-a INT UNSIGNED NOT NULL PRIMARY KEY
-) ENGINE=myisam;
-CREATE TABLE t2 (
-a INT UNSIGNED
-) ENGINE=myisam;
-INSERT INTO t1 VALUES (1);
-INSERT INTO t2 VALUES (1);
-INSERT INTO t1 SELECT t2.a FROM t2 ORDER BY t2.a ON DUPLICATE KEY UPDATE t1.a= t1.a;
-include/assert.inc [Sum of elements in t1 should be 1.]
-include/assert.inc [In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.]
-include/diff_tables.inc [master:test.t1 , slave:test.t1]
-drop table t1, t2;
-include/rpl_end.inc

=== modified file 'mysql-test/suite/rpl/r/rpl_insert_ignore.result'
--- a/mysql-test/suite/rpl/r/rpl_insert_ignore.result	2011-01-31 14:34:04 +0000
+++ b/mysql-test/suite/rpl/r/rpl_insert_ignore.result	2011-09-29 09:17:27 +0000
@@ -26,6 +26,19 @@ include/diff_tables.inc [master:test.t1 
 INSERT IGNORE INTO t1 SELECT NULL, t2.b FROM t2 ORDER BY t2.a;
 include/assert.inc [Count of elements in t1 should be 6.]
 include/assert.inc [In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.]
+DROP TABLE t1;
+DROP TABLE t2;
+CREATE TABLE t1 (
+a INT UNSIGNED NOT NULL PRIMARY KEY
+) ENGINE=innodb;
+CREATE TABLE t2 (
+a INT UNSIGNED
+) ENGINE=innodb;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t2 VALUES (1);
+INSERT INTO t1 SELECT t2.a FROM t2 ORDER BY t2.a ON DUPLICATE KEY UPDATE t1.a= t1.a;
+include/assert.inc [Sum of elements in t1 should be 1.]
+include/assert.inc [In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.]
 drop table t1, t2;
 CREATE TABLE t1 (
 a int unsigned not null auto_increment primary key,
@@ -52,5 +65,18 @@ include/diff_tables.inc [master:test.t1 
 INSERT IGNORE INTO t1 SELECT NULL, t2.b FROM t2 ORDER BY t2.a;
 include/assert.inc [Count of elements in t1 should be 6.]
 include/assert.inc [In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.]
+DROP TABLE t1;
+DROP TABLE t2;
+CREATE TABLE t1 (
+a INT UNSIGNED NOT NULL PRIMARY KEY
+) ENGINE=myisam;
+CREATE TABLE t2 (
+a INT UNSIGNED
+) ENGINE=myisam;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t2 VALUES (1);
+INSERT INTO t1 SELECT t2.a FROM t2 ORDER BY t2.a ON DUPLICATE KEY UPDATE t1.a= t1.a;
+include/assert.inc [Sum of elements in t1 should be 1.]
+include/assert.inc [In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.]
 drop table t1, t2;
 include/rpl_end.inc

=== removed file 'mysql-test/suite/rpl/r/rpl_insert_select.result'
--- a/mysql-test/suite/rpl/r/rpl_insert_select.result	2010-12-19 17:07:28 +0000
+++ b/mysql-test/suite/rpl/r/rpl_insert_select.result	1970-01-01 00:00:00 +0000
@@ -1,14 +0,0 @@
-include/master-slave.inc
-[connection master]
-create table t1 (n int not null primary key);
-insert into t1 values (1);
-create table t2 (n int);
-insert into t2 values (1);
-insert ignore into t1 select * from t2;
-insert into t1 values (2);
-select * from t1;
-n
-1
-2
-drop table t1,t2;
-include/rpl_end.inc

=== modified file 'mysql-test/suite/rpl/r/rpl_known_bugs_detection.result'
--- a/mysql-test/suite/rpl/r/rpl_known_bugs_detection.result	2011-08-19 13:04:28 +0000
+++ b/mysql-test/suite/rpl/r/rpl_known_bugs_detection.result	2011-09-29 10:42:53 +0000
@@ -43,12 +43,16 @@ SELECT t2.field_a, t2.field_b, t2.field_
 FROM t2
 ON DUPLICATE KEY UPDATE
 t1.field_3 = t2.field_c;
+Warnings:
+Note	1592	Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. INSERT... SELECT... ON DUPLICATE KEY UPDATE is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are updated. This order cannot be predicted and may differ on master and the slave.
 INSERT INTO t2 (field_a, field_b, field_c) VALUES (6, 'f', '6f');
 INSERT INTO t1 (field_1, field_2, field_3)
 SELECT t2.field_a, t2.field_b, t2.field_c
 FROM t2
 ON DUPLICATE KEY UPDATE
 t1.field_3 = t2.field_c;
+Warnings:
+Note	1592	Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. INSERT... SELECT... ON DUPLICATE KEY UPDATE is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are updated. This order cannot be predicted and may differ on master and the slave.
 SELECT * FROM t1;
 id	field_1	field_2	field_3
 1	1	a	1a

=== modified file 'mysql-test/suite/rpl/r/rpl_log_pos.result'
--- a/mysql-test/suite/rpl/r/rpl_log_pos.result	2011-01-31 13:44:38 +0000
+++ b/mysql-test/suite/rpl/r/rpl_log_pos.result	2011-09-30 13:14:37 +0000
@@ -9,7 +9,7 @@ change master to master_log_pos=MASTER_L
 Read_Master_Log_Pos = '75'
 start slave;
 include/wait_for_slave_io_error.inc [errno=1236]
-Last_IO_Error = 'Got fatal error 1236 from master when reading data from binary log: 'log event entry exceeded max_allowed_packet; Increase max_allowed_packet on master''
+Last_IO_Error = 'Got fatal error 1236 from master when reading data from binary log: 'log event entry exceeded max_allowed_packet; Increase max_allowed_packet on master; the last event was read from 'master-bin.000001' at 75, the last byte read was read from 'master-bin.000001' at 94.''
 include/stop_slave_sql.inc
 show master status;
 File	Position	Binlog_Do_DB	Binlog_Ignore_DB

=== modified file 'mysql-test/suite/rpl/r/rpl_manual_change_index_file.result'
--- a/mysql-test/suite/rpl/r/rpl_manual_change_index_file.result	2011-01-17 10:10:06 +0000
+++ b/mysql-test/suite/rpl/r/rpl_manual_change_index_file.result	2011-10-03 11:49:38 +0000
@@ -5,7 +5,6 @@ CREATE TABLE t1(c1 INT);
 FLUSH LOGS;
 call mtr.add_suppression('Got fatal error 1236 from master when reading data from binary log: .*could not find next log');
 include/wait_for_slave_io_error.inc [errno=1236]
-Last_IO_Error = 'Got fatal error 1236 from master when reading data from binary log: 'could not find next log''
 CREATE TABLE t2(c1 INT);
 FLUSH LOGS;
 CREATE TABLE t3(c1 INT);

=== modified file 'mysql-test/suite/rpl/r/rpl_packet.result'
--- a/mysql-test/suite/rpl/r/rpl_packet.result	2011-08-19 13:04:28 +0000
+++ b/mysql-test/suite/rpl/r/rpl_packet.result	2011-10-03 11:49:38 +0000
@@ -37,7 +37,6 @@ DROP TABLE t1;
 CREATE TABLE t1 (f1 int PRIMARY KEY, f2 LONGTEXT, f3 LONGTEXT) ENGINE=MyISAM;
 INSERT INTO t1(f1, f2, f3) VALUES(1, REPEAT('a', @@global.max_allowed_packet), REPEAT('b', @@global.max_allowed_packet));
 include/wait_for_slave_io_error.inc [errno=1236]
-Last_IO_Error = 'Got fatal error 1236 from master when reading data from binary log: 'log event entry exceeded max_allowed_packet; Increase max_allowed_packet on master''
 STOP SLAVE;
 RESET SLAVE;
 RESET MASTER;

=== added file 'mysql-test/suite/rpl/r/rpl_replicate_rewrite_db.result'
--- a/mysql-test/suite/rpl/r/rpl_replicate_rewrite_db.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl/r/rpl_replicate_rewrite_db.result	2011-10-03 11:16:06 +0000
@@ -0,0 +1,14 @@
+include/master-slave.inc
+[connection master]
+testing for normal functionality
+include/rpl_start_server.inc [server_number=1 parameters: --replicate-rewrite-db='mysql->test']
+[PASS]
+testing with single letter database names.
+include/rpl_start_server.inc [server_number=1 parameters: --replicate-rewrite-db='a->b']
+[PASS]
+check for missing '->' operator. Should FAIL with error
+[FAIL]
+check for empty "from" db name. Should FAIL with error
+[FAIL]
+check for empty "to" db name. Should FAIL with error
+[FAIL]

=== modified file 'mysql-test/suite/rpl/r/rpl_row_event_max_size.result'
--- a/mysql-test/suite/rpl/r/rpl_row_event_max_size.result	2011-08-19 13:04:28 +0000
+++ b/mysql-test/suite/rpl/r/rpl_row_event_max_size.result	2011-10-03 11:49:38 +0000
@@ -63,7 +63,6 @@ call mtr.add_suppression("Found invalid 
 call mtr.add_suppression("The slave coordinator and worker threads are stopped, possibly leaving data in inconsistent state");
 drop table t1;
 include/wait_for_slave_io_error.inc [errno=1236]
-Last_IO_Error = 'Got fatal error 1236 from master when reading data from binary log: 'log event entry exceeded max_allowed_packet; Increase max_allowed_packet on master''
 ==== clean up ====
 include/stop_slave_sql.inc
 RESET SLAVE;

=== removed file 'mysql-test/suite/rpl/t/rpl_insert_duplicate.test'
--- a/mysql-test/suite/rpl/t/rpl_insert_duplicate.test	2011-01-28 12:09:15 +0000
+++ b/mysql-test/suite/rpl/t/rpl_insert_duplicate.test	1970-01-01 00:00:00 +0000
@@ -1,14 +0,0 @@
-#########################################
-# Wrapper for rpl_insert_duplicate.test #
-#########################################
--- source include/master-slave.inc
--- source include/have_innodb.inc
-#-- source include/have_binlog_format_mixed_or_statement.inc
-
-let $engine_type=innodb;
--- source extra/rpl_tests/rpl_insert_duplicate.test
-
-let $engine_type=myisam;
--- source extra/rpl_tests/rpl_insert_duplicate.test
-
---source include/rpl_end.inc

=== removed file 'mysql-test/suite/rpl/t/rpl_insert_select.test'
--- a/mysql-test/suite/rpl/t/rpl_insert_select.test	2010-12-19 17:07:28 +0000
+++ b/mysql-test/suite/rpl/t/rpl_insert_select.test	1970-01-01 00:00:00 +0000
@@ -1,20 +0,0 @@
-# Testcase for BUG#10456 - INSERT INTO ... SELECT violating a primary key
-# breaks replication
-
--- source include/master-slave.inc
-connection master;
-
-create table t1 (n int not null primary key);
-insert into t1 values (1);
-create table t2 (n int);
-insert into t2 values (1);
-insert ignore into t1 select * from t2;
-insert into t1 values (2);
-sync_slave_with_master;
-connection slave;
-select * from t1;
-
-connection master;
-drop table t1,t2;
-sync_slave_with_master;
---source include/rpl_end.inc

=== modified file 'mysql-test/suite/rpl/t/rpl_known_bugs_detection.test'
--- a/mysql-test/suite/rpl/t/rpl_known_bugs_detection.test	2011-08-19 13:04:28 +0000
+++ b/mysql-test/suite/rpl/t/rpl_known_bugs_detection.test	2011-09-29 10:42:53 +0000
@@ -9,7 +9,7 @@ source include/have_binlog_checksum_off.
 source include/master-slave.inc;
 
 # Currently only statement-based-specific bugs are here
--- source include/have_binlog_format_mixed_or_statement.inc
+-- source include/have_binlog_format_statement.inc
 
 
 #

=== modified file 'mysql-test/suite/rpl/t/rpl_manual_change_index_file.test'
--- a/mysql-test/suite/rpl/t/rpl_manual_change_index_file.test	2011-01-17 10:10:06 +0000
+++ b/mysql-test/suite/rpl/t/rpl_manual_change_index_file.test	2011-10-03 11:49:38 +0000
@@ -61,7 +61,7 @@ call mtr.add_suppression('Got fatal erro
 connection slave;
 # 1236 = ER_MASTER_FATAL_ERROR_READING_BINLOG
 --let $slave_io_errno= 1236
---let $show_slave_io_error= 1
+--let $show_slave_io_error= 0
 --source include/wait_for_slave_io_error.inc
 
 connection master;

=== modified file 'mysql-test/suite/rpl/t/rpl_packet.test'
--- a/mysql-test/suite/rpl/t/rpl_packet.test	2011-08-19 13:04:28 +0000
+++ b/mysql-test/suite/rpl/t/rpl_packet.test	2011-10-03 11:49:38 +0000
@@ -144,7 +144,7 @@ connection slave;
 # The slave I/O thread must stop after receiving
 # 1236=ER_MASTER_FATAL_ERROR_READING_BINLOG error message from master.
 --let $slave_io_errno= 1236
---let $show_slave_io_error= 1
+--let $show_slave_io_error= 0
 --source include/wait_for_slave_io_error.inc
 
 # Remove the bad binlog and clear error status on slave.

=== added file 'mysql-test/suite/rpl/t/rpl_replicate_rewrite_db.test'
--- a/mysql-test/suite/rpl/t/rpl_replicate_rewrite_db.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl/t/rpl_replicate_rewrite_db.test	2011-10-03 11:16:06 +0000
@@ -0,0 +1,49 @@
+##---------------------------------------------------------------------------
+# Purpose: Testing error messages for --replicate-rewrite-db option
+##--------------------------------------------------------------------------
+
+#check single character databases. Bug#11747866
+--let $rpl_skip_reset_master_and_slave= 1
+--let $rpl_skip_change_master= 1
+--let $rpl_skip_start_slave= 1
+--source include/master-slave.inc
+--connection master
+
+#check for normal functionality
+--echo testing for normal functionality
+--let $rpl_server_number= 1
+--let $rpl_server_parameters= --replicate-rewrite-db='mysql->test'
+--source include/rpl_start_server.inc
+--echo [PASS]
+
+#check for single character databases. BUG#11747866
+--echo testing with single letter database names.
+--let $rpl_server_number= 1
+--let $rpl_server_parameters= --replicate-rewrite-db='a->b'
+--source include/rpl_start_server.inc
+--echo [PASS]
+
+#check for missing '->' operator. Should fail with error
+--echo check for missing '->' operator. Should FAIL with error
+--disable_result_log
+--error 1 
+exec $MYSQLD --replicate-rewrite-db='mysql' 2>&1;
+--enable_result_log
+--echo [FAIL]
+
+#check for empty "from" db name. Should fail with error
+--echo check for empty "from" db name. Should FAIL with error
+--disable_result_log
+--error 1
+exec $MYSQLD --replicate-rewrite-db= '->test ' 2>&1;
+--enable_result_log
+--echo [FAIL]
+
+#check for empty "to" db name. Should fail with error.
+--echo check for empty "to" db name. Should FAIL with error
+--disable_result_log
+--error 1
+exec $MYSQLD --replicate-rewrite-db='acc-> ' 2>&1;
+--enable_result_log
+--echo [FAIL]
+

=== modified file 'mysql-test/suite/rpl/t/rpl_row_event_max_size.test'
--- a/mysql-test/suite/rpl/t/rpl_row_event_max_size.test	2011-08-19 13:04:28 +0000
+++ b/mysql-test/suite/rpl/t/rpl_row_event_max_size.test	2011-10-03 11:49:38 +0000
@@ -73,7 +73,7 @@ call mtr.add_suppression("The slave coor
 # Show slave last IO errno
 drop table t1;
 let $slave_io_errno= 1236;
-let $show_slave_io_error= 1;
+let $show_slave_io_error= 0;
 source include/wait_for_slave_io_error.inc;
 
 --echo ==== clean up ====

=== modified file 'mysql-test/t/join_outer.test'
--- a/mysql-test/t/join_outer.test	2011-06-11 13:38:32 +0000
+++ b/mysql-test/t/join_outer.test	2011-09-30 12:20:04 +0000
@@ -1342,3 +1342,42 @@ execute prep_stmt_9846;
 deallocate prepare prep_stmt_9846;
 drop table t1;
 
+--echo #
+--echo # Bug#13040136 - ASSERT IN PLAN_CHANGE_WATCHDOG::~PLAN_CHANGE_WATCHDOG()
+--echo #
+CREATE TABLE t1 (
+  col_varchar_10 VARCHAR(10),
+  col_int_key INTEGER,
+  col_varchar_10_key VARCHAR(10),
+  pk INTEGER NOT NULL,
+  PRIMARY KEY (pk),
+  KEY (col_int_key),
+  KEY (col_varchar_10_key)
+);
+INSERT INTO t1 VALUES ('q',NULL,'o',1);
+
+CREATE TABLE t2 (
+  pk INTEGER NOT NULL AUTO_INCREMENT,
+  col_varchar_10_key VARCHAR(10),
+  col_int_key INTEGER,
+  col_varchar_10 VARCHAR(10),
+  PRIMARY KEY (pk),
+  KEY (col_varchar_10_key),
+  KEY col_int_key (col_int_key)
+);
+INSERT INTO t2 VALUES
+(1,'r',NULL,'would'),(2,'tell',-655032320,'t'),
+(3,'d',9,'a'),(4,'gvafasdkiy',6,'ugvafasdki'),
+(5,'that\'s',NULL,'she'),(6,'bwftwugvaf',7,'cbwftwugva'),
+(7,'f',-700055552,'mkacbwftwu'),(8,'a',9,'be'),
+(9,'d',NULL,'u'),(10,'ckiixcsxmk',NULL,'o');
+
+SELECT DISTINCT t2.col_int_key 
+FROM
+t1
+LEFT JOIN t2
+ON t1.col_varchar_10 = t2.col_varchar_10_key 
+WHERE t2.pk
+ORDER BY t2.col_int_key;
+
+DROP TABLE t1,t2;

=== modified file 'mysql-test/t/partition.test'
--- a/mysql-test/t/partition.test	2011-06-13 10:55:58 +0000
+++ b/mysql-test/t/partition.test	2011-10-03 12:22:21 +0000
@@ -2421,3 +2421,15 @@ unlock tables;
 drop table t1;
 
 --echo # End of 5.5 tests
+
+#
+# Bug #12330344 Crash and/or valgrind errors in free_io_cache with join, view,
+# partitioned table
+#
+
+CREATE TABLE t1(a INT PRIMARY KEY) PARTITION BY LINEAR KEY (a);
+CREATE ALGORITHM=TEMPTABLE VIEW vtmp AS
+SELECT 1 FROM t1 AS t1_0 JOIN t1 ON t1_0.a LIKE (SELECT 1 FROM t1);
+SELECT * FROM vtmp;
+DROP VIEW vtmp;
+DROP TABLE t1;

=== modified file 'sql/item_subselect.cc'
--- a/sql/item_subselect.cc	2011-08-29 11:57:44 +0000
+++ b/sql/item_subselect.cc	2011-09-29 13:25:22 +0000
@@ -2558,6 +2558,7 @@ bool subselect_uniquesubquery_engine::sc
 
     if (!cond || cond->val_int())
     {
+      static_cast<Item_in_subselect*>(item)->value= true;
       empty_result_set= FALSE;
       break;
     }
@@ -2568,79 +2569,86 @@ bool subselect_uniquesubquery_engine::sc
 }
 
 
-/*
+/**
   Copy ref key and check for null parts in it
 
-  SYNOPSIS
-    subselect_uniquesubquery_engine::copy_ref_key()
+  Construct a search tuple to be used for index lookup. If one of the
+  key parts has a NULL value, the following logic applies:
 
-  DESCRIPTION
-    Copy ref key and check for null parts in it.
-    Depending on the nullability and conversion problems this function
-    recognizes and processes the following states :
-      1. Partial match on top level. This means IN has a value of FALSE
-         regardless of the data in the subquery table.
-         Detected by finding a NULL in the left IN operand of a top level
-         expression.
-         We may actually skip reading the subquery, so return TRUE to skip
-         the table scan in subselect_uniquesubquery_engine::exec and make
-         the value of the IN predicate a NULL (that is equal to FALSE on
-         top level).
-      2. No exact match when IN is nested inside another predicate.
-         Detected by finding a NULL in the left IN operand when IN is not
-         a top level predicate.
-         We cannot have an exact match. But we must proceed further with a
-         table scan to find out if it's a partial match (and IN has a value
-         of NULL) or no match (and IN has a value of FALSE).
-         So we return FALSE to continue with the scan and see if there are
-         any record that would constitute a partial match (as we cannot
-         determine that from the index).
-      3. Error converting the left IN operand to the column type of the
-         right IN operand. This counts as no match (and IN has the value of
-         FALSE). We mark the subquery table cursor as having no more rows
-         (to ensure that the processing that follows will not find a match)
-         and return FALSE, so IN is not treated as returning NULL.
+  For top level items, e.g.
 
+     "WHERE <outer_value_list> IN (SELECT <inner_value_list>...)"
 
-  RETURN
-    FALSE - The value of the IN predicate is not known. Proceed to find the
-            value of the IN predicate using the determined values of
-            null_keypart and table->status.
-    TRUE  - IN predicate has a value of NULL. Stop the processing right there
-            and return NULL to the outer predicates.
+  where one of the outer values is NULL, the IN predicate evaluates
+  to false/UNKNOWN (we don't care) and it's not necessary to evaluate
+  the subquery. That shortcut is taken in
+  Item_in_optimizer::val_int(). Thus, if a key part with a NULL value
+  is found here, the NULL is either not outer or this subquery is not
+  top level. Therefore we cannot shortcut subquery execution if a NULL
+  is found here.
+
+  Thus, if one of the key parts has a NULL value, there are two
+  possibilities:
+
+  a) The NULL is from the outer_value_list. Since this is not a top
+     level item (see above) we need to check whether this predicate
+     evaluates to NULL or false. That is done by checking if the
+     subquery has a row if the conditions based on outer NULL values
+     are disabled. Index lookup cannot be used for this, so a table
+     scan must be done.
+
+  b) The NULL is local to the subquery, e.g.:
+
+        "WHERE ... IN (SELECT ... WHERE inner_col IS NULL)"
+
+     In this case we're looking for rows with the exact inner_col
+     value of NULL, not rows that match if the "inner_col IS NULL"
+     condition is disabled. Index lookup can be used for this.
+
+  @see subselect_uniquesubquery_engine::exec()
+  @see Item_in_optimizer::val_int()
+
+  @param[out] require_scan   true if a NULL value is found that falls 
+                             into category a) above, false if index 
+                             lookup can be used.
+  @param[out] convert_error  true if an error occurred during conversion
+                             of values from one type to another, false
+                             otherwise.
+  
 */
-
-bool subselect_uniquesubquery_engine::copy_ref_key()
+void subselect_uniquesubquery_engine::copy_ref_key(bool *require_scan, 
+                                                   bool *convert_error)
 {
   DBUG_ENTER("subselect_uniquesubquery_engine::copy_ref_key");
 
-  for (store_key **copy= tab->ref.key_copy ; *copy ; copy++)
+  *require_scan= false;
+  *convert_error= false;
+  for (uint part_no= 0; part_no < tab->ref.key_parts; part_no++)
   {
-    enum store_key::store_key_result store_res;
-    store_res= (*copy)->copy();
-    tab->ref.key_err= store_res;
+    store_key *s_key= tab->ref.key_copy[part_no];
+    if (s_key == NULL)
+      continue; // key is const and does not need to be reevaluated
 
-    /*
-      When there is a NULL part in the key we don't need to make index
-      lookup for such key thus we don't need to copy whole key.
-      If we later should do a sequential scan return OK. Fail otherwise.
+    const enum store_key::store_key_result store_res= s_key->copy();
+    tab->ref.key_err= store_res;
 
-      See also the comment for the subselect_uniquesubquery_engine::exec()
-      function.
-    */
-    null_keypart= (*copy)->null_key;
-    if (null_keypart)
+    if (s_key->null_key)
     {
-      bool top_level= ((Item_in_subselect *) item)->is_top_level_item();
-      if (top_level)
-      {
-        /* Partial match on top level */
-        DBUG_RETURN(1);
-      }
-      else
+      const bool *cond_guard= tab->ref.cond_guards[part_no];
+
+      /*
+        NULL value is from the outer_value_list if the key part has a
+        cond guard that deactivates the condition. @see
+        TABLE_REF::cond_guards
+
+      */
+      if (cond_guard && !*cond_guard)
       {
-        /* No exact match when IN is nested inside another predicate */
-        break;
+        DBUG_ASSERT(!(static_cast <Item_in_subselect*>(item)
+                      ->is_top_level_item()));
+
+        *require_scan= true;
+        DBUG_VOID_RETURN;
       }
     }
 
@@ -2659,10 +2667,11 @@ bool subselect_uniquesubquery_engine::co
        IN operand. 
       */
       tab->table->status= STATUS_NOT_FOUND;
-      break;
+      *convert_error= true;
+      DBUG_VOID_RETURN;
     }
   }
-  DBUG_RETURN(0);
+  DBUG_VOID_RETURN;
 }
 
 
@@ -2719,22 +2728,20 @@ bool subselect_uniquesubquery_engine::ex
       DBUG_RETURN(1);
   }
 
-  /* TODO: change to use of 'full_scan' here? */
-  if (copy_ref_key())
-    DBUG_RETURN(1);
-  if (table->status)
-  {
-    /* 
-      We know that there will be no rows even if we scan. 
-      Can be set in copy_ref_key.
-    */
+  /* Copy the ref key and check for nulls... */
+  bool require_scan, convert_error;
+  copy_ref_key(&require_scan, &convert_error);
+  if (convert_error)
+  {
     ((Item_in_subselect *) item)->value= 0;
     DBUG_RETURN(0);
   }
 
-  if (null_keypart)
-    DBUG_RETURN(scan_table());
- 
+  if (require_scan)
+  {
+    const bool scan_result= scan_table();
+    DBUG_RETURN(scan_result);
+  }
   if (!table->file->inited)
     table->file->ha_index_init(tab->ref.key, 0);
   error= table->file->ha_index_read_map(table->record[0],
@@ -2824,7 +2831,6 @@ bool subselect_indexsubquery_engine::exe
 
   ((Item_in_subselect *) item)->value= 0;
   empty_result_set= TRUE;
-  null_keypart= 0;
   table->status= 0;
 
   if (tl->uses_materialization() && !tl->materialized)
@@ -2848,21 +2854,19 @@ bool subselect_indexsubquery_engine::exe
   }
 
   /* Copy the ref key and check for nulls... */
-  if (copy_ref_key())
-    DBUG_RETURN(1);
-
-  if (table->status)
+  bool require_scan, convert_error;
+  copy_ref_key(&require_scan, &convert_error);
+  if (convert_error)
   {
-    /* 
-      We know that there will be no rows even if we scan. 
-      Can be set in copy_ref_key.
-    */
     ((Item_in_subselect *) item)->value= 0;
     DBUG_RETURN(0);
   }
 
-  if (null_keypart)
-    DBUG_RETURN(scan_table());
+  if (require_scan)
+  {
+    const bool scan_result= scan_table();
+    DBUG_RETURN(scan_result);
+  }
 
   if (!table->file->inited)
     table->file->ha_index_init(tab->ref.key, 1);
@@ -3365,14 +3369,11 @@ bool subselect_hash_sj_engine::setup(Lis
   if (!(tmp_tab->ref.key_buff=
         (uchar*) thd->calloc(ALIGN_SIZE(tmp_key->key_length) * 2)) ||
       !(tmp_tab->ref.key_copy=
-        (store_key**) thd->alloc((sizeof(store_key*) *
-                                  (tmp_key_parts + 1)))) ||
+        (store_key**) thd->alloc((sizeof(store_key*) * tmp_key_parts))) ||
       !(tmp_tab->ref.items=
         (Item**) thd->alloc(sizeof(Item*) * tmp_key_parts)))
     DBUG_RETURN(TRUE);
 
-  KEY_PART_INFO *cur_key_part= tmp_key->key_part;
-  store_key **ref_key= tmp_tab->ref.key_copy;
   uchar *cur_ref_buff= tmp_tab->ref.key_buff;
 
   /*
@@ -3405,16 +3406,20 @@ bool subselect_hash_sj_engine::setup(Lis
   context->first_name_resolution_table=
     context->last_name_resolution_table= tmp_table_ref;
   
-  for (uint i= 0; i < tmp_key_parts; i++, cur_key_part++, ref_key++)
+  KEY_PART_INFO *key_parts= tmp_key->key_part;
+  for (uint part_no= 0; part_no < tmp_key_parts; part_no++)
   {
-    Item_func_eq *eq_cond; /* New equi-join condition for the current column. */
+    /* New equi-join condition for the current column. */
+    Item_func_eq *eq_cond; 
     /* Item for the corresponding field from the materialized temp table. */
     Item_field *right_col_item;
-    int null_count= test(cur_key_part->field->real_maybe_null());
-    tmp_tab->ref.items[i]= item_in->left_expr->element_index(i);
+    int null_count= test(key_parts[part_no].field->real_maybe_null());
+    tmp_tab->ref.items[part_no]= item_in->left_expr->element_index(part_no);
 
-    if (!(right_col_item= new Item_field(thd, context, cur_key_part->field)) ||
-        !(eq_cond= new Item_func_eq(tmp_tab->ref.items[i], right_col_item)) ||
+    if (!(right_col_item= new Item_field(thd, context, 
+                                         key_parts[part_no].field)) ||
+        !(eq_cond= new Item_func_eq(tmp_tab->ref.items[part_no],
+                                    right_col_item)) ||
         ((Item_cond_and*)cond)->add(eq_cond))
     {
       delete cond;
@@ -3422,19 +3427,20 @@ bool subselect_hash_sj_engine::setup(Lis
       DBUG_RETURN(TRUE);
     }
 
-    *ref_key= new store_key_item(thd, cur_key_part->field,
-                                 /* TODO:
-                                    the NULL byte is taken into account in
-                                    cur_key_part->store_length, so instead of
-                                    cur_ref_buff + test(maybe_null), we could
-                                    use that information instead.
-                                 */
-                                 cur_ref_buff + null_count,
-                                 null_count ? cur_ref_buff : 0,
-                                 cur_key_part->length, tmp_tab->ref.items[i]);
-    cur_ref_buff+= cur_key_part->store_length;
+    tmp_tab->ref.key_copy[part_no]= 
+      new store_key_item(thd, key_parts[part_no].field,
+                         /* TODO:
+                            the NULL byte is taken into account in
+                            key_parts[part_no].store_length, so instead of
+                            cur_ref_buff + test(maybe_null), we could
+                            use that information instead.
+                         */
+                         cur_ref_buff + null_count,
+                         null_count ? cur_ref_buff : 0,
+                         key_parts[part_no].length,
+                         tmp_tab->ref.items[part_no]);
+    cur_ref_buff+= key_parts[part_no].store_length;
   }
-  *ref_key= NULL; /* End marker. */
   tmp_tab->ref.key_err= 1;
   tmp_tab->ref.key_parts= tmp_key_parts;
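
A minimal standalone sketch of the new calling convention introduced above, under the assumption that the two out parameters fully describe the three outcomes discussed in the rewritten comment: copy_ref_key() no longer returns a bool or sets the removed null_keypart member, and the caller (as in subselect_uniquesubquery_engine::exec()) chooses between treating IN as FALSE, scanning the table, or doing an index lookup. copy_ref_key_sketch() and its inputs are hypothetical stand-ins, not server code.

// Sketch of the out-parameter contract; names are illustrative only.
#include <cstdio>

static void copy_ref_key_sketch(bool outer_null_under_cond_guard,
                                bool conversion_failed,
                                bool *require_scan, bool *convert_error)
{
  *require_scan  = false;
  *convert_error = false;
  if (outer_null_under_cond_guard)
  {
    *require_scan = true;          // case a): fall back to a table scan
    return;
  }
  if (conversion_failed)
    *convert_error = true;         // no match possible, IN is FALSE
}

int main()
{
  bool require_scan, convert_error;
  copy_ref_key_sketch(true, false, &require_scan, &convert_error);
  if (convert_error)
    std::puts("IN evaluates to FALSE without touching the index");
  else if (require_scan)
    std::puts("scan_table(): check for a partial match");
  else
    std::puts("ha_index_read_map(): plain index lookup");
  return 0;
}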
 

=== modified file 'sql/item_subselect.h'
--- a/sql/item_subselect.h	2011-07-19 15:11:15 +0000
+++ b/sql/item_subselect.h	2011-09-29 12:47:32 +0000
@@ -612,7 +612,6 @@ protected:
     expression is NULL.
   */
   bool empty_result_set;
-  bool null_keypart; /* TRUE <=> constructed search tuple has a NULL */
 public:
 
   // constructor can assign THD because it will be called after JOIN::prepare
@@ -632,7 +631,7 @@ public:
                              select_result_interceptor *result);
   virtual bool no_tables() const;
   bool scan_table();
-  bool copy_ref_key();
+  void copy_ref_key(bool *require_scan, bool *convert_error);
   virtual bool no_rows() const { return empty_result_set; }
   virtual enum_engine_type engine_type() const { return UNIQUESUBQUERY_ENGINE; }
 };

=== modified file 'sql/mysqld.cc'
--- a/sql/mysqld.cc	2011-08-20 00:36:03 +0000
+++ b/sql/mysqld.cc	2011-10-03 11:16:06 +0000
@@ -7552,16 +7552,15 @@ mysqld_get_one_option(int optid,
       sql_print_error("Bad syntax in replicate-rewrite-db - missing '->'!\n");
       return 1;
     }
-    val= p--;
-    while (my_isspace(mysqld_charset, *p) && p > argument)
-      *p-- = 0;
-    if (p == argument)
+    val= p + 2;
+    while(p > argument && my_isspace(mysqld_charset, p[-1]))
+      p--;
+    *p= 0;
+    if (!*key)
     {
       sql_print_error("Bad syntax in replicate-rewrite-db - empty FROM db!\n");
       return 1;
     }
-    *val= 0;
-    val+= 2;
     while (*val && my_isspace(mysqld_charset, *val))
       val++;
     if (!*val)
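
A self-contained sketch of what the fixed option parsing above amounts to: split the argument on '->', trim the whitespace around both database names, and reject a missing '->' or an empty side. parse_rewrite_rule() is a hypothetical helper written only for illustration; the server edits the argument buffer in place rather than using std::string.

// Illustrative parser for "from->to" rewrite rules; not the server code.
#include <cctype>
#include <cstdio>
#include <string>

static bool parse_rewrite_rule(const std::string &arg,
                               std::string *from, std::string *to)
{
  const std::string::size_type arrow = arg.find("->");
  if (arrow == std::string::npos)
    return false;                               // missing '->' operator
  std::string f = arg.substr(0, arrow);
  std::string t = arg.substr(arrow + 2);
  auto trim = [](std::string &s) {
    while (!s.empty() && std::isspace(static_cast<unsigned char>(s.back())))
      s.pop_back();
    while (!s.empty() && std::isspace(static_cast<unsigned char>(s.front())))
      s.erase(s.begin());
  };
  trim(f);
  trim(t);
  if (f.empty() || t.empty())
    return false;                               // empty FROM or TO db name
  *from = f;
  *to   = t;
  return true;
}

int main()
{
  std::string from, to;
  // Mirrors the new rpl_replicate_rewrite_db.test cases:
  std::printf("%d\n", parse_rewrite_rule("a->b", &from, &to));    // 1 (accepted)
  std::printf("%d\n", parse_rewrite_rule("mysql", &from, &to));   // 0 (no '->')
  std::printf("%d\n", parse_rewrite_rule("->test ", &from, &to)); // 0 (empty FROM)
  std::printf("%d\n", parse_rewrite_rule("acc-> ", &from, &to));  // 0 (empty TO)
  return 0;
}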

=== modified file 'sql/opt_explain.cc'
--- a/sql/opt_explain.cc	2011-09-20 13:07:55 +0000
+++ b/sql/opt_explain.cc	2011-09-29 12:29:17 +0000
@@ -888,11 +888,16 @@ bool Explain_join::explain_ref()
   if (tab->ref.key_parts)
   {
     StringBuffer<512> str_ref(cs);
-    for (const store_key *const *ref= tab->ref.key_copy; *ref; ref++)
+
+    for (uint part_no= 0; part_no < tab->ref.key_parts; part_no++)
     {
+      const store_key *const s_key= tab->ref.key_copy[part_no];
+      if (s_key == NULL)
+        continue;
+
       if (str_ref.length())
         str_ref.append(',');
-      str_ref.append((*ref)->name(), strlen((*ref)->name()), cs);
+      str_ref.append(s_key->name(), strlen(s_key->name()), cs);
     }
     return col_ref.set(str_ref);
   }

=== modified file 'sql/opt_range.cc'
--- a/sql/opt_range.cc	2011-10-04 05:55:38 +0000
+++ b/sql/opt_range.cc	2011-10-05 08:04:47 +0000
@@ -6088,8 +6088,21 @@ static SEL_TREE *get_mm_tree(RANGE_OPT_P
   if (cond_func->functype() == Item_func::BETWEEN ||
       cond_func->functype() == Item_func::IN_FUNC)
     inv= ((Item_func_opt_neg *) cond_func)->negated;
-  else if (cond_func->select_optimize() == Item_func::OPTIMIZE_NONE)
-    DBUG_RETURN(0);			       
+  else
+  {
+    /*
+      During the cond_func->select_optimize() evaluation we can come across a
+      subselect item which may allocate memory on the thd->mem_root and assume
+      that all the memory allocated has the same life span as the subselect
+      item itself. So we have to restore the thread's mem_root here.
+    */
+    MEM_ROOT *tmp_root= param->mem_root;
+    param->thd->mem_root= param->old_root;
+    Item_func::optimize_type opt_type= cond_func->select_optimize();
+    param->thd->mem_root= tmp_root;
+    if (opt_type == Item_func::OPTIMIZE_NONE)
+      DBUG_RETURN(NULL);
+  }
 
   param->cond= cond;
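
An illustrative sketch of the save/call/restore pattern used in the hunk above: temporarily point the thread at the long-lived allocator while select_optimize() runs, then put the range optimizer's allocator back. The types below are stand-ins, not server types, and the patch itself uses plain assignments rather than a scope guard.

// Toy model of swapping thd->mem_root around a call that may allocate.
#include <cassert>

struct MemRoot { const char *name; };
struct Thd     { MemRoot *mem_root; };

// Restores the previous allocator when the scope ends, even on early return.
class SwapMemRoot
{
  Thd *thd_;
  MemRoot *saved_;
public:
  SwapMemRoot(Thd *thd, MemRoot *use) : thd_(thd), saved_(thd->mem_root)
  { thd_->mem_root = use; }
  ~SwapMemRoot() { thd_->mem_root = saved_; }
};

int main()
{
  MemRoot old_root{"statement"}, range_root{"range optimizer"};
  Thd thd{&range_root};
  {
    SwapMemRoot guard(&thd, &old_root);   // like thd->mem_root= param->old_root
    assert(thd.mem_root == &old_root);    // select_optimize() would run here
  }
  assert(thd.mem_root == &range_root);    // restored, like tmp_root
  return 0;
}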
 

=== modified file 'sql/rpl_master.cc'
--- a/sql/rpl_master.cc	2011-07-21 16:27:14 +0000
+++ b/sql/rpl_master.cc	2011-09-30 13:14:37 +0000
@@ -517,7 +517,7 @@ Increase max_allowed_packet on master";
     *errmsg = "memory allocation failed reading log event";
     break;
   case LOG_READ_TRUNC:
-    *errmsg = "binlog truncated in the middle of event";
+    *errmsg = "binlog truncated in the middle of event; consider out of disk space on master";
     break;
   case LOG_READ_CHECKSUM_FAILURE:
     *errmsg = "event read from binlog did not pass crc check";
@@ -629,6 +629,9 @@ void mysql_binlog_send(THD* thd, char* l
   String* packet = &thd->packet;
   int error;
   const char *errmsg = "Unknown error";
+  const char *fmt= "%s; the last event was read from '%s' at %s, the last byte read was read from '%s' at %s.";
+  char llbuff1[22], llbuff2[22];
+  char error_text[MAX_SLAVE_ERRMSG]; // to be sent to the slave via my_message()
   NET* net = &thd->net;
   mysql_mutex_t *log_lock;
   mysql_cond_t *log_cond;
@@ -897,11 +900,9 @@ impossible position";
     if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg))
       goto err;
 
-    my_off_t prev_pos= pos;
-    while (!(error = Log_event::read_log_event(&log, packet, log_lock,
+    while (!(error= Log_event::read_log_event(&log, packet, log_lock,
                                                current_checksum_alg)))
     {
-      prev_pos= my_b_tell(&log);
 #ifndef DBUG_OFF
       if (max_binlog_dump_events && !left_events--)
       {
@@ -1005,18 +1006,6 @@ impossible position";
     }
 
     /*
-      here we were reading binlog that was not closed properly (as a result
-      of a crash ?). treat any corruption as EOF
-    */
-    if (binlog_can_be_corrupted &&
-        error != LOG_READ_MEM &&
-        error != LOG_READ_CHECKSUM_FAILURE &&
-        error != LOG_READ_EOF)
-    {
-      my_b_seek(&log, prev_pos);
-      error=LOG_READ_EOF;
-    }
-    /*
       TODO: now that we are logging the offset, check to make sure
       the recorded offset and the actual match.
       Guilhem 2003-06: this is not true if this master is a slave
@@ -1263,6 +1252,21 @@ end:
 
 err:
   THD_STAGE_INFO(thd, stage_waiting_to_finalize_termination);
+  if (my_errno == ER_MASTER_FATAL_ERROR_READING_BINLOG && my_b_inited(&log))
+  {
+    /*
+       Add the coordinates of the last position read to the
+       fatal error message.
+    */
+    char b_start[FN_REFLEN], b_end[FN_REFLEN];
+    fn_format(b_start, coord->file_name, "", "", MY_REPLACE_DIR);
+    fn_format(b_end,   log_file_name,    "", "", MY_REPLACE_DIR);
+    my_snprintf(error_text, sizeof(error_text), fmt, errmsg,
+                b_start, (llstr(coord->pos, llbuff1), llbuff1),
+                b_end, (llstr(my_b_tell(&log), llbuff2), llbuff2));
+  }
+  else
+    strcpy(error_text, errmsg);
   end_io_cache(&log);
   (void) RUN_HOOK(binlog_transmit, transmit_stop, (thd, flags));
   /*
@@ -1280,7 +1284,7 @@ err:
   thd->variables.max_allowed_packet= old_max_allowed_packet;
 
   thd->set_stmt_da(saved_da);
-  my_message(my_errno, errmsg, MYF(0));
+  my_message(my_errno, error_text, MYF(0));
   DBUG_VOID_RETURN;
 }
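
A minimal sketch of how the enriched ER_MASTER_FATAL_ERROR_READING_BINLOG text is assembled in the err: block above: the base message plus the coordinates of the last event and the last byte read. std::snprintf stands in for my_snprintf, the 1024-byte buffer for MAX_SLAVE_ERRMSG, and the offsets are pre-formatted into small buffers the way llstr() does; the sample file name and positions are taken from the updated rpl_log_pos.result.

// Assembling the detailed 1236 error text; values mirror rpl_log_pos.result.
#include <cstdio>

int main()
{
  const char *fmt =
      "%s; the last event was read from '%s' at %s, "
      "the last byte read was read from '%s' at %s.";
  const char *base =
      "log event entry exceeded max_allowed_packet; "
      "Increase max_allowed_packet on master";
  char llbuff1[22], llbuff2[22];               // like llstr()'s buffers
  std::snprintf(llbuff1, sizeof(llbuff1), "%llu", 75ULL);
  std::snprintf(llbuff2, sizeof(llbuff2), "%llu", 94ULL);
  char error_text[1024];                       // stand-in for MAX_SLAVE_ERRMSG
  std::snprintf(error_text, sizeof(error_text), fmt, base,
                "master-bin.000001", llbuff1,
                "master-bin.000001", llbuff2);
  std::puts(error_text);
  return 0;
}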
 

=== modified file 'sql/share/errmsg-utf8.txt'
--- a/sql/share/errmsg-utf8.txt	2011-09-23 10:55:10 +0000
+++ b/sql/share/errmsg-utf8.txt	2011-09-29 12:55:06 +0000
@@ -4701,14 +4701,14 @@ ER_NOT_SUPPORTED_YET 42000 
         spa "Esta versión de MySQL no soporta todavia '%s'"
         swe "Denna version av MySQL kan ännu inte utföra '%s'"
 ER_MASTER_FATAL_ERROR_READING_BINLOG  
-        nla "Kreeg fatale fout %d: '%-.128s' van master tijdens lezen van data uit binaire log"
-        eng "Got fatal error %d from master when reading data from binary log: '%-.128s'"
-        ger "Schwerer Fehler %d: '%-.128s vom Master beim Lesen des binären Logs"
-        ita "Errore fatale %d: '%-.128s' dal master leggendo i dati dal log binario"
-        por "Obteve fatal erro %d: '%-.128s' do master quando lendo dados do binary log"
-        rus "Получена неисправимая ошибка %d: '%-.128s' от головного сервера в процессе выборки данных из двоичного журнала"
-        spa "Recibió fatal error %d: '%-.128s' del master cuando leyendo datos del binary log"
-        swe "Fick fatalt fel %d: '%-.128s' från master vid läsning av binärloggen"
+        nla "Kreeg fatale fout %d: '%-.256s' van master tijdens lezen van data uit binaire log"
+        eng "Got fatal error %d from master when reading data from binary log: '%-.256s'"
+        ger "Schwerer Fehler %d: '%-.256s vom Master beim Lesen des binären Logs"
+        ita "Errore fatale %d: '%-.256s' dal master leggendo i dati dal log binario"
+        por "Obteve fatal erro %d: '%-.256s' do master quando lendo dados do binary log"
+        rus "Получена неисправимая ошибка %d: '%-.256s' от головного сервера в процессе выборки данных из двоичного журнала"
+        spa "Recibió fatal error %d: '%-.256s' del master cuando leyendo datos del binary log"
+        swe "Fick fatalt fel %d: '%-.256s' från master vid läsning av binärloggen"
 ER_SLAVE_IGNORED_TABLE  
         eng "Slave SQL thread ignored the query because of replicate-*-table rules"
         ger "Slave-SQL-Thread hat die Abfrage aufgrund von replicate-*-table-Regeln ignoriert"
@@ -5036,7 +5036,7 @@ ER_FEATURE_DISABLED  
         ger "Das Feature '%s' ist ausgeschaltet, Sie müssen MySQL mit '%s' übersetzen, damit es verfügbar ist"
         por "O recurso '%s' foi desativado; você necessita MySQL construído com '%s' para ter isto funcionando"
         spa "El recurso '%s' fue deshabilitado; usted necesita construir MySQL con '%s' para tener eso funcionando"
-        swe "'%s' är inte aktiverad; För att aktivera detta måste du bygga om MySQL med '%s' definerad"
+        swe "'%s' är inte aktiverad; För att aktivera detta måste du bygga om MySQL med '%s' definierad"
 ER_OPTION_PREVENTS_STATEMENT  
         eng "The MySQL server is running with the %s option so it cannot execute this statement"
         ger "Der MySQL-Server läuft mit der Option %s und kann diese Anweisung deswegen nicht ausführen"
@@ -5577,7 +5577,8 @@ ER_VIEW_OTHER_USER
         eng "You need the SUPER privilege for creation view with '%-.192s'@'%-.192s' definer"
         ger "Sie brauchen die SUPER-Berechtigung, um einen View mit dem Definierer '%-.192s'@'%-.192s' zu erzeugen"
 ER_NO_SUCH_USER
-        eng "The user specified as a definer ('%-.64s'@'%-.64s') does not exist"
+  eng "The user specified as a definer ('%-.64s'@'%-.64s') does not exist"
+  ger "Der als Definierer angegebene Benutzer ('%-.64s'@'%-.64s') existiert nicht"
 ER_FORBID_SCHEMA_CHANGE
         eng "Changing schema from '%-.192s' to '%-.192s' is not allowed."
         ger "Wechsel des Schemas von '%-.192s' auf '%-.192s' ist nicht erlaubt"
@@ -5618,7 +5619,7 @@ ER_VIEW_RECURSIVE
         eng "`%-.192s`.`%-.192s` contains view recursion"
         ger "`%-.192s`.`%-.192s` enthält View-Rekursion"
 ER_NON_GROUPING_FIELD_USED 42000
-        eng "non-grouping field '%-.192s' is used in %-.64s clause"
+        eng "Non-grouping field '%-.192s' is used in %-.64s clause"
         ger "In der %-.192s-Klausel wird das die Nicht-Gruppierungsspalte '%-.64s' verwendet"
 ER_TABLE_CANT_HANDLE_SPKEYS
         eng "The used table type doesn't support SPATIAL indexes"
@@ -5645,15 +5646,20 @@ ER_NON_INSERTABLE_TABLE  
         eng "The target table %-.100s of the %s is not insertable-into"
         ger "Die Zieltabelle %-.100s von %s ist nicht einfügbar"
 ER_ADMIN_WRONG_MRG_TABLE
-	eng "Table '%-.64s' is differently defined or of non-MyISAM type or doesn't exist"
+  eng "Table '%-.64s' is differently defined or of non-MyISAM type or doesn't exist"
+  ger "Tabelle '%-.64s' ist unterschiedlich definiert, nicht vom Typ MyISAM oder existiert nicht"
 ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT
-	eng "Too high level of nesting for select"
+  eng "Too high level of nesting for select"
+  ger "Zu tief verschachtelte SELECT-Anweisungen"
 ER_NAME_BECOMES_EMPTY
-        eng "Name '%-.64s' has become ''"
+  eng "Name '%-.64s' has become ''"
+  ger "Name '%-.64s' wurde zu ''"
 ER_AMBIGUOUS_FIELD_TERM
-	eng "First character of the FIELDS TERMINATED string is ambiguous; please use non-optional and non-empty FIELDS ENCLOSED BY"
+  eng "First character of the FIELDS TERMINATED string is ambiguous; please use non-optional and non-empty FIELDS ENCLOSED BY"
+  ger "Das erste Zeichen der Zeichenkette FIELDS TERMINATED ist mehrdeutig; bitte benutzen Sie nicht optionale und nicht leere FIELDS ENCLOSED BY"
 ER_FOREIGN_SERVER_EXISTS
-        eng "The foreign server, %s, you are trying to create already exists."
+  eng "The foreign server, %s, you are trying to create already exists."
+  ger "Der entfernte Server %s, den Sie versuchen zu erzeugen, existiert schon."
 ER_FOREIGN_SERVER_DOESNT_EXIST
         eng "The foreign server name you are trying to reference does not exist. Data source error:  %-.64s"
 	ger "Die externe Verbindung, auf die Sie zugreifen wollen, existiert nicht. Datenquellenfehlermeldung:  %-.64s"
@@ -5678,8 +5684,8 @@ ER_PARTITION_SUBPARTITION_ERROR
         swe "Subpartitioner kan bara vara hash och key partitioner"
 ER_PARTITION_SUBPART_MIX_ERROR
         eng "Must define subpartitions on all partitions if on one partition"
-        ger "Unterpartitionen können nur Hash- oder Key-Partitionen sein"
-        swe "Subpartitioner måste definieras på alla partitioner om på en"
+        ger "Wenn Sie Unterpartitionen auf einer Partition definieren, müssen Sie das für alle Partitionen tun"
+        swe "Subpartitioner måste definieras på alla partitioner om på en"
 ER_PARTITION_WRONG_NO_PART_ERROR
         eng "Wrong number of partitions defined, mismatch with previous setting"
         ger "Falsche Anzahl von Partitionen definiert, stimmt nicht mit vorherigen Einstellungen überein"
@@ -5854,7 +5860,7 @@ ER_CREATE_FILEGROUP_FAILED
         ger "Anlegen von %s fehlgeschlagen"
 ER_DROP_FILEGROUP_FAILED
         eng "Failed to drop %s"
-        ger "Löschen (drop) von %s fehlgeschlagen"
+        ger "LERROR
         eng "The handler doesn't support autoextend of tablespaces"
         ger "Der Handler unterstützt keine automatische Erweiterung (Autoextend) von Tablespaces"
@@ -5898,7 +5904,8 @@ ER_EVENT_ENDS_BEFORE_STARTS
         eng "ENDS is either invalid or before STARTS"
         ger "ENDS ist entweder ungültig oder liegt vor STARTS"
 ER_EVENT_EXEC_TIME_IN_THE_PAST
-        eng "Event execution time is in the past. Event has been disabled"
+  eng "Event execution time is in the past. Event has been disabled"
+  ger "Ausführungszeit des Events liegt in der Vergangenheit. Event wurde deaktiviert"
 ER_EVENT_OPEN_TABLE_FAILED
         eng "Failed to open mysql.event"
         ger "Öffnen von mysql.event fehlgeschlagen"
@@ -5930,7 +5937,7 @@ ER_EVENT_DATA_TOO_LONG
         ger "Daten der Spalte '%s' zu lang"
 ER_DROP_INDEX_FK
         eng "Cannot drop index '%-.192s': needed in a foreign key constraint"
-        ger "Kann Index '%-.192s' nicht löschen: wird für einen Fremdschlüssel benötigt"
+        ger "Kann Index '%-.192s' nicht löschen: wird für eine Fremdschlüsselbeschränkung benötigt"
 # When using this error message, use the ER_WARN_DEPRECATED_SYNTAX error
 # code.
 ER_WARN_DEPRECATED_SYNTAX_WITH_VER  
@@ -5941,10 +5948,10 @@ ER_CANT_WRITE_LOCK_LOG_TABLE
         ger "Eine Log-Tabelle kann nicht schreibgesperrt werden. Es ist ohnehin nur Lesezugriff möglich"
 ER_CANT_LOCK_LOG_TABLE
         eng "You can't use locks with log tables."
-        ger "Log-Tabellen können nicht mit normalen Lesesperren gesperrt werden. Verwenden Sie statt dessen READ LOCAL"
+        ger "Log-Tabellen können nicht gesperrt werden."
 ER_FOREIGN_DUPLICATE_KEY 23000 S1009
         eng "Upholding foreign key constraints for table '%.192s', entry '%-.192s', key %d would lead to a duplicate entry"
-        ger "Aufrechterhalten der Fremdschlüssel-Constraints für Tabelle '%.192s', Eintrag '%-.192s', Schlüssel %d würde zu einem doppelten Eintrag führen"
+        ger "Aufrechterhalten der Fremdschlüssel-Beschränkungen für Tabelle '%.192s', Eintrag '%-.192s', Schlüssel %d würde zu einem doppelten Eintrag führen"
 ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE
         eng "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MySQL %d, now running %d. Please use mysql_upgrade to fix this error."
         ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d erhalten. Erzeugt mit MySQL %d, jetzt unter %d. Bitte benutzen Sie mysql_upgrade, um den Fehler zu beheben"
@@ -6000,6 +6007,7 @@ ER_CANT_ACTIVATE_LOG
         ger "Kann Logdatei '%-.64s' nicht aktivieren"
 ER_RBR_NOT_AVAILABLE
         eng "The server was not built with row-based replication"
+        ger "Der Server wurde nicht mit zeilenbasierter Replikation gebaut"
 ER_BASE64_DECODE_ERROR
         eng "Decoding of base64 string failed"
         swe "Avkodning av base64 sträng misslyckades"
@@ -6021,7 +6029,7 @@ ER_BAD_LOG_STATEMENT
         ger "Sie können eine Logtabelle nicht '%s', wenn Loggen angeschaltet ist"
 ER_CANT_RENAME_LOG_TABLE
         eng "Cannot rename '%s'. When logging enabled, rename to/from log table must rename two tables: the log table to an archive table and another table back to '%s'"
-        ger "Kann '%s' nicht umbenennen. Wenn Loggen angeschaltet ist, müssen beim Umbenennen zu/von einer Logtabelle zwei Tabellen angegeben werden: die Logtabelle zu einer Archivtabelle und eine weitere Tabelle zurück zu  '%s'"
+        ger "Kann '%s' nicht umbenennen. Wenn Loggen angeschaltet ist, müssen zwei Tabellen umbenannt werden: die Logtabelle zu einer Archivtabelle, und eine weitere Tabelle zu '%s'"
 ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT 42000
         eng "Incorrect parameter count in the call to native function '%-.192s'"
         ger "Falsche Anzahl von Parametern beim Aufruf der nativen Funktion '%-.192s'"
@@ -6062,206 +6070,240 @@ ER_DUP_ENTRY_WITH_KEY_NAME 23000 S1009
         swe "Dubbel nyckel '%-.64s' för nyckel '%-.192s'"
         ukr "Дублюючий запис '%-.64s' для ключа '%-.192s'"
 ER_BINLOG_PURGE_EMFILE
-        eng "Too many files opened, please execute the command again"
-        ger "Zu viele offene Dateien, bitte führen Sie den Befehl noch einmal aus"
+  eng "Too many files opened, please execute the command again"
+  ger "Zu viele offene Dateien, bitte führen Sie den Befehl noch einmal aus"
 ER_EVENT_CANNOT_CREATE_IN_THE_PAST
-        eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation."
+  eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation."
+  ger "Ausführungszeit des Events liegt in der Vergangenheit, und es wurde ON COMPLETION NOT PRESERVE gesetzt. Das Event wurde unmittelbar nach Erzeugung gelöscht."
 ER_EVENT_CANNOT_ALTER_IN_THE_PAST
-        eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation."
+  eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation."
+  ger "Ausführungszeit des Events liegt in der Vergangenheit, und es wurde ON COMPLETION NOT PRESERVE gesetzt. Das Event wurde unmittelbar nach Erzeugung gelöscht."
 ER_SLAVE_INCIDENT
-        eng "The incident %s occured on the master. Message: %-.64s"
+  eng "The incident %s occured on the master. Message: %-.64s"
+  ger "Der Vorfall %s passierte auf dem Master. Meldung: %-.64s"
 ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT
-        eng "Table has no partition for some existing values"
+  eng "Table has no partition for some existing values"
+  ger "Tabelle hat für einige bestehende Werte keine Partition"
 ER_BINLOG_UNSAFE_STATEMENT
-        eng "Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. %s"
+  eng "Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. %s"
+  swe "Detta är inte säkert att logga i statement-format, för BINLOG_FORMAT = STATEMENT. %s"
+  ger "Unsichere Anweisung ins Binärlog geschrieben, weil Anweisungsformat BINLOG_FORMAT = STATEMENT. %s"
 ER_SLAVE_FATAL_ERROR
-        eng "Fatal error: %s"
+  eng "Fatal error: %s"
+  ger "Fataler Fehler: %s"
 ER_SLAVE_RELAY_LOG_READ_FAILURE
-        eng "Relay log read failure: %s"
+  eng "Relay log read failure: %s"
+  ger "Relaylog-Lesefehler: %s"
 ER_SLAVE_RELAY_LOG_WRITE_FAILURE
-        eng "Relay log write failure: %s"
+  eng "Relay log write failure: %s"
+  ger "Relaylog-Schreibfehler: %s"
 ER_SLAVE_CREATE_EVENT_FAILURE
-        eng "Failed to create %s"
+  eng "Failed to create %s"
+  ger "Erzeugen von %s fehlgeschlagen"
 ER_SLAVE_MASTER_COM_FAILURE
-        eng "Master command %s failed: %s"
+  eng "Master command %s failed: %s"
+  ger "Master-Befehl %s fehlgeschlagen: %s"
 ER_BINLOG_LOGGING_IMPOSSIBLE
-        eng "Binary logging not possible. Message: %s"
-
+  eng "Binary logging not possible. Message: %s"
+  ger "Binärlogging nicht möglich. Meldung: %s"
 ER_VIEW_NO_CREATION_CTX
   eng "View `%-.64s`.`%-.64s` has no creation context"
+  ger "View `%-.64s`.`%-.64s` hat keinen Erzeugungskontext"
 ER_VIEW_INVALID_CREATION_CTX
   eng "Creation context of view `%-.64s`.`%-.64s' is invalid"
-
+  ger "Erzeugungskontext des Views`%-.64s`.`%-.64s' ist ungültig"
 ER_SR_INVALID_CREATION_CTX
   eng "Creation context of stored routine `%-.64s`.`%-.64s` is invalid"
-
+  ger "Erzeugungskontext der gespeicherten Routine`%-.64s`.`%-.64s` ist ungültig"
 ER_TRG_CORRUPTED_FILE
   eng "Corrupted TRG file for table `%-.64s`.`%-.64s`"
+  ger "Beschädigte TRG-Datei für Tabelle `%-.64s`.`%-.64s`"
 ER_TRG_NO_CREATION_CTX
   eng "Triggers for table `%-.64s`.`%-.64s` have no creation context"
+  ger "Trigger für Tabelle `%-.64s`.`%-.64s` haben keinen Erzeugungskontext"
 ER_TRG_INVALID_CREATION_CTX
   eng "Trigger creation context of table `%-.64s`.`%-.64s` is invalid"
-
+  ger "Trigger-Erzeugungskontext der Tabelle `%-.64s`.`%-.64s` ist ungültig"
 ER_EVENT_INVALID_CREATION_CTX
   eng "Creation context of event `%-.64s`.`%-.64s` is invalid"
-
+  ger "Erzeugungskontext des Events `%-.64s`.`%-.64s` ist ungültig"
 ER_TRG_CANT_OPEN_TABLE
   eng "Cannot open table for trigger `%-.64s`.`%-.64s`"
-
+  ger "Kann Tabelle für den Trigger `%-.64s`.`%-.64s` nicht öffnen"
 ER_CANT_CREATE_SROUTINE
   eng "Cannot create stored routine `%-.64s`. Check warnings"
+  ger "Kann gespeicherte Routine `%-.64s` nicht erzeugen. Beachten Sie die Warnungen"
 ER_NEVER_USED
   eng "Ambiguous slave modes combination. %s"
-
+  ger "Mehrdeutige Kombination von Slave-Modi. %s"
 ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT
   eng "The BINLOG statement of type `%s` was not preceded by a format description BINLOG statement."
+  ger "Der BINLOG-Anweisung vom Typ `%s` ging keine BINLOG-Anweisung zur Formatbeschreibung voran."
 ER_SLAVE_CORRUPT_EVENT
   eng "Corrupted replication event was detected"
-
+  ger "Beschädigtes Replikationsereignis entdeckt"
 ER_LOAD_DATA_INVALID_COLUMN
   eng "Invalid column reference (%-.64s) in LOAD DATA"
-
-ER_LOG_PURGE_NO_FILE  
+  ger "Ungültige Spaltenreferenz (%-.64s) bei LOAD DATA"
+ER_LOG_PURGE_NO_FILE
   eng "Being purged log %s was not found"
-
+  ger "Zu bereinigende Logdatei %s wurde nicht gefunden"
 ER_XA_RBTIMEOUT XA106
-	eng "XA_RBTIMEOUT: Transaction branch was rolled back: took too long"
-
+  eng "XA_RBTIMEOUT: Transaction branch was rolled back: took too long"
+  ger "XA_RBTIMEOUT: Transaktionszweig wurde zurückgerollt: Zeitüberschreitung"
 ER_XA_RBDEADLOCK XA102
-	eng "XA_RBDEADLOCK: Transaction branch was rolled back: deadlock was detected"
-
+  eng "XA_RBDEADLOCK: Transaction branch was rolled back: deadlock was detected"
+  ger "XA_RBDEADLOCK: Transaktionszweig wurde zurückgerollt: Deadlock entdeckt"
 ER_NEED_REPREPARE
   eng "Prepared statement needs to be re-prepared"
-
-ER_DELAYED_NOT_SUPPORTED  
+  ger "Vorbereitete Anweisungen mPORTED
   eng "DELAYED option not supported for table '%-.192s'"
-
+  ger "Die DELAYED-Option wird für Tabelle '%-.192s' nicht unterstützt"
 WARN_NO_MASTER_INFO  
   eng "The master info structure does not exist"
-
+  ger "Die Master-Info-Struktur existiert nicht"
 WARN_OPTION_IGNORED
   eng "<%-.64s> option ignored"
-
+  ger "Option <%-.64s> ignoriert"
 WARN_PLUGIN_DELETE_BUILTIN
   eng "Built-in plugins cannot be deleted"
-
+  ger "Eingebaute Plugins können nicht gelöscht werden"
 WARN_PLUGIN_BUSY
   eng "Plugin is busy and will be uninstalled on shutdown"
-
+  ger "Plugin wird verwendet und wird erst beim Herunterfahren deinstalliert"
 ER_VARIABLE_IS_READONLY
   eng "%s variable '%s' is read-only. Use SET %s to assign the value"
-
+  ger "%s Variable '%s' ist nur lesbar. Benutzen Sie SET %s, um einen Wert zuzuweisen"
 ER_WARN_ENGINE_TRANSACTION_ROLLBACK
   eng "Storage engine %s does not support rollback for this statement. Transaction rolled back and must be restarted"
-
+  ger "Speicher-Engine %s unterstützt für diese Anweisung kein Rollback. Transaktion wurde zurückgerollt und muss neu gestartet werden"
 ER_SLAVE_HEARTBEAT_FAILURE
   eng "Unexpected master's heartbeat data: %s"
+  ger "Unerwartete Daten vom Heartbeat des Masters: %s"
 ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE
   eng "The requested value for the heartbeat period is either negative or exceeds the maximum allowed (%s seconds)."
-
 ER_NDB_REPLICATION_SCHEMA_ERROR
-        eng "Bad schema for mysql.ndb_replication table. Message: %-.64s"
+  eng "Bad schema for mysql.ndb_replication table. Message: %-.64s"
+  ger "Fehlerhaftes Schema für mysql.ndb_replication table. Meldung: %-.64s"
 ER_CONFLICT_FN_PARSE_ERROR
-        eng "Error in parsing conflict function. Message: %-.64s"
+  eng "Error in parsing conflict function. Message: %-.64s"
+  ger "Fehler beim Parsen einer Konflikt-Funktion. Meldung: %-.64s"
 ER_EXCEPTIONS_WRITE_ERROR
-        eng "Write to exceptions table failed. Message: %-.128s""
-
+  eng "Write to exceptions table failed. Message: %-.128s""
+  ger "Schreiben in Ausnahme-Tabelle fehlgeschlagen. Meldung: %-.128s""
 ER_TOO_LONG_TABLE_COMMENT
   eng "Comment for table '%-.64s' is too long (max = %lu)"
   por "Comentário para a tabela '%-.64s' é longo demais (max = %lu)"
-
+  ger "Kommentar für Tabelle '%-.64s' ist zu lang (max = %lu)"
 ER_TOO_LONG_FIELD_COMMENT
   eng "Comment for field '%-.64s' is too long (max = %lu)"
   por "Comentário para o campo '%-.64s' é longo demais (max = %lu)"
-
+  ger "Kommentar für Feld '%-.64s' ist zu lang (max = %lu)"
 ER_FUNC_INEXISTENT_NAME_COLLISION 42000 
   eng "FUNCTION %s does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual"
-
+  ger "FUNCTION %s existiert nicht. Erläuterungen im Abschnitt 'Function Name Parsing and Resolution' im Referenzhandbuch"
 # When updating these, please update EXPLAIN_FILENAME_MAX_EXTRA_LENGTH in
 # sql_table.h with the new maximal additional length for explain_filename.
 ER_DATABASE_NAME
   eng "Database"
   swe "Databas"
+  ger "Datenbank"
 ER_TABLE_NAME
   eng "Table"
   swe "Tabell"
+  ger "Tabelle"
 ER_PARTITION_NAME
   eng "Partition"
   swe "Partition"
+  ger "Partition"
 ER_SUBPARTITION_NAME
   eng "Subpartition"
   swe "Subpartition"
+  ger "Unterpartition"
 ER_TEMPORARY_NAME
   eng "Temporary"
   swe "Temporär"
+  ger "Temporär"
 ER_RENAMED_NAME
   eng "Renamed"
   swe "Namnändrad"
+  ger "Umbenannt"
 ER_TOO_MANY_CONCURRENT_TRXS
   eng  "Too many active concurrent transactions"
-
+  ger  "Zu viele aktive simultane Transaktionen"
 WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED
   eng "Non-ASCII separator arguments are not fully supported"
-
+  ger "Nicht-ASCII-Trennargumente werden nicht vollständig unterstützt"
 ER_DEBUG_SYNC_TIMEOUT
   eng "debug sync point wait timed out"
   ger "Debug Sync Point Wartezeit überschritten"
 ER_DEBUG_SYNC_HIT_LIMIT
   eng "debug sync point hit limit reached"
   ger "Debug Sync Point Hit Limit erreicht"
-
 ER_DUP_SIGNAL_SET 42000
-        eng "Duplicate condition information item '%s'"
-
+  eng "Duplicate condition information item '%s'"
+  ger "Informationselement '%s' für Duplikatbedingung"
 # Note that the SQLSTATE is not 01000, it is provided by SIGNAL/RESIGNAL
 ER_SIGNAL_WARN 01000
-        eng "Unhandled user-defined warning condition"
-
+  eng "Unhandled user-defined warning condition"
+  ger "Unbehandelte benutzerdefinierte Warnbedingung"
 # Note that the SQLSTATE is not 02000, it is provided by SIGNAL/RESIGNAL
 ER_SIGNAL_NOT_FOUND 02000
-        eng "Unhandled user-defined not found condition"
-
+  eng "Unhandled user-defined not found condition"
+  ger "Unbehandelte benutzerdefinierte Nicht-gefunden-Bedingung"
 # Note that the SQLSTATE is not HY000, it is provided by SIGNAL/RESIGNAL
 ER_SIGNAL_EXCEPTION HY000
-        eng "Unhandled user-defined exception condition"
-
+  eng "Unhandled user-defined exception condition"
+  ger "Unbehandelte benutzerdefinierte Ausnahmebedingung"
 ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER 0K000
-        eng "RESIGNAL when handler not active"
-
+  eng "RESIGNAL when handler not active"
+  ger "RESIGNAL bei nicht aktivem Handler"
 ER_SIGNAL_BAD_CONDITION_TYPE
-        eng "SIGNAL/RESIGNAL can only use a CONDITION defined with SQLSTATE"
-
+  eng "SIGNAL/RESIGNAL can only use a CONDITION defined with SQLSTATE"
+  ger "SIGNAL/RESIGNAL kann nur mit einer Bedingung (CONDITION) benutzt werden, die bei SQLSTATE definiert wurde"
 WARN_COND_ITEM_TRUNCATED
-        eng "Data truncated for condition item '%s'"
-
+  eng "Data truncated for condition item '%s'"
+  ger "Daten gek-        eng "Data too long for condition item '%s'"
-
+  eng "Data too long for condition item '%s'"
+  ger "Daten zu lang für Bedingungselement '%s'"
 ER_UNKNOWN_LOCALE
-        eng "Unknown locale: '%-.64s'"
-
+  eng "Unknown locale: '%-.64s'"
+  ger "Unbekannte Locale: '%-.64s'"
 ER_SLAVE_IGNORE_SERVER_IDS
   eng "The requested server id %d clashes with the slave startup option --replicate-same-server-id"
+  ger "Die angeforderte Server-ID %d steht im Konflikt mit der Startoption --replicate-same-server-id für den Slave"
 ER_QUERY_CACHE_DISABLED
   eng "Query cache is disabled; restart the server with query_cache_type=1 to enable it"
+  ger "Abfragen-Cache ist deaktiviert. Starten Sie den Server neu mit query_cache_type=1, um ihn zu aktivieren"
 ER_SAME_NAME_PARTITION_FIELD
   eng "Duplicate partition field name '%-.192s'"
+  ger "Partitionsfeld '%-.192s' ist ein Duplikat"
 ER_PARTITION_COLUMN_LIST_ERROR
   eng "Inconsistency in usage of column lists for partitioning"
+  ger "Inkonsistenz bei der Benutzung von Spaltenlisten für Partitionierung"
 ER_WRONG_TYPE_COLUMN_VALUE_ERROR
   eng "Partition column values of incorrect type"
+  ger "Partitionsspaltenwerte sind vom falschen Typ"
 ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR
   eng "Too many fields in '%-.192s'"
+  ger "Zu viele Felder in '%-.192s'"
 ER_MAXVALUE_IN_VALUES_IN
   eng "Cannot use MAXVALUE as value in VALUES IN"
+  ger "MAXVALUE kann nicht als Wert in VALUES IN verwendet werden"
 ER_TOO_MANY_VALUES_ERROR
   eng "Cannot have more than one value for this type of %-.64s partitioning"
+  ger "Für den Partionierungstyp %-.64s darf es nicht mehr als einen Wert geben"
 ER_ROW_SINGLE_PARTITION_FIELD_ERROR
   eng "Row expressions in VALUES IN only allowed for multi-field column partitioning"
+  ger "Zeilenausdrücke in VALUES IN sind nur für Mehrfeld-Spaltenpartionierung erlaubt"
 ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD
   eng "Field '%-.192s' is of a not allowed type for this type of partitioning"
+  ger "Feld '%-.192s' ist für diese Art von Partitionierung von einem nicht zulässigen Typ"
 ER_PARTITION_FIELDS_TOO_LONG
   eng "The total length of the partitioning fields is too large"
+  ger "Die Gesamtlänge der Partitionsfelder ist zu groß"
 ER_BINLOG_ROW_ENGINE_AND_STMT_ENGINE
   eng "Cannot execute statement: impossible to write to binary log since both row-incapable engines and statement-incapable engines are involved."
 ER_BINLOG_ROW_MODE_AND_STMT_ENGINE
@@ -6323,7 +6365,7 @@ ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLO
   eng "Cannot change the binlog direct flag inside a stored function or trigger"
 ER_SPATIAL_MUST_HAVE_GEOM_COL 42000
   eng "A SPATIAL index may only contain a geometrical type column"
-
+  ger "Ein raumbezogener Index (SPATIAL) darf nur Spalten geometrischen Typs enthalten"
 ER_TOO_LONG_INDEX_COMMENT
   eng "Comment for index '%-.64s' is too long (max = %lu)"
 
@@ -6527,6 +6569,24 @@ ER_MTS_INCONSISTENT_DATA
 ER_UNDO_RECORD_TOO_BIG
   eng "Undo log record is too big."
 
+ER_BINLOG_UNSAFE_INSERT_IGNORE_SELECT
+  eng "INSERT IGNORE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave."
+
+ER_BINLOG_UNSAFE_INSERT_SELECT_UPDATE
+  eng "INSERT... SELECT... ON DUPLICATE KEY UPDATE is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are updated. This order cannot be predicted and may differ on master and the slave."
+
+ER_BINLOG_UNSAFE_REPLACE_SELECT
+ eng "REPLACE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave."
+
+ER_BINLOG_UNSAFE_CREATE_IGNORE_SELECT
+  eng "CREATE... IGNORE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave."
+
+ER_BINLOG_UNSAFE_CREATE_REPLACE_SELECT
+  eng "CREATE... REPLACE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave."
+
+ER_BINLOG_UNSAFE_UPDATE_IGNORE
+  eng "UPDATE IGNORE is unsafe because the order in which rows are updated determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave."
+
 ER_PLUGIN_NO_UNINSTALL
   eng "Plugin '%s' is marked as not dynamically uninstallable. You have to stop the server to uninstall it."
 

=== modified file 'sql/sql_lex.cc'
--- a/sql/sql_lex.cc	2011-10-04 05:55:38 +0000
+++ b/sql/sql_lex.cc	2011-10-05 08:04:47 +0000
@@ -58,7 +58,13 @@ Query_tables_list::binlog_stmt_unsafe_er
   ER_BINLOG_UNSAFE_SYSTEM_FUNCTION,
   ER_BINLOG_UNSAFE_NONTRANS_AFTER_TRANS,
   ER_BINLOG_UNSAFE_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE,
-  ER_BINLOG_UNSAFE_MIXED_STATEMENT
+  ER_BINLOG_UNSAFE_MIXED_STATEMENT,
+  ER_BINLOG_UNSAFE_INSERT_IGNORE_SELECT,
+  ER_BINLOG_UNSAFE_INSERT_SELECT_UPDATE,
+  ER_BINLOG_UNSAFE_REPLACE_SELECT,
+  ER_BINLOG_UNSAFE_CREATE_IGNORE_SELECT,
+  ER_BINLOG_UNSAFE_CREATE_REPLACE_SELECT,
+  ER_BINLOG_UNSAFE_UPDATE_IGNORE
 };
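
The array above is indexed by the BINLOG_STMT_UNSAFE_* enum in sql_lex.h, so the two lists must keep the same order and length. A minimal stand-alone C sketch of that pattern, with placeholder names rather than the server's:

#include <stdio.h>

/* Illustrative enum of unsafety reasons; values index the table below. */
enum stmt_unsafe {
  STMT_UNSAFE_INSERT_IGNORE_SELECT,
  STMT_UNSAFE_INSERT_SELECT_UPDATE,
  STMT_UNSAFE_REPLACE_SELECT,
  STMT_UNSAFE_COUNT                     /* must remain the last element */
};

/* One warning text per enum value, in the same order. */
static const char *stmt_unsafe_errors[] = {
  "INSERT IGNORE ... SELECT is unsafe",
  "INSERT ... SELECT ... ON DUPLICATE KEY UPDATE is unsafe",
  "REPLACE ... SELECT is unsafe"
};

/* Compile-time guard that the table and the enum stay in sync. */
typedef char unsafe_table_size_check[
  sizeof(stmt_unsafe_errors) / sizeof(stmt_unsafe_errors[0])
    == STMT_UNSAFE_COUNT ? 1 : -1];

int main(void)
{
  printf("%s\n", stmt_unsafe_errors[STMT_UNSAFE_REPLACE_SELECT]);
  return 0;
}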
 
 

=== modified file 'sql/sql_lex.h'
--- a/sql/sql_lex.h	2011-09-08 12:48:08 +0000
+++ b/sql/sql_lex.h	2011-09-29 10:42:53 +0000
@@ -1232,6 +1232,48 @@ public:
     */
     BINLOG_STMT_UNSAFE_MIXED_STATEMENT,
 
+    /**
+      INSERT...IGNORE SELECT is unsafe because which rows are ignored depends
+      on the order that rows are retrieved by SELECT. This order cannot be
+      predicted and may differ on master and the slave.
+    */
+    BINLOG_STMT_UNSAFE_INSERT_IGNORE_SELECT,
+
+    /**
+      INSERT...SELECT...UPDATE is unsafe because which rows are updated depends
+      on the order that rows are retrieved by SELECT. This order cannot be
+      predicted and may differ on master and the slave.
+    */
+    BINLOG_STMT_UNSAFE_INSERT_SELECT_UPDATE,
+
+    /**
+      INSERT...REPLACE SELECT is unsafe because which rows are replaced depends
+      on the order that rows are retrieved by SELECT. This order cannot be
+      predicted and may differ on master and the slave.
+    */
+    BINLOG_STMT_UNSAFE_REPLACE_SELECT,
+
+    /**
+      CREATE TABLE... IGNORE... SELECT is unsafe because which rows are ignored
+      depends on the order that rows are retrieved by SELECT. This order cannot
+      be predicted and may differ on master and the slave.
+    */
+    BINLOG_STMT_UNSAFE_CREATE_IGNORE_SELECT,
+
+    /**
+      CREATE TABLE...REPLACE... SELECT is unsafe because which rows are replaced
+      depends on the order that rows are retrieved from SELECT. This order
+      cannot be predicted and may differ on master and the slave
+    */
+    BINLOG_STMT_UNSAFE_CREATE_REPLACE_SELECT,
+
+    /**
+      UPDATE...IGNORE is unsafe because which rows are ignored depends on the
+      order that rows are updated. This order cannot be predicted and may differ
+      on master and the slave.
+    */
+    BINLOG_STMT_UNSAFE_UPDATE_IGNORE,
+
     /* The last element of this enumeration type. */
     BINLOG_STMT_UNSAFE_COUNT
   };

=== modified file 'sql/sql_parse.cc'
--- a/sql/sql_parse.cc	2011-09-27 08:24:44 +0000
+++ b/sql/sql_parse.cc	2011-09-29 10:42:53 +0000
@@ -2605,6 +2605,19 @@ case SQLCOM_PREPARE:
       select_result *result;
 
       /*
+        CREATE TABLE...IGNORE/REPLACE SELECT... can be unsafe, unless
+        ORDER BY PRIMARY KEY clause is used in SELECT statement. We therefore
+        use row based logging if mixed or row based logging is available.
+        TODO: Check if the order of the output of the select statement is
+        deterministic. Waiting for BUG#42415
+      */
+      if(lex->ignore)
+        lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_CREATE_IGNORE_SELECT);
+      
+      if(lex->duplicates == DUP_REPLACE)
+        lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_CREATE_REPLACE_SELECT);
+
+      /*
         If:
         a) we inside an SP and there was NAME_CONST substitution,
         b) binlogging is on (STMT mode),
@@ -2949,6 +2962,16 @@ end_with_restore_list:
     DBUG_ASSERT(first_table == all_tables && first_table != 0);
     if (update_precheck(thd, all_tables))
       break;
+
+    /*
+      UPDATE IGNORE can be unsafe. We therefore use row based
+      logging if mixed or row based logging is available.
+      TODO: Check if the order of the output of the select statement is
+      deterministic. Waiting for BUG#42415
+    */
+    if (lex->ignore)
+      lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_UPDATE_IGNORE);
+
     DBUG_ASSERT(select_lex->offset_limit == 0);
     unit->set_limit(select_lex);
     MYSQL_UPDATE_START(thd->query());
@@ -3114,6 +3137,23 @@ end_with_restore_list:
     DBUG_ASSERT(first_table == all_tables && first_table != 0);
     if ((res= insert_precheck(thd, all_tables)))
       break;
+    /*
+      INSERT...SELECT...ON DUPLICATE KEY UPDATE/REPLACE SELECT/
+      INSERT...IGNORE...SELECT can be unsafe, unless ORDER BY PRIMARY KEY
+      clause is used in SELECT statement. We therefore use row based
+      logging if mixed or row based logging is available.
+      TODO: Check if the order of the output of the select statement is
+      deterministic. Waiting for BUG#42415
+    */
+    if (lex->sql_command == SQLCOM_INSERT_SELECT &&
+        lex->duplicates == DUP_UPDATE)
+      lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_INSERT_SELECT_UPDATE);
+
+    if (lex->sql_command == SQLCOM_INSERT_SELECT && lex->ignore)
+      lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_INSERT_IGNORE_SELECT);
+
+    if (lex->sql_command == SQLCOM_REPLACE_SELECT)
+      lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_REPLACE_SELECT);
 
     /* Fix lock for first table */
     if (first_table->lock_type == TL_WRITE_DELAYED)
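
Taken together with the CREATE...SELECT and UPDATE IGNORE hunks above, the rule is: flag the statement as unsafe whenever the order in which rows arrive from the SELECT (or the update order) can change the result. A compact stand-alone C sketch of that decision, with made-up flag names rather than the LEX members:

#include <stdio.h>

enum dup_mode { DUP_ERROR, DUP_REPLACE, DUP_UPDATE };

struct stmt_flags {
  int insert_select;         /* INSERT ... SELECT        */
  int replace_select;        /* REPLACE ... SELECT       */
  int create_select;         /* CREATE TABLE ... SELECT  */
  int update;                /* UPDATE                   */
  int ignore;                /* IGNORE modifier present  */
  enum dup_mode duplicates;  /* ON DUPLICATE KEY / REPLACE handling */
};

/* Nonzero when the statement should be marked unsafe for statement-based
   binlogging (sketch of the conditions added in this patch). */
static int mark_unsafe(const struct stmt_flags *s)
{
  if (s->create_select && (s->ignore || s->duplicates == DUP_REPLACE))
    return 1;
  if (s->insert_select && s->duplicates == DUP_UPDATE)
    return 1;
  if (s->insert_select && s->ignore)
    return 1;
  if (s->replace_select)
    return 1;
  if (s->update && s->ignore)
    return 1;
  return 0;
}

int main(void)
{
  struct stmt_flags s = { 1, 0, 0, 0, 1, DUP_ERROR };  /* INSERT IGNORE ... SELECT */
  printf("unsafe: %d\n", mark_unsafe(&s));
  return 0;
}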

=== modified file 'sql/sql_select.cc'
--- a/sql/sql_select.cc	2011-10-04 06:53:01 +0000
+++ b/sql/sql_select.cc	2011-10-05 08:04:47 +0000
@@ -10048,7 +10048,7 @@ static bool create_ref_for_key(JOIN *joi
   j->ref.key=(int) key;
   if (!(j->ref.key_buff= (uchar*) thd->calloc(ALIGN_SIZE(length)*2)) ||
       !(j->ref.key_copy= (store_key**) thd->alloc((sizeof(store_key*) *
-						   (keyparts+1)))) ||
+                                                   (keyparts)))) ||
       !(j->ref.items=    (Item**) thd->alloc(sizeof(Item*)*keyparts)) ||
       !(j->ref.cond_guards= (bool**) thd->alloc(sizeof(uint*)*keyparts)))
   {
@@ -10062,7 +10062,6 @@ static bool create_ref_for_key(JOIN *joi
   j->ref.disable_cache= FALSE;
   keyuse=org_keyuse;
 
-  store_key **ref_key= j->ref.key_copy;
   uchar *key_buff=j->ref.key_buff, *null_ref_key= 0;
   bool keyuse_uses_no_tables= TRUE;
   if (ftkey)
@@ -10074,27 +10073,27 @@ static bool create_ref_for_key(JOIN *joi
       DBUG_RETURN(TRUE);                        // not supported yet. SerG
 
     j->type=JT_FT;
+    memset(j->ref.key_copy, 0, sizeof(j->ref.key_copy[0]) * keyparts);
   }
   else
   {
-    uint i;
-    for (i=0 ; i < keyparts ; keyuse++,i++)
+    for (uint part_no= 0 ; part_no < keyparts ; keyuse++, part_no++)
     {
-      while (keyuse->keypart != i ||
-	     ((~used_tables) & keyuse->used_tables))
-	keyuse++;				/* Skip other parts */
-
-      uint maybe_null= test(keyinfo->key_part[i].null_bit);
-      j->ref.items[i]=keyuse->val;		// Save for cond removal
-      j->ref.cond_guards[i]= keyuse->cond_guard;
+      while (keyuse->keypart != part_no ||
+             ((~used_tables) & keyuse->used_tables))
+        keyuse++;                               // Skip other parts
+
+      uint maybe_null= test(keyinfo->key_part[part_no].null_bit);
+      j->ref.items[part_no]=keyuse->val;        // Save for cond removal
+      j->ref.cond_guards[part_no]= keyuse->cond_guard;
       if (keyuse->null_rejecting) 
-        j->ref.null_rejecting |= 1 << i;
+        j->ref.null_rejecting |= 1 << part_no;
       keyuse_uses_no_tables= keyuse_uses_no_tables && !keyuse->used_tables;
 
       store_key* key= get_store_key(thd,
-				    keyuse,join->const_table_map,
-				    &keyinfo->key_part[i],
-				    key_buff, maybe_null);
+                                    keyuse,join->const_table_map,
+                                    &keyinfo->key_part[part_no],
+                                    key_buff, maybe_null);
       if (unlikely(!key || thd->is_fatal_error))
         DBUG_RETURN(TRUE);
 
@@ -10104,7 +10103,7 @@ static bool create_ref_for_key(JOIN *joi
           query (which refers to this info when printing the 'ref'
           column of the query plan)
         */
-        *ref_key++= key;
+        j->ref.key_copy[part_no]= key;
       else
       {
         /* key is const, copy value now and possibly skip it while ::exec() */
@@ -10119,9 +10118,9 @@ static bool create_ref_for_key(JOIN *joi
          */
         if (result!=store_key::STORE_KEY_OK  ||    // 1)
             key->null_key)                         // 2)
-        {
-	  *ref_key++= key;  // Reevaluate in JOIN::exec() 
-        }
+          j->ref.key_copy[part_no]= key; // Reevaluate in JOIN::exec()
+        else
+          j->ref.key_copy[part_no]= NULL;
       }
       /*
 	Remember if we are going to use REF_OR_NULL
@@ -10130,10 +10129,9 @@ static bool create_ref_for_key(JOIN *joi
       */
       if ((keyuse->optimize & KEY_OPTIMIZE_REF_OR_NULL) && maybe_null)
 	null_ref_key= key_buff;
-      key_buff+=keyinfo->key_part[i].store_length;
+      key_buff+=keyinfo->key_part[part_no].store_length;
     }
   } /* not ftkey */
-  *ref_key=0;				// end_marker
   if (j->type == JT_FT)
     DBUG_RETURN(0);
   if (j->type == JT_CONST)
@@ -12160,7 +12158,6 @@ Item *create_subquery_equalities(THD *th
 
 bool setup_sj_materialization(JOIN_TAB *tab)
 {
-  uint i;
   DBUG_ENTER("setup_sj_materialization");
   TABLE_LIST *emb_sj_nest= tab->emb_sj_nest;
   Semijoin_mat_exec *sjm= emb_sj_nest->sj_mat_exec;
@@ -12211,8 +12208,7 @@ bool setup_sj_materialization(JOIN_TAB *
     if (!(tab_ref->key_buff=
           (uchar*) thd->calloc(ALIGN_SIZE(tmp_key->key_length) * 2)) ||
         !(tab_ref->key_copy=
-          (store_key**) thd->alloc((sizeof(store_key*) *
-                                    (tmp_key_parts + 1)))) ||
+          (store_key**) thd->alloc((sizeof(store_key*) * tmp_key_parts))) ||
         !(tab_ref->items=
           (Item**) thd->alloc(sizeof(Item*) * tmp_key_parts)))
       DBUG_RETURN(TRUE); /* purecov: inspected */
@@ -12225,9 +12221,10 @@ bool setup_sj_materialization(JOIN_TAB *
     uchar *cur_ref_buff= tab_ref->key_buff;
     List_iterator<Item> outer_expr(emb_sj_nest->nested_join->sj_outer_exprs);
 
-    for (i= 0; i < tmp_key_parts; i++, cur_key_part++, ref_key++)
+    for (uint part_no= 0; part_no < tmp_key_parts; 
+         part_no++, cur_key_part++, ref_key++)
     {
-      tab_ref->items[i]= outer_expr++;
+      tab_ref->items[part_no]= outer_expr++;
       int null_count= test(cur_key_part->field->real_maybe_null());
       *ref_key= new store_key_item(thd, cur_key_part->field,
                                    /* TODO:
@@ -12238,10 +12235,10 @@ bool setup_sj_materialization(JOIN_TAB *
                                    */
                                    cur_ref_buff + null_count,
                                    null_count ? cur_ref_buff : 0,
-                                   cur_key_part->length, tab_ref->items[i]);
+                                   cur_key_part->length, 
+                                   tab_ref->items[part_no]);
       cur_ref_buff+= cur_key_part->store_length;
     }
-    *ref_key= NULL; /* End marker. */
     tab_ref->key_err= 1;
     tab_ref->key_parts= tmp_key_parts;
     sjm->tab_ref= tab_ref;
@@ -12252,7 +12249,7 @@ bool setup_sj_materialization(JOIN_TAB *
       sj-inner tables which are not available after the materialization
       has been finished.
     */
-    for (i= 0; i < sjm->table_count; i++)
+    for (uint i= 0; i < sjm->table_count; i++)
     {
       tab[i].set_condition(remove_sj_conds(tab[i].condition()), __LINE__);
       if (tab[i].select)
@@ -21629,7 +21626,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,OR
   uint used_key_parts;
   TABLE *table=tab->table;
   SQL_SELECT *select=tab->select;
-  QUICK_SELECT_I *save_quick= 0;
+  QUICK_SELECT_I *save_quick= select ? select->quick : NULL;
   int best_key= -1;
   Item *orig_cond;
   bool orig_cond_saved= false, ret;
@@ -21671,7 +21668,6 @@ test_if_skip_sort_order(JOIN_TAB *tab,OR
   else if (select && select->quick)		// Range found by opt_range
   {
     int quick_type= select->quick->get_type();
-    save_quick= select->quick;
     /* 
       assume results are not ordered when index merge is used 
       TODO: sergeyp: Results of all index merge selects actually are ordered 
@@ -22577,9 +22573,13 @@ cp_buffer_from_ref(THD *thd, TABLE *tabl
   my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
   bool result= 0;
 
-  for (store_key **copy=ref->key_copy ; *copy ; copy++)
+  for (uint part_no= 0; part_no < ref->key_parts; part_no++)
   {
-    if ((*copy)->copy() & 1)
+    store_key *s_key= ref->key_copy[part_no];
+    if (!s_key)
+      continue;
+
+    if (s_key->copy() & 1)
     {
       result= 1;
       break;

=== modified file 'sql/sql_select.h'
--- a/sql/sql_select.h	2011-09-30 10:22:38 +0000
+++ b/sql/sql_select.h	2011-10-05 08:04:47 +0000
@@ -123,7 +123,12 @@ typedef struct st_table_ref : public Sql
   int           key;                      ///< key no
   uchar         *key_buff;                ///< value to look for with key
   uchar         *key_buff2;               ///< key_buff+key_length
-  store_key     **key_copy;               //
+  /**
+     Used to store the value from each keypart field. These values are
+     used for ref access. If key_copy[key_part] == NULL it means that
+     the value is constant and does not need to be reevaluated
+  */
+  store_key     **key_copy;
   Item          **items;                  ///< val()'s for each keypart
   /*  
     Array of pointers to trigger variables. Some/all of the pointers may be

=== modified file 'storage/innobase/buf/buf0buf.c'
--- a/storage/innobase/buf/buf0buf.c	2011-09-08 13:29:05 +0000
+++ b/storage/innobase/buf/buf0buf.c	2011-10-03 17:53:54 +0000
@@ -4255,6 +4255,9 @@ assert_s_latched:
 					ut_a(rw_lock_is_locked(&block->lock,
 							       RW_LOCK_EX));
 					break;
+
+				case BUF_IO_PIN:
+					break;
 				}
 
 				n_lru++;
@@ -4284,6 +4287,7 @@ assert_s_latched:
 		ut_a(buf_page_get_state(b) == BUF_BLOCK_ZIP_PAGE);
 		switch (buf_page_get_io_fix(b)) {
 		case BUF_IO_NONE:
+		case BUF_IO_PIN:
 			/* All clean blocks should be I/O-unfixed. */
 			break;
 		case BUF_IO_READ:
@@ -4324,6 +4328,7 @@ assert_s_latched:
 			switch (buf_page_get_io_fix(b)) {
 			case BUF_IO_NONE:
 			case BUF_IO_READ:
+			case BUF_IO_PIN:
 				break;
 			case BUF_IO_WRITE:
 				switch (buf_page_get_flush_type(b)) {

=== modified file 'storage/innobase/buf/buf0lru.c'
--- a/storage/innobase/buf/buf0lru.c	2011-08-22 07:46:51 +0000
+++ b/storage/innobase/buf/buf0lru.c	2011-10-03 17:53:54 +0000
@@ -70,8 +70,12 @@ allowed to point to either end of the LR
 
 /** When dropping the search hash index entries before deleting an ibd
 file, we build a local array of pages belonging to that tablespace
-in the buffer pool. Following is the size of that array. */
-#define BUF_LRU_DROP_SEARCH_HASH_SIZE	1024
+in the buffer pool. Following is the size of that array.
+We also release buf_pool->mutex after scanning this many pages of the
+flush_list when dropping a table. This is to ensure that other threads
+are not blocked for extended period of time when using very large
+buffer pools. */
+#define BUF_LRU_DROP_SEARCH_SIZE	1024
 
 /** If we switch on the InnoDB monitor because there are too few available
 frames in the buffer pool, we set this to TRUE */
@@ -216,7 +220,7 @@ buf_LRU_drop_page_hash_batch(
 	ulint	i;
 
 	ut_ad(arr != NULL);
-	ut_ad(count <= BUF_LRU_DROP_SEARCH_HASH_SIZE);
+	ut_ad(count <= BUF_LRU_DROP_SEARCH_SIZE);
 
 	for (i = 0; i < count; ++i) {
 		btr_search_drop_page_hash_when_freed(space_id, zip_size,
@@ -250,7 +254,7 @@ buf_LRU_drop_page_hash_for_tablespace(
 	}
 
 	page_arr = ut_malloc(
-		sizeof(ulint) * BUF_LRU_DROP_SEARCH_HASH_SIZE);
+		sizeof(ulint) * BUF_LRU_DROP_SEARCH_SIZE);
 
 	buf_pool_mutex_enter(buf_pool);
 	num_entries = 0;
@@ -289,10 +293,10 @@ next_page:
 		/* Store the page number so that we can drop the hash
 		index in a batch later. */
 		page_arr[num_entries] = bpage->offset;
-		ut_a(num_entries < BUF_LRU_DROP_SEARCH_HASH_SIZE);
+		ut_a(num_entries < BUF_LRU_DROP_SEARCH_SIZE);
 		++num_entries;
 
-		if (num_entries < BUF_LRU_DROP_SEARCH_HASH_SIZE) {
+		if (num_entries < BUF_LRU_DROP_SEARCH_SIZE) {
 			goto next_page;
 		}
 
@@ -337,38 +341,39 @@ next_page:
 }
 
 /******************************************************************//**
-Invalidates all pages belonging to a given tablespace inside a specific
+Remove all dirty pages belonging to a given tablespace inside a specific
 buffer pool instance when we are deleting the data file(s) of that
-tablespace. */
+tablespace. The pages still remain a part of LRU and are evicted from
+the list as they age towards the tail of the LRU. */
 static
 void
-buf_LRU_invalidate_tablespace_buf_pool_instance(
-/*============================================*/
+buf_LRU_remove_dirty_pages_for_tablespace(
+/*======================================*/
 	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
 	ulint		id)		/*!< in: space id */
 {
 	buf_page_t*	bpage;
 	ibool		all_freed;
+	ulint		i;
 
 scan_again:
 	buf_pool_mutex_enter(buf_pool);
+	buf_flush_list_mutex_enter(buf_pool);
 
 	all_freed = TRUE;
 
-	bpage = UT_LIST_GET_LAST(buf_pool->LRU);
+	for (bpage = UT_LIST_GET_LAST(buf_pool->flush_list), i = 0;
+	     bpage != NULL; ++i) {
 
-	while (bpage != NULL) {
 		buf_page_t*	prev_bpage;
-		ulint		fold;
-		rw_lock_t* 	hash_lock = NULL;
-		mutex_t* 	block_mutex = NULL;
+		mutex_t*	block_mutex = NULL;
 
 		ut_a(buf_page_in_file(bpage));
 
-		prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
+		prev_bpage = UT_LIST_GET_PREV(list, bpage);
 
 		/* bpage->space and bpage->io_fix are protected by
-		buf_pool->mutex and block_mutex.  It is safe to check
+		buf_pool->mutex and block_mutex. It is safe to check
 		them while holding buf_pool->mutex only. */
 
 		if (buf_page_get_space(bpage) != id) {
@@ -382,95 +387,83 @@ scan_again:
 
 			all_freed = FALSE;
 			goto next_page;
-		} else {
-			fold = buf_page_address_fold(bpage->space,
-						     bpage->offset);
-			hash_lock = buf_page_hash_lock_get(buf_pool, fold);
-			block_mutex = buf_page_get_mutex(bpage);
-
-			rw_lock_x_lock(hash_lock);
-			mutex_enter(block_mutex);
-
-			if (bpage->buf_fix_count > 0) {
+		}
 
-				rw_lock_x_unlock(hash_lock);
-				mutex_exit(block_mutex);
+		/* We have to release the flush_list_mutex to obey the
+		latching order. We are however guaranteed that the page
+		will stay in the flush_list because buf_flush_remove()
+		needs buf_pool->mutex as well. */
+		buf_flush_list_mutex_exit(buf_pool);
+		block_mutex = buf_page_get_mutex(bpage);
+		mutex_enter(block_mutex);
 
-				/* We cannot remove this page during
-				this scan yet; maybe the system is
-				currently reading it in, or flushing
-				the modifications to the file */
+		if (bpage->buf_fix_count > 0) {
+			mutex_exit(block_mutex);
+			buf_flush_list_mutex_enter(buf_pool);
 
-				all_freed = FALSE;
-				goto next_page;
-			}
-		}
+			/* We cannot remove this page during
+			this scan yet; maybe the system is
+			currently reading it in, or flushing
+			the modifications to the file */
 
-		ut_ad(mutex_own(block_mutex));
-#ifdef UNIV_SYNC_DEBUG
-		ut_ad(rw_lock_own(hash_lock, RW_LOCK_EX));
-#endif /* UNIV_SYNC_DEBUG */
-
-#ifdef UNIV_DEBUG
-		if (buf_debug_prints) {
-			fprintf(stderr,
-				"Dropping space %lu page %lu\n",
-				(ulong) buf_page_get_space(bpage),
-				(ulong) buf_page_get_page_no(bpage));
+			all_freed = FALSE;
+			goto next_page;
 		}
-#endif
-		if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) {
-			/* This is a compressed-only block
-			descriptor. Do nothing. */
-		} else if (((buf_block_t*) bpage)->is_hashed) {
-			ulint	page_no;
-			ulint	zip_size;
-
-			buf_pool_mutex_exit(buf_pool);
 
-			zip_size = buf_page_get_zip_size(bpage);
-			page_no = buf_page_get_page_no(bpage);
+		ut_ad(bpage->oldest_modification != 0);
 
-			rw_lock_x_unlock(hash_lock);
-			mutex_exit(block_mutex);
+		buf_flush_remove(bpage);
 
-			/* Note that the following call will acquire
-			an S-latch on the page */
+		mutex_exit(block_mutex);
+		buf_flush_list_mutex_enter(buf_pool);
+next_page:
+		bpage = prev_bpage;
 
-			btr_search_drop_page_hash_when_freed(
-				id, zip_size, page_no);
-			goto scan_again;
+		if (!bpage) {
+			break;
 		}
 
-		if (bpage->oldest_modification != 0) {
+		/* Every BUF_LRU_DROP_SEARCH_SIZE iterations in the
+		loop we release buf_pool->mutex to let other threads
+		do their job. */
+		if (i < BUF_LRU_DROP_SEARCH_SIZE) {
+			continue;
+		}
 
-			buf_flush_remove(bpage);
+		/* We IO-fix the block to make sure that the block
+		stays in its position in the flush_list. */
+		if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
+			/* Block is already IO-fixed. We don't
+			want to change the value. Lets leave
+			this block alone. */
+			continue;
 		}
 
-		/* Remove from the LRU list. */
+		buf_flush_list_mutex_exit(buf_pool);
+		block_mutex = buf_page_get_mutex(bpage);
+		mutex_enter(block_mutex);
+		buf_page_set_sticky(bpage);
+		mutex_exit(block_mutex);
 
-		if (buf_LRU_block_remove_hashed_page(bpage, TRUE)
-		    != BUF_BLOCK_ZIP_FREE) {
-			buf_LRU_block_free_hashed_page((buf_block_t*)
-						       bpage);
-		} else {
-			/* The block_mutex should have been
-			released by buf_LRU_block_remove_hashed_page() */
-			ut_ad(block_mutex == &buf_pool->zip_mutex);
-		}
+		/* Now it is safe to release the buf_pool->mutex. */
+		buf_pool_mutex_exit(buf_pool);
+		os_thread_yield();
+		buf_pool_mutex_enter(buf_pool);
 
-		ut_ad(!mutex_own(block_mutex));
-#ifdef UNIV_SYNC_DEBUG
-		ut_ad(!rw_lock_own(hash_lock, RW_LOCK_EX)
-		      && !rw_lock_own(hash_lock, RW_LOCK_SHARED));
-#endif /* UNIV_SYNC_DEBUG */
-		ut_ad(!mutex_own(block_mutex));
-next_page:
-		bpage = prev_bpage;
+		mutex_enter(block_mutex);
+		buf_page_unset_sticky(bpage);
+		mutex_exit(block_mutex);
 
+		buf_flush_list_mutex_enter(buf_pool);
+		ut_ad(bpage->in_flush_list);
+
+		i = 0;
 	}
 
 	buf_pool_mutex_exit(buf_pool);
+	buf_flush_list_mutex_exit(buf_pool);
+
+	ut_ad(buf_flush_validate(buf_pool));
 
 	if (!all_freed) {
 		os_thread_sleep(20000);
@@ -501,7 +494,7 @@ buf_LRU_invalidate_tablespace(
 
 		buf_pool = buf_pool_from_array(i);
 		buf_LRU_drop_page_hash_for_tablespace(buf_pool, id);
-		buf_LRU_invalidate_tablespace_buf_pool_instance(buf_pool, id);
+		buf_LRU_remove_dirty_pages_for_tablespace(buf_pool, id);
 	}
 }
 
@@ -1581,15 +1574,18 @@ func_exit:
 
 		bpage->zip.data = NULL;
 		page_zip_set_size(&bpage->zip, 0);
+		mutex_exit(block_mutex);
 
 		/* Prevent buf_page_get_gen() from
 		decompressing the block while we release
 		buf_pool->mutex and block_mutex. */
-		b->buf_fix_count++;
-		b->io_fix = BUF_IO_READ;
+		block_mutex = buf_page_get_mutex(b);
+		mutex_enter(block_mutex);
+		buf_page_set_sticky(b);
+		mutex_exit(block_mutex);
 
 		rw_lock_x_unlock(hash_lock);
-		mutex_exit(block_mutex);
+
 	} else {
 
 		/* There can be multiple threads doing an LRU scan to
@@ -1600,11 +1596,9 @@ func_exit:
 		else considers this block as a victim for page
 		replacement. This block is already out of page_hash
 		and we are about to remove it from the LRU list and put
-		it on the free list. To avoid this situation we set the
-		buf_fix_count and io_fix fields here. */
+		it on the free list. */
 		mutex_enter(block_mutex);
-		buf_block_buf_fix_inc((buf_block_t*) bpage, __FILE__, __LINE__);
-		buf_page_set_io_fix(bpage, BUF_IO_READ);
+		buf_page_set_sticky(bpage);
 		mutex_exit(block_mutex);
 	}
 
@@ -1642,19 +1636,9 @@ func_exit:
 
 	buf_pool_mutex_enter(buf_pool);
 
-	if (b) {
-		mutex_enter(&buf_pool->zip_mutex);
-		b->buf_fix_count--;
-		buf_page_set_io_fix(b, BUF_IO_NONE);
-		mutex_exit(&buf_pool->zip_mutex);
-	} else {
-		mutex_enter(block_mutex);
-		ut_ad(bpage->buf_fix_count > 0);
-		ut_ad(bpage->io_fix == BUF_IO_READ);
-		buf_block_buf_fix_dec((buf_block_t*) bpage);
-		buf_page_set_io_fix(bpage, BUF_IO_NONE);
-		mutex_exit(block_mutex);
-	}
+	mutex_enter(block_mutex);
+	buf_page_unset_sticky(b != NULL ? b : bpage);
+	mutex_exit(block_mutex);
 
 	buf_LRU_block_free_hashed_page((buf_block_t*) bpage);
 	return(TRUE);
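
The flush_list scan above follows a pattern worth spelling out: every BUF_LRU_DROP_SEARCH_SIZE iterations the current block is IO-pinned (BUF_IO_PIN), buf_pool->mutex is released, the thread yields, and the mutex is retaken, so other threads are not blocked for long periods on very large buffer pools. A rough stand-alone C sketch of the same idea, using a pthread mutex as a stand-in for buf_pool->mutex:

#include <pthread.h>
#include <sched.h>
#include <stddef.h>

#define BATCH 1024   /* stand-in for BUF_LRU_DROP_SEARCH_SIZE */

struct node { struct node *prev; int pinned; };

/* Walk the list from the tail while holding the big lock, but drop it and
   yield every BATCH elements; pinning the current node (here only a flag)
   keeps it valid while the lock is not held. */
static void scan(pthread_mutex_t *pool_mutex, struct node *tail)
{
  unsigned long i = 0;
  pthread_mutex_lock(pool_mutex);
  for (struct node *n = tail; n != NULL; n = n->prev, i++) {
    /* ... process n under pool_mutex ... */
    if (i < BATCH)
      continue;
    n->pinned = 1;
    pthread_mutex_unlock(pool_mutex);
    sched_yield();                      /* let waiting threads run */
    pthread_mutex_lock(pool_mutex);
    n->pinned = 0;
    i = 0;
  }
  pthread_mutex_unlock(pool_mutex);
}

int main(void)
{
  pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
  struct node a = { NULL, 0 }, b = { &a, 0 }, c = { &b, 0 };
  scan(&m, &c);
  return 0;
}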

=== modified file 'storage/innobase/dict/dict0stats.c'
--- a/storage/innobase/dict/dict0stats.c	2011-08-29 12:58:11 +0000
+++ b/storage/innobase/dict/dict0stats.c	2011-10-04 07:06:34 +0000
@@ -35,17 +35,14 @@ Created Jan 06, 2010 Vasil Dimov
 #include "data0type.h" /* dtype_t */
 #include "db0err.h" /* db_err */
 #include "dyn0dyn.h" /* dyn_array* */
-#include "lock0lock.h" /* lock_table_by_name() */
 #include "pars0pars.h" /* pars_info_create() */
 #include "pars0types.h" /* pars_info_t */
 #include "que0que.h" /* que_eval_sql() */
-#include "rem0cmp.h" /* cmp_rec_rec_with_match() */
-#include "rem0types.h" /* REC_MAX_N_FIELDS */
+#include "rem0cmp.h" /* REC_MAX_N_FIELDS,cmp_rec_rec_with_match() */
 #include "row0sel.h" /* sel_node_struct */
 #include "row0types.h" /* sel_node_t */
 #include "trx0trx.h" /* trx_create() */
 #include "trx0roll.h" /* trx_rollback_to_savepoint() */
-#include "usr0types.h" /* sess_t */
 #include "ut0rnd.h" /* ut_rnd_interval() */
 
 #include "ha_prototypes.h" /* innobase_strcasecmp() */
@@ -346,7 +343,7 @@ dict_stats_analyze_index()
     dict_stats_analyze_index_for_n_prefix(that level, stats collected above)
       // full scan of the level in one mtr
       dive below some records and analyze the leaf page there:
-      dict_stats_analyze_index_below_pcur()
+      dict_stats_analyze_index_below_cur()
 @} */
 
 /*********************************************************************//**
@@ -378,9 +375,9 @@ dict_stats_analyze_index_level(
 	dtuple_t*	dtuple;
 	btr_pcur_t	pcur;
 	mtr_t		mtr;
-	page_t*		page;
-	rec_t*		rec;
-	rec_t*		prev_rec;
+	const page_t*	page;
+	const rec_t*	rec;
+	const rec_t*	prev_rec;
 	byte*		prev_rec_buf = NULL;
 	ulint		prev_rec_buf_size = 0;
 	ulint		i;
@@ -430,7 +427,7 @@ dict_stats_analyze_index_level(
 	as such, if we are on a non-leaf level */
 	ut_a(level == 0
 	     || (REC_INFO_MIN_REC_FLAG & rec_get_info_bits(
-		     page_rec_get_next(page_get_infimum_rec(page)),
+		     page_rec_get_next_const(page_get_infimum_rec(page)),
 		     page_is_comp(page))));
 
 	if (btr_pcur_is_before_first_on_page(&pcur)) {
@@ -465,7 +462,7 @@ dict_stats_analyze_index_level(
 		rec = btr_pcur_get_rec(&pcur);
 
 		/* increment the pages counter at the end of each page */
-		if (page_rec_is_supremum(page_rec_get_next(rec))) {
+		if (page_rec_is_supremum(page_rec_get_next_const(rec))) {
 
 			(*total_pages)++;
 		}
@@ -540,7 +537,7 @@ dict_stats_analyze_index_level(
 			}
 		}
 
-		if (page_rec_is_supremum(page_rec_get_next(rec))) {
+		if (page_rec_is_supremum(page_rec_get_next_const(rec))) {
 			/* end of a page has been reached */
 
 			/* we need to copy the record instead of assigning
@@ -654,52 +651,55 @@ will return as soon as it finds a record
 to the right, which means that in the case of QUIT_ON_FIRST_NON_BORING the
 returned n_diff can either be 0 (empty page), 1 (the whole page has all keys
 equal) or 2 (the function found a non-boring record and returned).
-@return the last user record which was read or NULL if the page is empty and
-does not contain user records.
+@return offsets1 or offsets2 (the offsets of *out_rec),
+or NULL if the page is empty and does not contain user records.
 dict_stats_scan_page() @{ */
-UNIV_INLINE
-rec_t*
+UNIV_INLINE __attribute__((nonnull))
+ulint*
 dict_stats_scan_page(
 /*=================*/
+	const rec_t**		out_rec,	/*!< out: record, or NULL */
+	ulint*			offsets1,	/*!< out: rec_get_offsets()
+						working space (must be big
+						enough) */
+	ulint*			offsets2,	/*!< out: rec_get_offsets()
+						working space (must be big
+						enough) */
 	dict_index_t*		index,		/*!< in: index of the page */
-	page_t*			page,		/*!< in: the page to scan */
+	const page_t*		page,		/*!< in: the page to scan */
 	ulint			n_prefix,	/*!< in: look at the first
 						n_prefix columns */
-	mem_heap_t*		heap,		/*!< in: aux memory heap to
-						use from the caller */
 	page_scan_method_t	scan_method,	/*!< in: scan to the end of
 						the page or not */
 	ib_uint64_t*		n_diff)		/*!< out: number of distinct
 						records encountered */
 {
-	ulint	offsets_onstack1[REC_OFFS_NORMAL_SIZE];
-	ulint	offsets_onstack2[REC_OFFS_NORMAL_SIZE];
-	ulint*	offsets_rec = offsets_onstack1;
-	ulint*	offsets_next_rec = offsets_onstack2;
-	rec_t*	rec;
-	rec_t*	next_rec;
-	rec_t*	supremum;
-
-	rec_offs_init(offsets_onstack1);
-	rec_offs_init(offsets_onstack2);
+	ulint*		offsets_rec		= offsets1;
+	ulint*		offsets_next_rec	= offsets2;
+	const rec_t*	rec;
+	const rec_t*	next_rec;
+	/* A dummy heap, to be passed to rec_get_offsets().
+	Because offsets1,offsets2 should be big enough,
+	this memory heap should never be used. */
+	mem_heap_t*	heap			= NULL;
 
-	supremum = page_get_supremum_rec(page);
-	rec = page_rec_get_next(page_get_infimum_rec(page));
+	rec = page_rec_get_next_const(page_get_infimum_rec(page));
 
-	if (rec == supremum) {
+	if (page_rec_is_supremum(rec)) {
 		/* the page is empty */
 		*n_diff = 0;
+		*out_rec = NULL;
 		return(NULL);
 	}
 
 	offsets_rec = rec_get_offsets(rec, index, offsets_rec,
 				      ULINT_UNDEFINED, &heap);
 
-	next_rec = page_rec_get_next(rec);
+	next_rec = page_rec_get_next_const(rec);
 
 	*n_diff = 1;
 
-	while (next_rec != supremum) {
+	while (!page_rec_is_supremum(next_rec)) {
 
 		ulint	matched_fields = 0;
 		ulint	matched_bytes = 0;
@@ -723,7 +723,7 @@ dict_stats_scan_page(
 			(*n_diff)++;
 
 			if (scan_method == QUIT_ON_FIRST_NON_BORING) {
-				return(rec);
+				goto func_exit;
 			}
 		}
 
@@ -745,10 +745,14 @@ dict_stats_scan_page(
 			offsets_rec = offsets_next_rec;
 			offsets_next_rec = offsets_tmp;
 		}
-		next_rec = page_rec_get_next(next_rec);
+		next_rec = page_rec_get_next_const(next_rec);
 	}
 
-	return(rec);
+func_exit:
+	/* offsets1,offsets2 should have been big enough */
+	ut_a(heap == NULL);
+	*out_rec = rec;
+	return(offsets_rec);
 }
 /* @} */
 
@@ -756,13 +760,13 @@ dict_stats_scan_page(
 Dive below the current position of a cursor and calculate the number of
 distinct records on the leaf page, when looking at the fist n_prefix
 columns.
-dict_stats_analyze_index_below_pcur() @{
+dict_stats_analyze_index_below_cur() @{
 @return number of distinct records on the leaf page */
 static
 ib_uint64_t
-dict_stats_analyze_index_below_pcur(
-/*================================*/
-	btr_pcur_t*	pcur,		/*!< in: cursor, not modified */
+dict_stats_analyze_index_below_cur(
+/*===============================*/
+	const btr_cur_t*cur,		/*!< in: cursor */
 	ulint		n_prefix,	/*!< in: look at the first n_prefix
 					columns when comparing records */
 	mtr_t*		mtr)		/*!< in/out: mini-transaction */
@@ -772,28 +776,42 @@ dict_stats_analyze_index_below_pcur(
 	ulint		zip_size;
 	buf_block_t*	block;
 	ulint		page_no;
-	page_t*		page;
+	const page_t*	page;
 	mem_heap_t*	heap;
-	rec_t*		rec;
-	ulint		offsets_onstack[REC_OFFS_NORMAL_SIZE];
-	ulint*		offsets_rec = offsets_onstack;
+	const rec_t*	rec;
+	ulint*		offsets1;
+	ulint*		offsets2;
+	ulint*		offsets_rec;
 	ulint		root_height;
 	ib_uint64_t	n_diff; /* the result */
+	ulint		size;
 
-	rec_offs_init(offsets_onstack);
+	index = btr_cur_get_index(cur);
 
-	index = btr_cur_get_index(btr_pcur_get_btr_cur(pcur));
-
-	heap = mem_heap_create(256);
+	/* Allocate offsets for the record and the node pointer, for
+	node pointer records. In a secondary index, the node pointer
+	record will consist of all index fields followed by a child
+	page number.
+	Allocate space for the offsets header (the allocation size at
+	offsets[0] and the REC_OFFS_HEADER_SIZE bytes), and n_fields + 1,
+	so that this will never be less than the size calculated in
+	rec_get_offsets_func(). */
+	size = (1 + REC_OFFS_HEADER_SIZE) + 1 + dict_index_get_n_fields(index);
+
+	heap = mem_heap_create(size * (sizeof *offsets1 + sizeof *offsets2));
+	offsets1 = mem_heap_alloc(heap, size * sizeof *offsets1);
+	offsets2 = mem_heap_alloc(heap, size * sizeof *offsets2);
+	rec_offs_set_n_alloc(offsets1, size);
+	rec_offs_set_n_alloc(offsets2, size);
 
 	root_height = btr_page_get_level(btr_root_get(index, mtr), mtr);
 
 	space = dict_index_get_space(index);
 	zip_size = dict_table_zip_size(index->table);
 
-	rec = btr_pcur_get_rec(pcur);
+	rec = btr_cur_get_rec(cur);
 
-	offsets_rec = rec_get_offsets(rec, index, offsets_rec,
+	offsets_rec = rec_get_offsets(rec, index, offsets1,
 				      ULINT_UNDEFINED, &heap);
 
 	page_no = btr_node_ptr_get_child_page_no(rec, offsets_rec);
@@ -814,12 +832,12 @@ dict_stats_analyze_index_below_pcur(
 		/* else */
 
 		/* search for the first non-boring record on the page */
-		rec = dict_stats_scan_page(index, page, n_prefix, heap,
-					   QUIT_ON_FIRST_NON_BORING,
-					   &n_diff);
+		offsets_rec = dict_stats_scan_page(
+			&rec, offsets1, offsets2, index, page, n_prefix,
+			QUIT_ON_FIRST_NON_BORING, &n_diff);
 
 		/* pages on level > 0 are not allowed to be empty */
-		ut_a(rec != NULL);
+		ut_a(offsets_rec != NULL);
 		/* if page is not empty (rec != NULL) then n_diff must
 		be > 0, otherwise there is a bug in dict_stats_scan_page() */
 		ut_a(n_diff > 0);
@@ -850,15 +868,15 @@ dict_stats_analyze_index_below_pcur(
 	/* scan the leaf page and find the number of distinct keys,
 	when looking only at the first n_prefix columns */
 
-	rec = dict_stats_scan_page(index, page, n_prefix, heap,
-				   COUNT_ALL_NON_BORING,
-				   &n_diff);
+	offsets_rec = dict_stats_scan_page(
+		&rec, offsets1, offsets2, index, page, n_prefix,
+		COUNT_ALL_NON_BORING, &n_diff);
 
 	if (root_height > 0) {
 
 		/* empty pages are allowed only if the whole B-tree is empty
-		and contains a signle empty page */
-		ut_a(rec != NULL);
+		and contains a single empty page */
+		ut_a(offsets_rec != NULL);
 	}
 
 #if 0
@@ -910,7 +928,7 @@ dict_stats_analyze_index_for_n_prefix(
 	dtuple_t*	dtuple;
 	btr_pcur_t	pcur;
 	mtr_t		mtr;
-	page_t*		page;
+	const page_t*	page;
 	ib_uint64_t	rec_idx;
 	ib_uint64_t	last_idx_on_level;
 	ib_uint64_t	n_recs_to_dive_below;
@@ -958,7 +976,8 @@ dict_stats_analyze_index_for_n_prefix(
 	/* check whether the first record on the leftmost page is marked
 	as such, if we are on a non-leaf level */
 	ut_a(level == 0 || REC_INFO_MIN_REC_FLAG
-	     & rec_get_info_bits(page_rec_get_next(page_get_infimum_rec(page)),
+	     & rec_get_info_bits(page_rec_get_next_const(
+					 page_get_infimum_rec(page)),
 				 page_is_comp(page)));
 
 	if (btr_pcur_is_before_first_on_page(&pcur)) {
@@ -1055,8 +1074,8 @@ dict_stats_analyze_index_for_n_prefix(
 		ut_a(rec_idx == dive_below_idx);
 
 		n_diff_sum_of_all_analyzed_pages
-			+= dict_stats_analyze_index_below_pcur(
-				&pcur, n_prefix, &mtr);
+			+= dict_stats_analyze_index_below_cur(
+				btr_pcur_get_btr_cur(&pcur), n_prefix, &mtr);
 	}
 
 	index->stat_n_diff_key_vals[n_prefix]
@@ -1334,7 +1353,7 @@ dict_stats_update_persistent(
 
 	if (index == NULL) {
 		/* Table definition is corrupt */
-		return DB_CORRUPTION;
+		return(DB_CORRUPTION);
 	}
 
 	dict_stats_analyze_index(index);

=== modified file 'storage/innobase/handler/i_s.cc'
--- a/storage/innobase/handler/i_s.cc	2011-09-23 11:09:58 +0000
+++ b/storage/innobase/handler/i_s.cc	2011-10-03 17:53:54 +0000
@@ -3285,6 +3285,10 @@ i_s_innodb_buffer_page_fill(
 			OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX],
 					      "IO_WRITE"));
 			break;
+		case BUF_IO_PIN:
+			OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX],
+					      "IO_PIN"));
+			break;
 		}
 
 		OK(field_store_string(fields[IDX_BUFFER_PAGE_IS_OLD],

=== modified file 'storage/innobase/include/btr0cur.h'
--- a/storage/innobase/include/btr0cur.h	2011-08-29 08:45:30 +0000
+++ b/storage/innobase/include/btr0cur.h	2011-09-30 12:49:33 +0000
@@ -103,12 +103,9 @@ btr_cur_get_page(
 	btr_cur_t*	cursor);/*!< in: tree cursor */
 /*********************************************************//**
 Returns the index of a cursor.
+@param cursor	b-tree cursor
 @return	index */
-UNIV_INLINE
-dict_index_t*
-btr_cur_get_index(
-/*==============*/
-	btr_cur_t*	cursor);/*!< in: B-tree cursor */
+#define btr_cur_get_index(cursor) ((cursor)->index)
 /*********************************************************//**
 Positions a tree cursor at a given record. */
 UNIV_INLINE

=== modified file 'storage/innobase/include/btr0cur.ic'
--- a/storage/innobase/include/btr0cur.ic	2011-06-16 09:26:09 +0000
+++ b/storage/innobase/include/btr0cur.ic	2011-09-30 12:49:33 +0000
@@ -100,18 +100,6 @@ btr_cur_get_page(
 }
 
 /*********************************************************//**
-Returns the index of a cursor.
-@return	index */
-UNIV_INLINE
-dict_index_t*
-btr_cur_get_index(
-/*==============*/
-	btr_cur_t*	cursor)	/*!< in: B-tree cursor */
-{
-	return(cursor->index);
-}
-
-/*********************************************************//**
 Positions a tree cursor at a given record. */
 UNIV_INLINE
 void

=== modified file 'storage/innobase/include/buf0buf.h'
--- a/storage/innobase/include/buf0buf.h	2011-09-08 06:46:51 +0000
+++ b/storage/innobase/include/buf0buf.h	2011-10-03 17:53:54 +0000
@@ -938,7 +938,27 @@ buf_block_set_io_fix(
 /*=================*/
 	buf_block_t*	block,	/*!< in/out: control block */
 	enum buf_io_fix	io_fix);/*!< in: io_fix state */
-
+/*********************************************************************//**
+Makes a block sticky. A sticky block implies that even after we release
+the buf_pool->mutex and the block->mutex:
+* it cannot be removed from the flush_list
+* the block descriptor cannot be relocated
+* it cannot be removed from the LRU list
+Note that:
+* the block can still change its position in the LRU list
+* the next and previous pointers can change. */
+UNIV_INLINE
+void
+buf_page_set_sticky(
+/*================*/
+	buf_page_t*	bpage);	/*!< in/out: control block */
+/*********************************************************************//**
+Removes stickiness of a block. */
+UNIV_INLINE
+void
+buf_page_unset_sticky(
+/*==================*/
+	buf_page_t*	bpage);	/*!< in/out: control block */
 /********************************************************************//**
 Determine if a buffer block can be relocated in memory.  The block
 can be dirty, but it must not be I/O-fixed or bufferfixed. */

=== modified file 'storage/innobase/include/buf0buf.ic'
--- a/storage/innobase/include/buf0buf.ic	2011-09-08 06:46:51 +0000
+++ b/storage/innobase/include/buf0buf.ic	2011-10-03 17:53:54 +0000
@@ -425,6 +425,7 @@ buf_page_get_io_fix(
 	case BUF_IO_NONE:
 	case BUF_IO_READ:
 	case BUF_IO_WRITE:
+	case BUF_IO_PIN:
 		return(io_fix);
 	}
 	ut_error;
@@ -475,6 +476,49 @@ buf_block_set_io_fix(
 	buf_page_set_io_fix(&block->page, io_fix);
 }
 
+/*********************************************************************//**
+Makes a block sticky. A sticky block implies that even after we release
+the buf_pool->mutex and the block->mutex:
+* it cannot be removed from the flush_list
+* the block descriptor cannot be relocated
+* it cannot be removed from the LRU list
+Note that:
+* the block can still change its position in the LRU list
+* the next and previous pointers can change. */
+UNIV_INLINE
+void
+buf_page_set_sticky(
+/*================*/
+	buf_page_t*	bpage)	/*!< in/out: control block */
+{
+#ifdef UNIV_DEBUG
+	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
+	ut_ad(buf_pool_mutex_own(buf_pool));
+#endif
+	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
+	ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
+
+	bpage->io_fix = BUF_IO_PIN;
+}
+
+/*********************************************************************//**
+Removes stickiness of a block. */
+UNIV_INLINE
+void
+buf_page_unset_sticky(
+/*==================*/
+	buf_page_t*	bpage)	/*!< in/out: control block */
+{
+#ifdef UNIV_DEBUG
+	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
+	ut_ad(buf_pool_mutex_own(buf_pool));
+#endif
+	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
+	ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_PIN);
+
+	bpage->io_fix = BUF_IO_NONE;
+}
+
 /********************************************************************//**
 Determine if a buffer block can be relocated in memory.  The block
 can be dirty, but it must not be I/O-fixed or bufferfixed. */

=== modified file 'storage/innobase/include/buf0types.h'
--- a/storage/innobase/include/buf0types.h	2011-08-29 07:14:48 +0000
+++ b/storage/innobase/include/buf0types.h	2011-10-03 17:53:54 +0000
@@ -56,7 +56,10 @@ enum buf_flush {
 enum buf_io_fix {
 	BUF_IO_NONE = 0,		/**< no pending I/O */
 	BUF_IO_READ,			/**< read pending */
-	BUF_IO_WRITE			/**< write pending */
+	BUF_IO_WRITE,			/**< write pending */
+	BUF_IO_PIN			/**< disallow relocation of
+					block and its removal of from
+					the flush_list */
 };
 
 /** Alternatives for srv_checksum_algorithm, which can be changed by

=== modified file 'storage/innobase/rem/rem0rec.c'
--- a/storage/innobase/rem/rem0rec.c	2011-06-16 12:33:20 +0000
+++ b/storage/innobase/rem/rem0rec.c	2011-10-04 07:06:34 +0000
@@ -550,6 +550,9 @@ rec_get_offsets_func(
 			n = dict_index_get_n_fields(index);
 			break;
 		case REC_STATUS_NODE_PTR:
+			/* Node pointer records consist of the
+			uniquely identifying fields of the record
+			followed by a child page number field. */
 			n = dict_index_get_n_unique_in_tree(index) + 1;
 			break;
 		case REC_STATUS_INFIMUM:
@@ -569,6 +572,8 @@ rec_get_offsets_func(
 		n = n_fields;
 	}
 
+	/* The offsets header consists of the allocation size at
+	offsets[0] and the REC_OFFS_HEADER_SIZE bytes. */
 	size = n + (1 + REC_OFFS_HEADER_SIZE);
 
 	if (UNIV_UNLIKELY(!offsets)

=== modified file 'storage/innobase/srv/srv0start.c'
--- a/storage/innobase/srv/srv0start.c	2011-09-06 03:13:44 +0000
+++ b/storage/innobase/srv/srv0start.c	2011-10-03 08:43:06 +0000
@@ -1701,6 +1701,21 @@ innobase_start_or_create_for_mysql(void)
 	}
 #endif /* UNIV_LOG_ARCHIVE */
 
+	if (srv_n_log_files * srv_log_file_size * UNIV_PAGE_SIZE
+	    >= 549755813888ULL /* 512G */) {
+		/* log_block_convert_lsn_to_no() limits the returned block
+		number to 1G and given that OS_FILE_LOG_BLOCK_SIZE is 512
+		bytes, then we have a limit of 512 GB. If that limit is to
+		be raised, then log_block_convert_lsn_to_no() must be
+		modified. */
+		ut_print_timestamp(stderr);
+		fprintf(stderr,
+			" InnoDB: Error: combined size of log files"
+			" must be < 512 GB\n");
+
+		return(DB_ERROR);
+	}
+
 	if (srv_n_log_files * srv_log_file_size >= ULINT_MAX) {
 		/* fil_io() takes ulint as an argument and we are passing
 		(next_offset / UNIV_PAGE_SIZE) to it in log_group_write_buf().
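
The 512 GB figure in the new check follows directly from the two limits named in the comment. A tiny stand-alone C verification of the arithmetic (constants repeated here only for illustration):

#include <stdio.h>

int main(void)
{
  unsigned long long max_block_no = 1ULL << 30;   /* block numbers limited to 1G */
  unsigned long long block_size   = 512;          /* OS_FILE_LOG_BLOCK_SIZE bytes */
  /* 2^30 * 512 = 549755813888 bytes = 512 GB, the constant used above. */
  printf("%llu\n", max_block_no * block_size);
  return 0;
}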

=== modified file 'unittest/mysys/my_atomic-t.c'
--- a/unittest/mysys/my_atomic-t.c	2011-06-30 15:46:53 +0000
+++ b/unittest/mysys/my_atomic-t.c	2011-10-03 07:31:55 +0000
@@ -164,7 +164,14 @@ void do_tests()
   test_concurrently("my_atomic_cas32", test_atomic_cas, THREADS, CYCLES);
 
   {
-    int64 b=0x1000200030004000LL;
+    /*
+      If b is not volatile, the wrong assembly code is generated on OSX Lion
+      as the variable is optimized away as a constant.
+      See Bug#62533 / Bug#13030056.
+      Another workaround is to specify architecture explicitly using e.g.
+      CFLAGS/CXXFLAGS= "-m64".
+    */
+    volatile int64 b=0x1000200030004000LL;
     a64=0;
     my_atomic_add64(&a64, b);
     ok(a64==b, "add64");
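
The comment above describes the failure mode: without volatile, the 64-bit constant can be folded away and wrong code is emitted for the add on OS X Lion. A tiny stand-alone C illustration of the workaround, not using my_atomic at all:

#include <assert.h>

int main(void)
{
  /* volatile forces the compiler to keep the constant in memory and
     reload it instead of folding it into the call site. */
  volatile long long b = 0x1000200030004000LL;
  long long a = 0;
  a += b;
  assert(a == 0x1000200030004000LL);
  return 0;
}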

No bundle (reason: useless for push emails).