List: Commits — « Previous Message | Next Message »
From: magnus.blaudd  Date: November 16 2011 9:40am
Subject: bzr push into mysql-trunk-cluster branch (magnus.blaudd:3409 to 3410)
View as plain text  
 3410 magnus.blaudd@stripped	2011-11-16 [merge]
      Merge 5.5-cluster -> trunk-cluster

    added:
      mysql-test/suite/ndb/t/ndb_join_pushdown_default.test
      mysql-test/suite/rpl/r/rpl_row_basic_allow_batching.result
      mysql-test/suite/rpl/t/rpl_row_basic_allow_batching.test
    renamed:
      mysql-test/suite/ndb/r/ndb_join_pushdown.result => mysql-test/suite/ndb/r/ndb_join_pushdown_default.result
      mysql-test/suite/ndb/t/ndb_join_pushdown.test => mysql-test/suite/ndb/t/ndb_join_pushdown.inc
    modified:
      mysql-test/suite/ndb/r/ndb_condition_pushdown.result
      mysql-test/suite/ndb/t/ndb_condition_pushdown.test
      mysql-test/suite/sys_vars/t/slave_allow_batching_basic.test
      sql/ha_ndbcluster.cc
      sql/handler.h
      sql/log_event.cc
      sql/mysqld.cc
      sql/mysqld.h
      sql/rpl_slave.cc
      sql/rpl_slave.h
      sql/sys_vars.cc
      storage/ndb/include/kernel/ndb_limits.h
      storage/ndb/include/mgmapi/mgmapi_config_parameters.h
      storage/ndb/include/ndb_version.h.in
      storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
      storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
      storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
      storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
      storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhCommon.cpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhCommon.hpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
      storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
      storage/ndb/src/kernel/vm/GlobalData.hpp
      storage/ndb/src/kernel/vm/pc.hpp
      mysql-test/suite/ndb/t/ndb_join_pushdown.inc
 3409 magnus.blaudd@stripped	2011-11-15 [merge]
      Merge

    added:
      mysql-test/suite/ndb/r/ndb_multi_update_delete.result
      mysql-test/suite/ndb/t/ndb_multi_update_delete.test
      mysql-test/suite/ndb_memcache/r/unique_idx.result
      mysql-test/suite/ndb_memcache/t/unique_idx.test
      sql/ndb_component.cc
      sql/ndb_component.h
      sql/ndb_util_thread.h
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/SchemaChangeTest.java
      storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/SchemaChangeTest.java
      storage/ndb/include/kernel/statedesc.hpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhStateDesc.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcStateDesc.cpp
    modified:
      VERSION
      mysql-test/suite/ndb/r/ndb_condition_pushdown.result
      mysql-test/suite/ndb/r/ndb_index_stat.result
      mysql-test/suite/ndb/r/ndbinfo.result
      mysql-test/suite/ndb/r/ndbinfo_dump.result
      mysql-test/suite/ndb/t/ndb_condition_pushdown.test
      mysql-test/suite/ndb/t/ndbinfo.test
      mysql-test/suite/ndb_big/my.cnf
      mysql-test/suite/perfschema/r/pfs_upgrade.result
      scripts/mysql_system_tables.sql
      sql/ha_ndb_index_stat.cc
      sql/ha_ndb_index_stat.h
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster.h
      sql/ha_ndbcluster_binlog.cc
      sql/ha_ndbcluster_binlog.h
      sql/ha_ndbcluster_cond.cc
      sql/ha_ndbcluster_cond.h
      sql/ha_ndbcluster_connection.cc
      sql/handler.cc
      sql/ndb_global_schema_lock.cc
      sql/ndb_ndbapi_util.cc
      sql/ndb_ndbapi_util.h
      sql/ndb_schema_dist.cc
      sql/ndb_schema_dist.h
      sql/ndb_share.cc
      sql/ndb_share.h
      sql/sql_select.cc
      storage/ndb/CMakeLists.txt
      storage/ndb/VERSION
      storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/Session.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionFactoryImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainTypeHandlerFactoryImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/DomainTypeHandlerImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/Dictionary.java
      storage/ndb/clusterj/clusterj-core/src/main/resources/com/mysql/clusterj/core/Bundle.properties
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractClusterJTest.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DictionaryImpl.java
      storage/ndb/include/kernel/signaldata/ScanFrag.hpp
      storage/ndb/include/kernel/signaldata/TupKey.hpp
      storage/ndb/include/ndbapi/NdbReceiver.hpp
      storage/ndb/memcache/include/KeyPrefix.h
      storage/ndb/memcache/include/QueryPlan.h
      storage/ndb/memcache/include/Record.h
      storage/ndb/memcache/include/TableSpec.h
      storage/ndb/memcache/include/int3korr.h
      storage/ndb/memcache/include/workitem.h
      storage/ndb/memcache/scripts/pmpstack.awk
      storage/ndb/memcache/src/Config_v1.cc
      storage/ndb/memcache/src/QueryPlan.cc
      storage/ndb/memcache/src/Record.cc
      storage/ndb/memcache/src/TableSpec.cc
      storage/ndb/memcache/src/ndb_worker.cc
      storage/ndb/src/common/portlib/NdbThread.c
      storage/ndb/src/common/util/NdbPack.cpp
      storage/ndb/src/kernel/blocks/ERROR_codes.txt
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
      storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
      storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp
      storage/ndb/src/kernel/vm/DLFifoList.hpp
      storage/ndb/src/kernel/vm/DLHashTable.hpp
      storage/ndb/src/kernel/vm/Rope.cpp
      storage/ndb/src/kernel/vm/Rope.hpp
      storage/ndb/src/kernel/vm/SimulatedBlock.hpp
      storage/ndb/src/kernel/vm/mt.cpp
      storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
      storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
      storage/ndb/src/ndbapi/NdbReceiver.cpp
      storage/ndb/src/ndbapi/NdbRecord.hpp
      storage/ndb/src/ndbapi/NdbScanOperation.cpp
      storage/ndb/test/ndbapi/flexAsynch.cpp
      storage/ndb/test/ndbapi/testNodeRestart.cpp
      storage/ndb/test/run-test/daily-basic-tests.txt
      storage/ndb/test/src/HugoQueries.cpp
      storage/ndb/tools/CMakeLists.txt
      storage/ndb/tools/ndbinfo_sql.cpp
=== modified file 'mysql-test/suite/ndb/r/ndb_condition_pushdown.result'
--- a/mysql-test/suite/ndb/r/ndb_condition_pushdown.result	2011-11-15 13:13:38 +0000
+++ b/mysql-test/suite/ndb/r/ndb_condition_pushdown.result	2011-11-16 09:29:49 +0000
@@ -2210,38 +2210,6 @@ select * from t where x not like 'ye%' o
 x
 no
 drop table t;
-set @@optimizer_switch = 'engine_condition_pushdown=on';
-create table t (pk int, i int) engine = ndb;
-insert into t values (1,3), (3,6), (6,9), (9,1);
-create table subq (pk int, i int) engine = ndb;
-insert into subq values (1,3), (3,6), (6,9), (9,1);
-explain extended 
-select * from t where exists
-(select * from t as subq where subq.i=3 and t.i=3);
-id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
-1	PRIMARY	t	ALL	NULL	NULL	NULL	NULL	4	100.00	Using where
-2	DEPENDENT SUBQUERY	subq	ALL	NULL	NULL	NULL	NULL	4	100.00	Using where with pushed condition: (`test`.`subq`.`i` = 3)
-Warnings:
-Note	1276	Field or reference 'test.t.i' of SELECT #2 was resolved in SELECT #1
-Note	1003	/* select#1 */ select `test`.`t`.`pk` AS `pk`,`test`.`t`.`i` AS `i` from `test`.`t` where exists(/* select#2 */ select 1 from `test`.`t` `subq` where ((`test`.`subq`.`i` = 3) and (`test`.`t`.`i` = 3)))
-explain extended 
-select * from t where exists
-(select * from subq where subq.i=3 and t.i=3);
-id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
-1	PRIMARY	t	ALL	NULL	NULL	NULL	NULL	4	100.00	Using where
-2	DEPENDENT SUBQUERY	subq	ALL	NULL	NULL	NULL	NULL	4	100.00	Using where with pushed condition: (`test`.`subq`.`i` = 3)
-Warnings:
-Note	1276	Field or reference 'test.t.i' of SELECT #2 was resolved in SELECT #1
-Note	1003	/* select#1 */ select `test`.`t`.`pk` AS `pk`,`test`.`t`.`i` AS `i` from `test`.`t` where exists(/* select#2 */ select 1 from `test`.`subq` where ((`test`.`subq`.`i` = 3) and (`test`.`t`.`i` = 3)))
-select * from t where exists
-(select * from t as subq where subq.i=3 and t.i=3);
-pk	i
-1	3
-select * from t where exists
-(select * from subq where subq.i=3 and t.i=3);
-pk	i
-1	3
-drop table t,subq;
 create table tx (
 a int not null,
 b int not null,
@@ -2303,6 +2271,40 @@ Note	9999	Table 't2' is not pushable: GR
 Note	9999	Table 'tx' is not pushable: GROUP BY cannot be done using index on grouped columns.
 Note	1003	/* select#1 */ select `test`.`t2`.`c` AS `c`,count(distinct `test`.`t2`.`a`) AS `count(distinct t2.a)` from `test`.`tx` join `test`.`tx` `t2` where ((`test`.`tx`.`b` = `test`.`t2`.`d`) and (`test`.`tx`.`a` = `test`.`t2`.`c`) and (`test`.`t2`.`a` = 4)) group by `test`.`t2`.`c`
 drop table tx;
+set engine_condition_pushdown = on;
+Warnings:
+Warning	1287	The syntax '@@engine_condition_pushdown' is deprecated and will be removed in MySQL 7.0. Please use '@@optimizer_switch' instead
+create table t (pk int, i int) engine = ndb;
+insert into t values (1,3), (3,6), (6,9), (9,1);
+create table subq (pk int, i int) engine = ndb;
+insert into subq values (1,3), (3,6), (6,9), (9,1);
+explain extended 
+select * from t where exists
+(select * from t as subq where subq.i=3 and t.i=3);
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	t	ALL	NULL	NULL	NULL	NULL	4	100.00	Using where
+2	DEPENDENT SUBQUERY	subq	ALL	NULL	NULL	NULL	NULL	4	100.00	Using where with pushed condition: (`test`.`subq`.`i` = 3)
+Warnings:
+Note	1276	Field or reference 'test.t.i' of SELECT #2 was resolved in SELECT #1
+Note	1003	select `test`.`t`.`pk` AS `pk`,`test`.`t`.`i` AS `i` from `test`.`t` where exists(select 1 from `test`.`t` `subq` where ((`test`.`subq`.`i` = 3) and (`test`.`t`.`i` = 3)))
+explain extended 
+select * from t where exists
+(select * from subq where subq.i=3 and t.i=3);
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
+1	PRIMARY	t	ALL	NULL	NULL	NULL	NULL	4	100.00	Using where
+2	DEPENDENT SUBQUERY	subq	ALL	NULL	NULL	NULL	NULL	4	100.00	Using where with pushed condition: (`test`.`subq`.`i` = 3)
+Warnings:
+Note	1276	Field or reference 'test.t.i' of SELECT #2 was resolved in SELECT #1
+Note	1003	select `test`.`t`.`pk` AS `pk`,`test`.`t`.`i` AS `i` from `test`.`t` where exists(select 1 from `test`.`subq` where ((`test`.`subq`.`i` = 3) and (`test`.`t`.`i` = 3)))
+select * from t where exists
+(select * from t as subq where subq.i=3 and t.i=3);
+pk	i
+1	3
+select * from t where exists
+(select * from subq where subq.i=3 and t.i=3);
+pk	i
+1	3
+drop table t,subq;
 create table t (pk1 int, pk2 int, primary key(pk1,pk2)) engine = ndb;
 insert into t values (1,0), (2,0), (3,0), (4,0);
 set @@optimizer_switch='engine_condition_pushdown=on';

=== renamed file 'mysql-test/suite/ndb/r/ndb_join_pushdown.result' => 'mysql-test/suite/ndb/r/ndb_join_pushdown_default.result'
=== modified file 'mysql-test/suite/ndb/t/ndb_condition_pushdown.test'
--- a/mysql-test/suite/ndb/t/ndb_condition_pushdown.test	2011-11-10 20:35:28 +0000
+++ b/mysql-test/suite/ndb/t/ndb_condition_pushdown.test	2011-11-16 08:17:17 +0000
@@ -2290,31 +2290,6 @@ explain select * from t where x not like
 select * from t where x not like 'ye%' order by x;
 drop table t;
 
-# Bug#58134: Incorrectly condition pushdown inside subquery to NDB engine
-set @@optimizer_switch = 'engine_condition_pushdown=on';
-
-create table t (pk int, i int) engine = ndb;
-insert into t values (1,3), (3,6), (6,9), (9,1);
-create table subq (pk int, i int) engine = ndb;
-insert into subq values (1,3), (3,6), (6,9), (9,1);
-
-# 'Explain extended' to verify that only 'subq.i=3' is pushed
-explain extended 
-select * from t where exists
-  (select * from t as subq where subq.i=3 and t.i=3);
-explain extended 
-  select * from t where exists
-    (select * from subq where subq.i=3 and t.i=3);
-
---sorted_result
-select * from t where exists
-  (select * from t as subq where subq.i=3 and t.i=3);
---sorted_result
-select * from t where exists
-  (select * from subq where subq.i=3 and t.i=3);
-
-drop table t,subq;
-
 # Bug#58553 Queries with pushed conditions causes 'explain extended' to crash mysqld
 create table tx (
   a int not null,
@@ -2351,29 +2326,8 @@ group by t2.c;
 
 drop table tx;
 
-# Bug#58791 Incorrect result as Cluster may fail to reject an unpushable condition
-
-create table t (pk1 int, pk2 int, primary key(pk1,pk2)) engine = ndb;
-insert into t values (1,0), (2,0), (3,0), (4,0);
-
-set @@optimizer_switch='engine_condition_pushdown=on';
-
-# Multiple instances of same table (t as table<n>, ) confused 
-# ha_ndbcluster::cond_push() which accepted
-# '(table1.pk1 = 7 or table2.pk1 = 3)' as a pushable cond.
-# for 'table2'
-#
-
---sorted_result
-select table1.pk1, table2.pk1, table1.pk2, table2.pk2
- from t as table1, t as table2
- where table2.pk1 in (0,3) and
-   (table1.pk1 = 7 or table2.pk1 = 3);
-
-drop table t;
-
 # Bug#58134: Incorrectly condition pushdown inside subquery to NDB engine
-set @@optimizer_switch = 'engine_condition_pushdown=on';
+set engine_condition_pushdown = on;
 
 create table t (pk int, i int) engine = ndb;
 insert into t values (1,3), (3,6), (6,9), (9,1);
@@ -2395,13 +2349,31 @@ select * from t where exists
 select * from t where exists
   (select * from subq where subq.i=3 and t.i=3);
 
-# extra test of subquery
-explain extended
-select * from t
-where i = (select max(i) from t);
-
 drop table t,subq;
 
+
+
+# Bug#58791 Incorrect result as Cluster may fail to reject an unpushable condition
+
+create table t (pk1 int, pk2 int, primary key(pk1,pk2)) engine = ndb;
+insert into t values (1,0), (2,0), (3,0), (4,0);
+
+set @@optimizer_switch='engine_condition_pushdown=on';
+
+# Multiple instances of same table (t as table<n>, ) confused 
+# ha_ndbcluster::cond_push() which accepted
+# '(table1.pk1 = 7 or table2.pk1 = 3)' as a pushable cond.
+# for 'table2'
+#
+
+--sorted_result
+select table1.pk1, table2.pk1, table1.pk2, table2.pk2
+ from t as table1, t as table2
+ where table2.pk1 in (0,3) and
+   (table1.pk1 = 7 or table2.pk1 = 3);
+
+drop table t;
+
 # Bug#11765142 58073: CONCAT AND OR GIVE INCORRECT QUERY RESULTS
 create table mytable(i int, s varchar(255) ) engine = ndb;
 insert into mytable values(0,"Text Hej"),(1, "xText aaja");

=== renamed file 'mysql-test/suite/ndb/t/ndb_join_pushdown.test' => 'mysql-test/suite/ndb/t/ndb_join_pushdown.inc'
--- a/mysql-test/suite/ndb/t/ndb_join_pushdown.test	2011-10-24 08:50:10 +0000
+++ b/mysql-test/suite/ndb/t/ndb_join_pushdown.inc	2011-11-16 08:17:17 +0000
@@ -1,5 +1,3 @@
--- source include/have_ndb.inc
-
 ########################################
 # Define two connections as we want DDL to use its own connection
 # in order to keep DDL statistics counting out of the way

=== added file 'mysql-test/suite/ndb/t/ndb_join_pushdown_default.test'
--- a/mysql-test/suite/ndb/t/ndb_join_pushdown_default.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb/t/ndb_join_pushdown_default.test	2011-11-15 13:57:10 +0000
@@ -0,0 +1,7 @@
+#
+# Test of pushed joins, aka SPJ, or AQL.
+# (Runs with default set of switches)
+#
+
+--source include/have_ndb.inc
+--source ndb_join_pushdown.inc 

=== added file 'mysql-test/suite/rpl/r/rpl_row_basic_allow_batching.result'
--- a/mysql-test/suite/rpl/r/rpl_row_basic_allow_batching.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl/r/rpl_row_basic_allow_batching.result	2011-11-15 14:25:58 +0000
@@ -0,0 +1,648 @@
+include/master-slave.inc
+[connection master]
+show variables like 'slave_allow_batching';
+Variable_name	Value
+slave_allow_batching	OFF
+Show that slave_allow_batching cannot be changed while slave is running
+set global slave_allow_batching=ON;
+ERROR HY000: This operation cannot be performed with a running slave; run STOP SLAVE first
+show warnings;
+Level	Code	Message
+Error	1198	This operation cannot be performed with a running slave; run STOP SLAVE first
+show variables like 'slave_allow_batching';
+Variable_name	Value
+slave_allow_batching	OFF
+Now stop slave and change it
+stop slave;
+set global slave_allow_batching=ON;
+show variables like 'slave_allow_batching';
+Variable_name	Value
+slave_allow_batching	ON
+start slave;
+Now the normal test
+CREATE TABLE t1 (C1 CHAR(1), C2 CHAR(1), INDEX (C1)) ENGINE = 'INNODB'  ;
+SELECT * FROM t1;
+C1	C2
+SELECT * FROM t1;
+C1	C2
+INSERT INTO t1 VALUES ('A','B'), ('X','Y'), ('X','X');
+INSERT INTO t1 VALUES ('A','C'), ('X','Z'), ('A','A');
+SELECT * FROM t1 ORDER BY C1,C2;
+C1	C2
+A	A
+A	B
+A	C
+X	X
+X	Y
+X	Z
+SELECT * FROM t1 ORDER BY C1,C2;
+C1	C2
+A	A
+A	B
+A	C
+X	X
+X	Y
+X	Z
+DELETE FROM t1 WHERE C1 = C2;
+SELECT * FROM t1 ORDER BY C1,C2;
+C1	C2
+A	B
+A	C
+X	Y
+X	Z
+SELECT * FROM t1 ORDER BY C1,C2;
+C1	C2
+A	B
+A	C
+X	Y
+X	Z
+UPDATE t1 SET C2 = 'I' WHERE C1 = 'A' AND C2 = 'C';
+SELECT * FROM t1 ORDER BY C1,C2;
+C1	C2
+A	B
+A	I
+X	Y
+X	Z
+SELECT * FROM t1 ORDER BY C1,C2;
+C1	C2
+A	B
+A	I
+X	Y
+X	Z
+UPDATE t1 SET c2 = 'Q' WHERE c1 = 'A' AND c2 = 'N';
+SELECT * FROM t1 ORDER BY c1,c2;
+C1	C2
+A	B
+A	I
+X	Y
+X	Z
+SELECT * FROM t1 ORDER BY c1,c2;
+C1	C2
+A	B
+A	I
+X	Y
+X	Z
+CREATE TABLE t2 (c1 INT, c12 char(1), c2 INT, PRIMARY KEY (c1)) ENGINE = 'INNODB'  ;
+INSERT INTO t2
+VALUES (1,'A',2),  (2,'A',4),  (3,'A',9),  (4,'A',15), (5,'A',25),
+(6,'A',35), (7,'A',50), (8,'A',64), (9,'A',81);
+SELECT * FROM t2 ORDER BY c1,c2;
+c1	c12	c2
+1	A	2
+2	A	4
+3	A	9
+4	A	15
+5	A	25
+6	A	35
+7	A	50
+8	A	64
+9	A	81
+SELECT * FROM t2 WHERE c2 = c1 * c1 ORDER BY c1,c2;
+c1	c12	c2
+2	A	4
+3	A	9
+5	A	25
+8	A	64
+9	A	81
+SELECT * FROM t2 ORDER BY c1,c2;
+c1	c12	c2
+1	A	2
+2	A	4
+3	A	9
+4	A	15
+5	A	25
+6	A	35
+7	A	50
+8	A	64
+9	A	81
+SELECT * FROM t2 WHERE c2 = c1 * c1 ORDER BY c1,c2;
+c1	c12	c2
+2	A	4
+3	A	9
+5	A	25
+8	A	64
+9	A	81
+UPDATE t2 SET c2 = c1*c1 WHERE c2 != c1*c1;
+SELECT * FROM t2 WHERE c2 = c1 * c1 ORDER BY c1,c2;
+c1	c12	c2
+1	A	1
+2	A	4
+3	A	9
+4	A	16
+5	A	25
+6	A	36
+7	A	49
+8	A	64
+9	A	81
+SELECT * FROM t2 WHERE c2 = c1 * c1 ORDER BY c1,c2;
+c1	c12	c2
+1	A	1
+2	A	4
+3	A	9
+4	A	16
+5	A	25
+6	A	36
+7	A	49
+8	A	64
+9	A	81
+UPDATE t2 SET c12 = 'Q' WHERE c1 = 1 AND c2 = 999;
+SELECT * FROM t2 ORDER BY c1,c2;
+c1	c12	c2
+1	A	1
+2	A	4
+3	A	9
+4	A	16
+5	A	25
+6	A	36
+7	A	49
+8	A	64
+9	A	81
+SELECT * FROM t2 ORDER BY c1,c2;
+c1	c12	c2
+1	A	1
+2	A	4
+3	A	9
+4	A	16
+5	A	25
+6	A	36
+7	A	49
+8	A	64
+9	A	81
+DELETE FROM t2 WHERE c1 % 4 = 0;
+SELECT * FROM t2 ORDER BY c1,c2;
+c1	c12	c2
+1	A	1
+2	A	4
+3	A	9
+5	A	25
+6	A	36
+7	A	49
+9	A	81
+SELECT * FROM t2 ORDER BY c1,c2;
+c1	c12	c2
+1	A	1
+2	A	4
+3	A	9
+5	A	25
+6	A	36
+7	A	49
+9	A	81
+UPDATE t2 SET c12='X';
+CREATE TABLE t3 (C1 CHAR(1), C2 CHAR(1), pk1 INT, C3 CHAR(1), pk2 INT, PRIMARY KEY (pk1,pk2)) ENGINE = 'INNODB'  ;
+INSERT INTO t3 VALUES ('A','B',1,'B',1), ('X','Y',2,'B',1), ('X','X',3,'B',1);
+INSERT INTO t3 VALUES ('A','C',1,'B',2), ('X','Z',2,'B',2), ('A','A',3,'B',2);
+SELECT * FROM t3 ORDER BY C1,C2;
+C1	C2	pk1	C3	pk2
+A	A	3	B	2
+A	B	1	B	1
+A	C	1	B	2
+X	X	3	B	1
+X	Y	2	B	1
+X	Z	2	B	2
+SELECT * FROM t3 ORDER BY C1,C2;
+C1	C2	pk1	C3	pk2
+A	A	3	B	2
+A	B	1	B	1
+A	C	1	B	2
+X	X	3	B	1
+X	Y	2	B	1
+X	Z	2	B	2
+DELETE FROM t3 WHERE C1 = C2;
+SELECT * FROM t3 ORDER BY C1,C2;
+C1	C2	pk1	C3	pk2
+A	B	1	B	1
+A	C	1	B	2
+X	Y	2	B	1
+X	Z	2	B	2
+SELECT * FROM t3 ORDER BY C1,C2;
+C1	C2	pk1	C3	pk2
+A	B	1	B	1
+A	C	1	B	2
+X	Y	2	B	1
+X	Z	2	B	2
+UPDATE t3 SET C2 = 'I' WHERE C1 = 'A' AND C2 = 'C';
+SELECT * FROM t3 ORDER BY C1,C2;
+C1	C2	pk1	C3	pk2
+A	B	1	B	1
+A	I	1	B	2
+X	Y	2	B	1
+X	Z	2	B	2
+SELECT * FROM t3 ORDER BY C1,C2;
+C1	C2	pk1	C3	pk2
+A	B	1	B	1
+A	I	1	B	2
+X	Y	2	B	1
+X	Z	2	B	2
+CREATE TABLE t6 (C1 CHAR(1), C2 CHAR(1), C3 INT) ENGINE = 'INNODB' ;
+INSERT INTO t6 VALUES ('A','B',1), ('X','Y',2), ('X','X',3);
+INSERT INTO t6 VALUES ('A','C',4), ('X','Z',5), ('A','A',6);
+SELECT * FROM t6 ORDER BY C3;
+C1	C2	C3
+A	B	1
+X	Y	2
+X	X	3
+A	C	4
+X	Z	5
+A	A	6
+SELECT * FROM t6 ORDER BY C3;
+C1	C2	C3
+A	B	1
+X	Y	2
+X	X	3
+A	C	4
+X	Z	5
+A	A	6
+DELETE FROM t6 WHERE C1 = C2;
+SELECT * FROM t6 ORDER BY C3;
+C1	C2	C3
+A	B	1
+X	Y	2
+A	C	4
+X	Z	5
+SELECT * FROM t6 ORDER BY C3;
+C1	C2	C3
+A	B	1
+X	Y	2
+A	C	4
+X	Z	5
+UPDATE t6 SET C2 = 'I' WHERE C1 = 'A' AND C2 = 'C';
+SELECT * FROM t6 ORDER BY C3;
+C1	C2	C3
+A	B	1
+X	Y	2
+A	I	4
+X	Z	5
+SELECT * FROM t6 ORDER BY C3;
+C1	C2	C3
+A	B	1
+X	Y	2
+A	I	4
+X	Z	5
+CREATE TABLE t5 (C1 CHAR(1), C2 CHAR(1), C3 INT PRIMARY KEY) ENGINE = 'INNODB'  ;
+INSERT INTO t5 VALUES ('A','B',1), ('X','Y',2), ('X','X',3);
+INSERT INTO t5 VALUES ('A','C',4), ('X','Z',5), ('A','A',6);
+UPDATE t5,t2,t3 SET t5.C2='Q', t2.c12='R', t3.C3 ='S' WHERE t5.C1 = t2.c12 AND t5.C1 = t3.C1;
+SELECT * FROM t5,t2,t3 WHERE t5.C2='Q' AND t2.c12='R' AND t3.C3 ='S' ORDER BY t5.C3,t2.c1,t3.pk1,t3.pk2;
+C1	C2	C3	c1	c12	c2	C1	C2	pk1	C3	pk2
+X	Q	2	1	R	1	X	Y	2	S	1
+X	Q	2	1	R	1	X	Z	2	S	2
+X	Q	2	2	R	4	X	Y	2	S	1
+X	Q	2	2	R	4	X	Z	2	S	2
+X	Q	2	3	R	9	X	Y	2	S	1
+X	Q	2	3	R	9	X	Z	2	S	2
+X	Q	2	5	R	25	X	Y	2	S	1
+X	Q	2	5	R	25	X	Z	2	S	2
+X	Q	2	6	R	36	X	Y	2	S	1
+X	Q	2	6	R	36	X	Z	2	S	2
+X	Q	2	7	R	49	X	Y	2	S	1
+X	Q	2	7	R	49	X	Z	2	S	2
+X	Q	2	9	R	81	X	Y	2	S	1
+X	Q	2	9	R	81	X	Z	2	S	2
+X	Q	3	1	R	1	X	Y	2	S	1
+X	Q	3	1	R	1	X	Z	2	S	2
+X	Q	3	2	R	4	X	Y	2	S	1
+X	Q	3	2	R	4	X	Z	2	S	2
+X	Q	3	3	R	9	X	Y	2	S	1
+X	Q	3	3	R	9	X	Z	2	S	2
+X	Q	3	5	R	25	X	Y	2	S	1
+X	Q	3	5	R	25	X	Z	2	S	2
+X	Q	3	6	R	36	X	Y	2	S	1
+X	Q	3	6	R	36	X	Z	2	S	2
+X	Q	3	7	R	49	X	Y	2	S	1
+X	Q	3	7	R	49	X	Z	2	S	2
+X	Q	3	9	R	81	X	Y	2	S	1
+X	Q	3	9	R	81	X	Z	2	S	2
+X	Q	5	1	R	1	X	Y	2	S	1
+X	Q	5	1	R	1	X	Z	2	S	2
+X	Q	5	2	R	4	X	Y	2	S	1
+X	Q	5	2	R	4	X	Z	2	S	2
+X	Q	5	3	R	9	X	Y	2	S	1
+X	Q	5	3	R	9	X	Z	2	S	2
+X	Q	5	5	R	25	X	Y	2	S	1
+X	Q	5	5	R	25	X	Z	2	S	2
+X	Q	5	6	R	36	X	Y	2	S	1
+X	Q	5	6	R	36	X	Z	2	S	2
+X	Q	5	7	R	49	X	Y	2	S	1
+X	Q	5	7	R	49	X	Z	2	S	2
+X	Q	5	9	R	81	X	Y	2	S	1
+X	Q	5	9	R	81	X	Z	2	S	2
+SELECT * FROM t5,t2,t3 WHERE t5.C2='Q' AND t2.c12='R' AND t3.C3 ='S' ORDER BY t5.C3,t2.c1,t3.pk1,t3.pk2;
+C1	C2	C3	c1	c12	c2	C1	C2	pk1	C3	pk2
+X	Q	2	1	R	1	X	Y	2	S	1
+X	Q	2	1	R	1	X	Z	2	S	2
+X	Q	2	2	R	4	X	Y	2	S	1
+X	Q	2	2	R	4	X	Z	2	S	2
+X	Q	2	3	R	9	X	Y	2	S	1
+X	Q	2	3	R	9	X	Z	2	S	2
+X	Q	2	5	R	25	X	Y	2	S	1
+X	Q	2	5	R	25	X	Z	2	S	2
+X	Q	2	6	R	36	X	Y	2	S	1
+X	Q	2	6	R	36	X	Z	2	S	2
+X	Q	2	7	R	49	X	Y	2	S	1
+X	Q	2	7	R	49	X	Z	2	S	2
+X	Q	2	9	R	81	X	Y	2	S	1
+X	Q	2	9	R	81	X	Z	2	S	2
+X	Q	3	1	R	1	X	Y	2	S	1
+X	Q	3	1	R	1	X	Z	2	S	2
+X	Q	3	2	R	4	X	Y	2	S	1
+X	Q	3	2	R	4	X	Z	2	S	2
+X	Q	3	3	R	9	X	Y	2	S	1
+X	Q	3	3	R	9	X	Z	2	S	2
+X	Q	3	5	R	25	X	Y	2	S	1
+X	Q	3	5	R	25	X	Z	2	S	2
+X	Q	3	6	R	36	X	Y	2	S	1
+X	Q	3	6	R	36	X	Z	2	S	2
+X	Q	3	7	R	49	X	Y	2	S	1
+X	Q	3	7	R	49	X	Z	2	S	2
+X	Q	3	9	R	81	X	Y	2	S	1
+X	Q	3	9	R	81	X	Z	2	S	2
+X	Q	5	1	R	1	X	Y	2	S	1
+X	Q	5	1	R	1	X	Z	2	S	2
+X	Q	5	2	R	4	X	Y	2	S	1
+X	Q	5	2	R	4	X	Z	2	S	2
+X	Q	5	3	R	9	X	Y	2	S	1
+X	Q	5	3	R	9	X	Z	2	S	2
+X	Q	5	5	R	25	X	Y	2	S	1
+X	Q	5	5	R	25	X	Z	2	S	2
+X	Q	5	6	R	36	X	Y	2	S	1
+X	Q	5	6	R	36	X	Z	2	S	2
+X	Q	5	7	R	49	X	Y	2	S	1
+X	Q	5	7	R	49	X	Z	2	S	2
+X	Q	5	9	R	81	X	Y	2	S	1
+X	Q	5	9	R	81	X	Z	2	S	2
+CREATE TABLE t4 (C1 CHAR(1) PRIMARY KEY, B1 BIT(1), B2 BIT(1) NOT NULL DEFAULT 0, C2 CHAR(1) NOT NULL DEFAULT 'A') ENGINE = 'INNODB'  ;
+INSERT INTO t4 SET C1 = 1;
+SELECT C1,HEX(B1),HEX(B2) FROM t4 ORDER BY C1;
+C1	HEX(B1)	HEX(B2)
+1	NULL	0
+SELECT C1,HEX(B1),HEX(B2) FROM t4 ORDER BY C1;
+C1	HEX(B1)	HEX(B2)
+1	NULL	0
+CREATE TABLE t7 (C1 INT PRIMARY KEY, C2 INT) ENGINE = 'INNODB'  ;
+--- on slave: original values ---
+INSERT INTO t7 VALUES (1,3), (2,6), (3,9);
+SELECT * FROM t7 ORDER BY C1;
+C1	C2
+1	3
+2	6
+3	9
+set @@global.slave_exec_mode= 'IDEMPOTENT';
+--- on master: new values inserted ---
+INSERT INTO t7 VALUES (1,2), (2,4), (3,6);
+SELECT * FROM t7 ORDER BY C1;
+C1	C2
+1	2
+2	4
+3	6
+set @@global.slave_exec_mode= default;
+--- on slave: old values should be overwritten by replicated values ---
+SELECT * FROM t7 ORDER BY C1;
+C1	C2
+1	2
+2	4
+3	6
+--- on master ---
+CREATE TABLE t8 (a INT PRIMARY KEY, b INT UNIQUE, c INT UNIQUE) ENGINE = 'INNODB'  ;
+INSERT INTO t8 VALUES (99,99,99);
+INSERT INTO t8 VALUES (99,22,33);
+ERROR 23000: Duplicate entry '99' for key 'PRIMARY'
+INSERT INTO t8 VALUES (11,99,33);
+ERROR 23000: Duplicate entry '99' for key 'b'
+INSERT INTO t8 VALUES (11,22,99);
+ERROR 23000: Duplicate entry '99' for key 'c'
+SELECT * FROM t8 ORDER BY a;
+a	b	c
+99	99	99
+--- on slave ---
+SELECT * FROM t8 ORDER BY a;
+a	b	c
+99	99	99
+INSERT INTO t8 VALUES (1,2,3), (2,4,6), (3,6,9);
+SELECT * FROM t8 ORDER BY a;
+a	b	c
+1	2	3
+2	4	6
+3	6	9
+99	99	99
+set @@global.slave_exec_mode= 'IDEMPOTENT';
+--- on master ---
+INSERT INTO t8 VALUES (2,4,8);
+set @@global.slave_exec_mode= default;
+--- on slave ---
+SELECT * FROM t8 ORDER BY a;
+a	b	c
+1	2	3
+2	4	8
+3	6	9
+99	99	99
+**** Test for BUG#31552 ****
+**** On Master ****
+DELETE FROM t1;
+include/rpl_reset.inc
+**** On Master ****
+INSERT INTO t1 VALUES ('K','K'), ('L','L'), ('M','M');
+**** On Master ****
+set @@global.slave_exec_mode= 'IDEMPOTENT';
+DELETE FROM t1 WHERE C1 = 'L';
+DELETE FROM t1;
+SELECT COUNT(*) FROM t1 ORDER BY c1,c2;
+COUNT(*)	0
+set @@global.slave_exec_mode= default;
+include/check_slave_is_running.inc
+SELECT COUNT(*) FROM t1 ORDER BY c1,c2;
+COUNT(*)	0
+**** Test for BUG#37076 ****
+**** On Master ****
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a TIMESTAMP, b DATETIME, c DATE);
+INSERT INTO t1 VALUES(
+'2005-11-14 01:01:01', '2005-11-14 01:01:02', '2005-11-14');
+**** On Slave ****
+SELECT * FROM t1;
+a	b	c
+2005-11-14 01:01:01	2005-11-14 01:01:02	2005-11-14
+DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7,t8;
+CREATE TABLE t1 (i INT NOT NULL,
+c CHAR(16) CHARACTER SET utf8 NOT NULL,
+j INT NOT NULL) ENGINE = 'INNODB'  ;
+CREATE TABLE t2 (i INT NOT NULL,
+c CHAR(16) CHARACTER SET utf8 NOT NULL,
+j INT NOT NULL) ENGINE = 'INNODB'  ;
+ALTER TABLE t2 MODIFY c CHAR(128) CHARACTER SET utf8 NOT NULL;
+CREATE TABLE t3 (i INT NOT NULL,
+c CHAR(128) CHARACTER SET utf8 NOT NULL,
+j INT NOT NULL) ENGINE = 'INNODB'  ;
+ALTER TABLE t3 MODIFY c CHAR(16) CHARACTER SET utf8 NOT NULL;
+CREATE TABLE t4 (i INT NOT NULL,
+c CHAR(128) CHARACTER SET utf8 NOT NULL,
+j INT NOT NULL) ENGINE = 'INNODB'  ;
+CREATE TABLE t5 (i INT NOT NULL,
+c CHAR(255) CHARACTER SET utf8 NOT NULL,
+j INT NOT NULL) ENGINE = 'INNODB'  ;
+ALTER TABLE t5 MODIFY c CHAR(16) CHARACTER SET utf8 NOT NULL;
+CREATE TABLE t6 (i INT NOT NULL,
+c CHAR(255) CHARACTER SET utf8 NOT NULL,
+j INT NOT NULL) ENGINE = 'INNODB'  ;
+ALTER TABLE t6 MODIFY c CHAR(128) CHARACTER SET utf8 NOT NULL;
+CREATE TABLE t7 (i INT NOT NULL,
+c CHAR(255) CHARACTER SET utf8 NOT NULL,
+j INT NOT NULL) ENGINE = 'INNODB'  ;
+SET @saved_slave_type_conversions = @@slave_type_conversions;
+SET GLOBAL SLAVE_TYPE_CONVERSIONS = 'ALL_NON_LOSSY';
+[expecting slave to replicate correctly]
+INSERT INTO t1 VALUES (1, "", 1);
+INSERT INTO t1 VALUES (2, repeat(_utf8'a', 16), 2);
+include/diff_tables.inc [master:t1, slave:t1]
+[expecting slave to replicate correctly]
+INSERT INTO t2 VALUES (1, "", 1);
+INSERT INTO t2 VALUES (2, repeat(_utf8'a', 16), 2);
+include/diff_tables.inc [master:t2, slave:t2]
+SET GLOBAL SLAVE_TYPE_CONVERSIONS = @saved_slave_type_conversions;
+call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 1 size mismatch.* Error_code: 1535");
+call mtr.add_suppression("Slave SQL.*Could not execute Delete_rows event on table test.t1.* Error_code: 1032");
+call mtr.add_suppression("Slave SQL.*Column 1 of table .test.t.. cannot be converted from type.*, Error_code: 1677");
+include/rpl_reset.inc
+[expecting slave to replicate correctly]
+INSERT INTO t4 VALUES (1, "", 1);
+INSERT INTO t4 VALUES (2, repeat(_utf8'a', 128), 2);
+include/diff_tables.inc [master:t4, slave:t4]
+[expecting slave to stop]
+INSERT INTO t5 VALUES (1, "", 1);
+INSERT INTO t5 VALUES (2, repeat(_utf8'a', 255), 2);
+include/wait_for_slave_sql_error.inc [errno=1677 ]
+Last_SQL_Error = 'Column 1 of table 'test.t5' cannot be converted from type 'char(255)' to type 'char(16)''
+include/rpl_reset.inc
+[expecting slave to stop]
+INSERT INTO t6 VALUES (1, "", 1);
+INSERT INTO t6 VALUES (2, repeat(_utf8'a', 255), 2);
+include/wait_for_slave_sql_error.inc [errno=1677 ]
+Last_SQL_Error = 'Column 1 of table 'test.t6' cannot be converted from type 'char(255)' to type 'char(128)''
+include/rpl_reset.inc
+[expecting slave to replicate correctly]
+INSERT INTO t7 VALUES (1, "", 1);
+INSERT INTO t7 VALUES (2, repeat(_utf8'a', 255), 2);
+include/diff_tables.inc [master:t7, slave:t7]
+drop table t1, t2, t3, t4, t5, t6, t7;
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE='INNODB' ;
+INSERT INTO t1 VALUES (1), (2), (3);
+UPDATE t1 SET a = 10;
+ERROR 23000: Duplicate entry '10' for key 'PRIMARY'
+INSERT INTO t1 VALUES (4);
+include/diff_tables.inc [master:t1, slave:t1]
+drop table t1;
+DROP TABLE IF EXISTS t1, t2;
+CREATE TABLE t1 (
+`pk` int(11) NOT NULL AUTO_INCREMENT,
+`int_nokey` int(11) NOT NULL,
+`int_key` int(11) NOT NULL,
+`date_key` date NOT NULL,
+`date_nokey` date NOT NULL,
+`time_key` time NOT NULL,
+`time_nokey` time NOT NULL,
+`datetime_key` datetime NOT NULL,
+`datetime_nokey` datetime NOT NULL,
+`varchar_key` varchar(1) NOT NULL,
+`varchar_nokey` varchar(1) NOT NULL,
+PRIMARY KEY (`pk`),
+KEY `int_key` (`int_key`),
+KEY `date_key` (`date_key`),
+KEY `time_key` (`time_key`),
+KEY `datetime_key` (`datetime_key`),
+KEY `varchar_key` (`varchar_key`)
+) ENGINE='INNODB' ;
+INSERT INTO t1 VALUES (1,8,5,'0000-00-00','0000-00-00','10:37:38','10:37:38','0000-00-00 00:00:00','0000-00-00 00:00:00','p','p'),(2,0,9,'0000-00-00','0000-00-00','00:00:00','00:00:00','2007-10-14 00:00:00','2007-10-14 00:00:00','d','d');
+CREATE TABLE t2 (
+`pk` int(11) NOT NULL AUTO_INCREMENT,
+`int_nokey` int(11) NOT NULL,
+`int_key` int(11) NOT NULL,
+`date_key` date NOT NULL,
+`date_nokey` date NOT NULL,
+`time_key` time NOT NULL,
+`time_nokey` time NOT NULL,
+`datetime_key` datetime NOT NULL,
+`datetime_nokey` datetime NOT NULL,
+`varchar_key` varchar(1) NOT NULL,
+`varchar_nokey` varchar(1) NOT NULL,
+PRIMARY KEY (`pk`),
+KEY `int_key` (`int_key`),
+KEY `date_key` (`date_key`),
+KEY `time_key` (`time_key`),
+KEY `datetime_key` (`datetime_key`),
+KEY `varchar_key` (`varchar_key`)
+) ENGINE='INNODB' ;
+INSERT INTO t2 VALUES (1,1,6,'2005-12-23','2005-12-23','02:24:28','02:24:28','0000-00-00 00:00:00','0000-00-00 00:00:00','g','g'),(2,0,3,'2009-09-14','2009-09-14','00:00:00','00:00:00','2000-01-30 16:39:40','2000-01-30 16:39:40','q','q'),(3,0,3,'0000-00-00','0000-00-00','00:00:00','00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','c','c'),(4,1,6,'2007-03-29','2007-03-29','15:49:00','15:49:00','0000-00-00 00:00:00','0000-00-00 00:00:00','m','m'),(5,4,0,'2002-12-04','2002-12-04','00:00:00','00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','o','o'),(6,9,0,'2005-01-28','2005-01-28','00:00:00','00:00:00','2001-05-18 00:00:00','2001-05-18 00:00:00','w','w'),(7,6,0,'0000-00-00','0000-00-00','06:57:25','06:57:25','0000-00-00 00:00:00','0000-00-00 00:00:00','m','m'),(8,0,0,'0000-00-00','0000-00-00','00:00:00','00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','z','z'),(9,4,6,'2006-08-15','2006-08-15','00:00:00','00:00:00','2002-04-12 14:44:25','2002-04-12 14:44:25','j','j'),(10,0,5,'2006-12-20','2006-12-20','10:13:53','10:13:53','2008-07-22 00:00:00','2008-07-22 00:00:00','y','y'),(11,9,7,'0000-00-00','0000-00-00','00:00:00','00:00:00','2004-07-05 00:00:00','2004-07-05 00:00:00','{','{'),(12,4,3,'2007-01-26','2007-01-26','23:00:51','23:00:51','2001-05-16 00:00:00','2001-05-16 00:00:00','f','f'),(13,7,0,'2004-03-27','2004-03-27','00:00:00','00:00:00','2005-01-24 03:30:37','2005-01-24 03:30:37','',''),(14,6,0,'2006-07-26','2006-07-26','18:43:57','18:43:57','0000-00-00 00:00:00','0000-00-00 00:00:00','{','{'),(15,0,6,'2000-01-14','2000-01-14','00:00:00','00:00:00','2000-09-21 00:00:00','2000-09-21 00:00:00','o','o'),(16,9,8,'0000-00-00','0000-00-00','21:15:08','21:15:08','0000-00-00 00:00:00','0000-00-00 00:00:00','a','a'),(17,2,0,'2004-10-27','2004-10-27','00:00:00','00:00:00','2004-03-24 22:13:43','2004-03-24 22:13:43','',''),(18,7,4,'0000-00-00','0000-00-00','08:38:27','08:38:27','2002-03-18 19:51:44','2002-03-18 19:51:44','t','t'),(19,5,3,'2008-03-07','2008-03-07','03:29:07','03:29:07','2007-12-01 18:44:44','2007-12-01 18:44:44','t','t'),(20,0,0,'2002-04-09','2002-04-09','16:06:03','16:06:03','2009-04-22 00:00:00','2009-04-22 00:00:00','n','n');
+DELETE FROM t2 WHERE `int_key` < 3 LIMIT 1;
+UPDATE t1 SET `int_key` = 3 ORDER BY `pk` LIMIT 4;
+DELETE FROM t2 WHERE `int_key` < 3 LIMIT 1;
+DELETE FROM t2 WHERE `pk` < 6 LIMIT 1;
+UPDATE t1 SET `int_key` = 6 ORDER BY `pk` LIMIT 3;
+DELETE FROM t2 WHERE `pk` < 6 LIMIT 1;
+UPDATE t1 SET `pk` = 6 ORDER BY `int_key` LIMIT 6;
+ERROR 23000: Duplicate entry '6' for key 'PRIMARY'
+DELETE FROM t2 WHERE `pk` < 7 LIMIT 1;
+UPDATE t1 SET `int_key` = 4 ORDER BY `pk` LIMIT 6;
+*** results: t2 must be consistent ****
+include/diff_tables.inc [master:t2, slave:t2]
+DROP TABLE t1, t2;
+EOF OF TESTS
+CREATE TABLE t1 (a int) ENGINE='INNODB' ;
+INSERT IGNORE INTO t1 VALUES (NULL);
+INSERT INTO t1 ( a ) VALUES ( 0 );
+INSERT INTO t1 ( a ) VALUES ( 9 );
+INSERT INTO t1 ( a ) VALUES ( 2 );
+INSERT INTO t1 ( a ) VALUES ( 9 );
+INSERT INTO t1 ( a ) VALUES ( 5 );
+UPDATE t1 SET a = 5 WHERE a = 9;
+DELETE FROM t1 WHERE a < 6;
+UPDATE t1 SET a = 9 WHERE a < 3;
+INSERT INTO t1 ( a ) VALUES ( 3 );
+UPDATE t1 SET a = 0 WHERE a < 4;
+UPDATE t1 SET a = 8 WHERE a < 5;
+include/diff_tables.inc [master:t1, slave:t1]
+drop table t1;
+CREATE TABLE t1 (a bit) ENGINE='INNODB' ;
+INSERT IGNORE INTO t1 VALUES (NULL);
+INSERT INTO t1 ( a ) VALUES ( 0 );
+UPDATE t1 SET a = 0 WHERE a = 1 LIMIT 3;
+INSERT INTO t1 ( a ) VALUES ( 5 );
+DELETE FROM t1 WHERE a < 2 LIMIT 4;
+DELETE FROM t1 WHERE a < 9 LIMIT 4;
+INSERT INTO t1 ( a ) VALUES ( 9 );
+UPDATE t1 SET a = 8 WHERE a = 0 LIMIT 6;
+INSERT INTO t1 ( a ) VALUES ( 8 );
+UPDATE t1 SET a = 0 WHERE a < 6 LIMIT 0;
+INSERT INTO t1 ( a ) VALUES ( 4 );
+INSERT INTO t1 ( a ) VALUES ( 3 );
+UPDATE t1 SET a = 0 WHERE a = 7 LIMIT 6;
+DELETE FROM t1 WHERE a = 4 LIMIT 7;
+UPDATE t1 SET a = 9 WHERE a < 2 LIMIT 9;
+UPDATE t1 SET a = 0 WHERE a < 9 LIMIT 2;
+DELETE FROM t1 WHERE a < 0 LIMIT 5;
+INSERT INTO t1 ( a ) VALUES ( 5 );
+UPDATE t1 SET a = 4 WHERE a < 6 LIMIT 4;
+INSERT INTO t1 ( a ) VALUES ( 5 );
+UPDATE t1 SET a = 9 WHERE a < 5 LIMIT 8;
+DELETE FROM t1 WHERE a < 8 LIMIT 8;
+INSERT INTO t1 ( a ) VALUES ( 6 );
+DELETE FROM t1 WHERE a < 6 LIMIT 7;
+UPDATE t1 SET a = 7 WHERE a = 3 LIMIT 7;
+UPDATE t1 SET a = 8 WHERE a = 0 LIMIT 6;
+INSERT INTO t1 ( a ) VALUES ( 7 );
+DELETE FROM t1 WHERE a < 9 LIMIT 4;
+INSERT INTO t1 ( a ) VALUES ( 7 );
+INSERT INTO t1 ( a ) VALUES ( 6 );
+UPDATE t1 SET a = 8 WHERE a = 3 LIMIT 4;
+DELETE FROM t1 WHERE a = 2 LIMIT 9;
+DELETE FROM t1 WHERE a = 1 LIMIT 4;
+UPDATE t1 SET a = 4 WHERE a = 2 LIMIT 7;
+INSERT INTO t1 ( a ) VALUES ( 0 );
+DELETE FROM t1 WHERE a < 3 LIMIT 0;
+UPDATE t1 SET a = 8 WHERE a = 5 LIMIT 2;
+INSERT INTO t1 ( a ) VALUES ( 1 );
+UPDATE t1 SET a = 9 WHERE a < 5 LIMIT 3;
+include/diff_tables.inc [master:t1, slave:t1]
+drop table t1;
+stop slave;
+set global slave_allow_batching=OFF;
+start slave;
+include/rpl_end.inc

=== added file 'mysql-test/suite/rpl/t/rpl_row_basic_allow_batching.test'
--- a/mysql-test/suite/rpl/t/rpl_row_basic_allow_batching.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl/t/rpl_row_basic_allow_batching.test	2011-11-15 14:25:58 +0000
@@ -0,0 +1,32 @@
+-- source include/have_binlog_format_row.inc
+-- source include/master-slave.inc
+
+--connection slave
+show variables like 'slave_allow_batching';
+
+--echo Show that slave_allow_batching cannot be changed while slave is running
+--error ER_SLAVE_MUST_STOP
+set global slave_allow_batching=ON;
+show warnings;
+show variables like 'slave_allow_batching';
+
+--echo Now stop slave and change it
+stop slave;
+set global slave_allow_batching=ON;
+show variables like 'slave_allow_batching';
+start slave;
+
+--echo Now the normal test
+--connection master
+
+let $type= 'INNODB' ;
+let $extra_index= ;
+-- source extra/rpl_tests/rpl_row_basic.test
+
+--connection slave
+stop slave;
+set global slave_allow_batching=OFF;
+start slave;
+
+--source include/rpl_end.inc
+

=== modified file 'mysql-test/suite/sys_vars/t/slave_allow_batching_basic.test'
--- a/mysql-test/suite/sys_vars/t/slave_allow_batching_basic.test	2011-02-21 13:17:25 +0000
+++ b/mysql-test/suite/sys_vars/t/slave_allow_batching_basic.test	2011-11-15 18:46:04 +0000
@@ -22,7 +22,7 @@
 #  server-system-variables.html                                                #
 #                                                                              #
 ################################################################################
-
+--source include/not_embedded.inc
 --source include/load_sysvars.inc
 
 ######################################################################## 

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2011-11-15 13:56:53 +0000
+++ b/sql/ha_ndbcluster.cc	2011-11-16 09:29:49 +0000
@@ -13033,6 +13033,7 @@ ulonglong ha_ndbcluster::table_flags(voi
 #ifndef NDB_WITHOUT_ONLINE_ALTER
     HA_ONLINE_ALTER |
 #endif
+    HA_READ_BEFORE_WRITE_REMOVAL |
     0;
 
   /*

=== modified file 'sql/handler.h'
--- a/sql/handler.h	2011-11-10 13:08:24 +0000
+++ b/sql/handler.h	2011-11-16 09:29:49 +0000
@@ -228,6 +228,12 @@ typedef Bitmap<HA_MAX_ALTER_FLAGS> HA_AL
  */
 #define HA_BINLOG_FLAGS (HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE)
 
+#ifndef MCP_WL5906
+/*
+  The handler supports read before write removal optimization
+*/
+#define HA_READ_BEFORE_WRITE_REMOVAL  (LL(1) << 38)
+#endif
 
 /* bits in index_flags(index_number) for what you can do with index */
 #define HA_READ_NEXT            1       /* TODO really use this flag */

=== modified file 'sql/log_event.cc'
--- a/sql/log_event.cc	2011-10-05 09:49:18 +0000
+++ b/sql/log_event.cc	2011-11-16 09:29:49 +0000
@@ -9032,6 +9032,10 @@ int Rows_log_event::do_apply_event(Relay
     else
         thd->variables.option_bits&= ~OPTION_RELAXED_UNIQUE_CHECKS;
 #ifndef MCP_WL3733
+    /*
+      Note that unlike the other thd options set here, this one
+      comes from a global, and not from the incoming event.
+    */
     if (slave_allow_batching)
       thd->variables.option_bits|= OPTION_ALLOW_BATCH;
     else
@@ -11106,6 +11110,25 @@ int Rows_log_event::find_row(const Relay
 
   if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION))
   {
+
+#ifndef MCP_SLAVE_RBWR_OPTIMIZATION
+    if ((table->file->ha_table_flags() & HA_READ_BEFORE_WRITE_REMOVAL))
+    {
+      /*
+        Read removal is possible since the engine supports write without
+        previous read using full primary key
+      */
+      DBUG_PRINT("info", ("using read before write removal"));
+
+      /*
+        Tell the handler to ignore if key exists or not, since it's
+        not yet known if the key does exist(when using rbwr)
+      */
+      table->file->extra(HA_EXTRA_IGNORE_NO_KEY);
+      DBUG_RETURN(0);
+    }
+#endif
+
     /*
       Use a more efficient method to fetch the record given by
       table->record[0] if the engine allows it.  We first compute a
@@ -11125,23 +11148,6 @@ int Rows_log_event::find_row(const Relay
 
     */
 
-#ifndef MCP_WL3733
-    /*
-      Ndb does not need read before delete/update (and no updates are sent)
-      if primary key specified
-
-      (Actually uniquekey will also do, but pk will be in each
-      row if table has pk)
-
-      Also set ignore no key, as we don't really know if row exists...
-    */
-    if (table->file->ht->db_type == DB_TYPE_NDBCLUSTER)
-    {
-      table->file->extra(HA_EXTRA_IGNORE_NO_KEY);
-      DBUG_RETURN(0);
-    }
-#endif
-
     DBUG_PRINT("info",("locating record using primary key (position)"));
     int error;
     if (table->file->inited && (error= table->file->ha_index_end()))

=== modified file 'sql/mysqld.cc'
--- a/sql/mysqld.cc	2011-10-05 09:49:18 +0000
+++ b/sql/mysqld.cc	2011-11-16 09:29:49 +0000
@@ -415,10 +415,6 @@ handlerton *partition_hton;
 uint opt_server_id_bits= 0;
 ulong opt_server_id_mask= 0;
 #endif
-#ifndef MCP_BUG46955
-extern int(*ndb_wait_setup_func)(ulong);
-extern ulong opt_ndb_wait_setup;
-#endif
 my_bool read_only= 0, opt_readonly= 0;
 my_bool use_temp_pool, relay_log_purge;
 my_bool relay_log_recovery;

=== modified file 'sql/mysqld.h'
--- a/sql/mysqld.h	2011-10-04 12:53:30 +0000
+++ b/sql/mysqld.h	2011-11-16 09:29:49 +0000
@@ -223,6 +223,14 @@ extern const char *opt_date_time_formats
 extern handlerton *partition_hton;
 extern handlerton *myisam_hton;
 extern handlerton *heap_hton;
+#ifndef MCP_BUG53205
+extern uint opt_server_id_bits;
+extern ulong opt_server_id_mask;
+#endif
+#ifndef MCP_BUG46955
+extern int(*ndb_wait_setup_func)(ulong);
+extern ulong opt_ndb_wait_setup;
+#endif
 extern const char *load_default_groups[];
 extern struct my_option my_long_options[];
 extern int mysqld_server_started;

=== modified file 'sql/rpl_slave.cc'
--- a/sql/rpl_slave.cc	2011-09-27 12:11:16 +0000
+++ b/sql/rpl_slave.cc	2011-11-16 09:29:49 +0000
@@ -4579,10 +4579,6 @@ void slave_stop_workers(Relay_log_info *
   free_root(&rli->mts_coor_mem_root, MYF(0));
 }
 
-#ifndef MCP_BUG46955
-extern int(*ndb_wait_setup_func)(ulong);
-extern ulong opt_ndb_wait_setup;
-#endif
 
 /**
   Slave SQL thread entry point.

=== modified file 'sql/rpl_slave.h'
--- a/sql/rpl_slave.h	2011-09-30 15:38:44 +0000
+++ b/sql/rpl_slave.h	2011-11-16 09:29:49 +0000
@@ -239,9 +239,6 @@ extern char *report_host, *report_passwo
 extern my_bool master_ssl;
 extern char *master_ssl_ca, *master_ssl_capath, *master_ssl_cert;
 extern char *master_ssl_cipher, *master_ssl_key;
-#ifndef MCP_BUG52305
-extern ulong opt_server_id_mask;
-#endif
        
 extern I_List<THD> threads;
 

=== modified file 'sql/sys_vars.cc'
--- a/sql/sys_vars.cc	2011-10-04 12:53:30 +0000
+++ b/sql/sys_vars.cc	2011-11-16 09:29:49 +0000
@@ -2180,7 +2180,6 @@ static Sys_var_charptr Sys_server_uuid(
        NO_CMD_LINE, IN_FS_CHARSET, DEFAULT(server_uuid));
 
 #ifndef MCP_BUG53205
-extern uint opt_server_id_bits;
 static Sys_var_uint Sys_server_id_bits(
        "server_id_bits",
        "Set number of significant bits in server-id",
@@ -2821,11 +2820,37 @@ static Sys_var_ulong Sys_profiling_histo
 #endif
 
 #ifndef MCP_WL3733
+#ifndef EMBEDDED_LIBRARY
 my_bool slave_allow_batching;
+
+/*
+  Take Active MI lock while checking/updating slave_allow_batching
+  to give atomicity w.r.t. slave state changes
+*/
+static PolyLock_mutex PLock_active_mi(&LOCK_active_mi);
+
+static bool slave_allow_batching_check(sys_var *self, THD *thd, set_var *var)
+{
+  /* Only allow a change if the slave SQL thread is currently stopped */
+  bool slave_sql_running = active_mi->rli->slave_running;
+
+  if (slave_sql_running)
+  {
+    my_error(ER_SLAVE_MUST_STOP, MYF(0));
+    return true;
+  }
+
+  return false;
+}
+
 static Sys_var_mybool Sys_slave_allow_batching(
        "slave_allow_batching", "Allow slave to batch requests",
        GLOBAL_VAR(slave_allow_batching),
-       CMD_LINE(OPT_ARG), DEFAULT(FALSE));
+       CMD_LINE(OPT_ARG), DEFAULT(FALSE),
+       &PLock_active_mi,
+       NOT_IN_BINLOG,
+       ON_CHECK(slave_allow_batching_check));
+#endif
 #endif
 
 static Sys_var_harows Sys_select_limit(

=== modified file 'storage/ndb/include/kernel/ndb_limits.h'
--- a/storage/ndb/include/kernel/ndb_limits.h	2011-10-20 19:52:11 +0000
+++ b/storage/ndb/include/kernel/ndb_limits.h	2011-11-16 08:17:17 +0000
@@ -194,8 +194,9 @@
 #define NDBMT_BLOCK_MASK ((1 << NDBMT_BLOCK_BITS) - 1)
 #define NDBMT_BLOCK_INSTANCE_BITS 7
 
-#define MAX_NDBMT_LQH_WORKERS 4
-#define MAX_NDBMT_LQH_THREADS 4
+#define NDB_MAX_LOG_PARTS     4
+#define MAX_NDBMT_LQH_WORKERS NDB_MAX_LOG_PARTS
+#define MAX_NDBMT_LQH_THREADS NDB_MAX_LOG_PARTS
 #define MAX_NDBMT_TC_THREADS  2
 
 #define NDB_FILE_BUFFER_SIZE (256*1024)

=== modified file 'storage/ndb/include/mgmapi/mgmapi_config_parameters.h'
--- a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h	2011-10-07 18:15:59 +0000
+++ b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h	2011-11-16 05:47:02 +0000
@@ -68,6 +68,7 @@
 
 #define CFG_DB_FILESYSTEM_PATH        125
 #define CFG_DB_NO_REDOLOG_FILES       126
+#define CFG_DB_NO_REDOLOG_PARTS       632
 #define CFG_DB_REDOLOG_FILE_SIZE      140
 
 #define CFG_DB_LCP_DISC_PAGES_TUP     127
@@ -198,6 +199,7 @@
 #define CFG_DB_MT_THREAD_CONFIG          628
 
 #define CFG_DB_CRASH_ON_CORRUPTED_TUPLE  629
+/* 632 used for CFG_DB_NO_REDOLOG_PARTS */
 
 #define CFG_NODE_ARBIT_RANK           200
 #define CFG_NODE_ARBIT_DELAY          201

=== modified file 'storage/ndb/include/ndb_version.h.in'
--- a/storage/ndb/include/ndb_version.h.in	2011-09-02 09:16:56 +0000
+++ b/storage/ndb/include/ndb_version.h.in	2011-11-16 08:17:17 +0000
@@ -693,4 +693,25 @@ ndbd_get_config_supported(Uint32 x)
   return x >= NDBD_GET_CONFIG_SUPPORT_71;
 }
 
+#define NDBD_CONFIGURABLE_LOG_PARTS_70 NDB_MAKE_VERSION(7,0,29)
+#define NDBD_CONFIGURABLE_LOG_PARTS_71 NDB_MAKE_VERSION(7,1,18)
+#define NDBD_CONFIGURABLE_LOG_PARTS_72 NDB_MAKE_VERSION(7,2,3)
+
+static
+inline
+int
+ndb_configurable_log_parts(Uint32 x)
+{
+  const Uint32 major = (x >> 16) & 0xFF;
+  const Uint32 minor = (x >>  8) & 0xFF;
+
+  if (major == 7 && minor < 2)
+  {
+    if (minor == 0)
+      return x >= NDBD_CONFIGURABLE_LOG_PARTS_70;
+    else if (minor == 1)
+      return x >= NDBD_CONFIGURABLE_LOG_PARTS_71;
+  }
+  return x >= NDBD_CONFIGURABLE_LOG_PARTS_72;
+}
 #endif

=== modified file 'storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp'
--- a/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp	2011-11-16 05:47:02 +0000
@@ -625,8 +625,8 @@ struct ScanRec {
 /* TABREC                                                                            */
 /* --------------------------------------------------------------------------------- */
 struct Tabrec {
-  Uint32 fragholder[MAX_FRAG_PER_NODE];
-  Uint32 fragptrholder[MAX_FRAG_PER_NODE];
+  Uint32 fragholder[MAX_FRAG_PER_LQH];
+  Uint32 fragptrholder[MAX_FRAG_PER_LQH];
   Uint32 tabUserPtr;
   BlockReference tabUserRef;
   Uint32 tabUserGsn;

=== modified file 'storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp	2011-11-16 05:47:02 +0000
@@ -481,7 +481,7 @@ void Dbacc::initialiseTableRec(Signal* s
   for (tabptr.i = 0; tabptr.i < ctablesize; tabptr.i++) {
     refresh_watch_dog();
     ptrAss(tabptr, tabrec);
-    for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+    for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tabptr.p->fragholder); i++) {
       tabptr.p->fragholder[i] = RNIL;
       tabptr.p->fragptrholder[i] = RNIL;
     }//for
@@ -653,7 +653,7 @@ Dbacc::execDROP_FRAG_REQ(Signal* signal)
   tabPtr.p->tabUserPtr = req->senderData;
   tabPtr.p->tabUserGsn = GSN_DROP_FRAG_REQ;
 
-  for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++)
+  for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tabPtr.p->fragholder); i++)
   {
     jam();
     if (tabPtr.p->fragholder[i] == req->fragId)
@@ -677,7 +677,7 @@ void Dbacc::releaseRootFragResources(Sig
   if (tabPtr.p->tabUserGsn == GSN_DROP_TAB_REQ)
   {
     jam();
-    for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++)
+    for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tabPtr.p->fragholder); i++)
     {
       jam();
       if (tabPtr.p->fragholder[i] != RNIL)
@@ -857,7 +857,7 @@ void Dbacc::releaseFragRecord(Signal* si
 /* -------------------------------------------------------------------------- */
 bool Dbacc::addfragtotab(Signal* signal, Uint32 rootIndex, Uint32 fid) 
 {
-  for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+  for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tabptr.p->fragholder); i++) {
     jam();
     if (tabptr.p->fragholder[i] == RNIL) {
       jam();
@@ -2493,7 +2493,7 @@ void Dbacc::execACC_LOCKREQ(Signal* sign
     ptrCheckGuard(tabptr, ctablesize, tabrec);
     // find fragment (TUX will know it)
     if (req->fragPtrI == RNIL) {
-      for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+      for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tabptr.p->fragholder); i++) {
         jam();
         if (tabptr.p->fragholder[i] == req->fragId){
 	  jam();
@@ -7590,7 +7590,7 @@ void Dbacc::takeOutReadyScanQueue(Signal
 
 bool Dbacc::getfragmentrec(Signal* signal, FragmentrecPtr& rootPtr, Uint32 fid) 
 {
-  for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+  for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tabptr.p->fragholder); i++) {
     jam();
     if (tabptr.p->fragholder[i] == fid) {
       jam();

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp	2011-11-16 05:47:02 +0000
@@ -520,7 +520,7 @@ public:
 // Each entry in this array contains a reference to 16 fragment records in a
 // row. Thus finding the correct record is very quick provided the fragment id.
 //-----------------------------------------------------------------------------
-    Uint32 startFid[MAX_NDB_NODES * MAX_FRAG_PER_NODE / NO_OF_FRAGS_PER_CHUNK];
+    Uint32 startFid[MAX_NDB_NODES * MAX_FRAG_PER_LQH / NO_OF_FRAGS_PER_CHUNK];
 
     Uint32 tabFile[2];
     Uint32 connectrec;                                    

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-11-15 13:13:38 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-11-16 09:29:49 +0000
@@ -7485,6 +7485,8 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ
   Uint32 err = 0;
   const Uint32 defaultFragments = 
     c_fragments_per_node * cnoOfNodeGroups * cnoReplicas;
+  const Uint32 maxFragments =
+    MAX_FRAG_PER_LQH * getLqhWorkers() * cnoOfNodeGroups * cnoReplicas;
 
   do {
     NodeGroupRecordPtr NGPtr;
@@ -7506,11 +7508,15 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ
       case DictTabInfo::AllNodesMediumTable:
         jam();
         noOfFragments = 2 * defaultFragments;
+        if (noOfFragments > maxFragments)
+          noOfFragments = maxFragments;
         set_default_node_groups(signal, noOfFragments);
         break;
       case DictTabInfo::AllNodesLargeTable:
         jam();
         noOfFragments = 4 * defaultFragments;
+        if (noOfFragments > maxFragments)
+          noOfFragments = maxFragments;
         set_default_node_groups(signal, noOfFragments);
         break;
       case DictTabInfo::SingleFragment:
@@ -7863,7 +7869,7 @@ void Dbdih::execDIADDTABREQ(Signal* sign
   }
 
   union {
-    Uint16 fragments[2 + MAX_FRAG_PER_NODE*MAX_REPLICAS*MAX_NDB_NODES];
+    Uint16 fragments[2 + MAX_FRAG_PER_LQH*MAX_REPLICAS*MAX_NDB_NODES];
     Uint32 align;
   };
   (void)align; // kill warning

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2011-11-03 17:22:01 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2011-11-16 08:17:17 +0000
@@ -111,6 +111,9 @@ class Lgman;
 #define ZPOS_PREV_PAGE_NO 19
 #define ZPOS_IN_FREE_LIST 20
 
+/* Specify number of log parts used to enable use of more LQH threads */
+#define ZPOS_NO_LOG_PARTS 21
+
 /* ------------------------------------------------------------------------- */
 /*       CONSTANTS FOR THE VARIOUS REPLICA AND NODE TYPES.                   */
 /* ------------------------------------------------------------------------- */
@@ -1929,8 +1932,8 @@ public:
       ,TABLE_READ_ONLY = 9
     };
     
-    UintR fragrec[MAX_FRAG_PER_NODE];
-    Uint16 fragid[MAX_FRAG_PER_NODE];
+    UintR fragrec[MAX_FRAG_PER_LQH];
+    Uint16 fragid[MAX_FRAG_PER_LQH];
     /**
      * Status of the table 
      */
@@ -2834,7 +2837,6 @@ private:
   UintR cfirstfreeLcpLoc;
   UintR clcpFileSize;
 
-#define ZLOG_PART_FILE_SIZE 4
   LogPartRecord *logPartRecord;
   LogPartRecordPtr logPartPtr;
   UintR clogPartFileSize;

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhCommon.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhCommon.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhCommon.cpp	2011-11-16 05:47:02 +0000
@@ -20,6 +20,7 @@
 
 NdbLogPartInfo::NdbLogPartInfo(Uint32 instanceNo)
 {
+  LogParts = globalData.ndbLogParts;
   lqhWorkers = globalData.ndbMtLqhWorkers;
   partCount = 0;
   partMask.clear();

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhCommon.hpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhCommon.hpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhCommon.hpp	2011-11-16 05:47:02 +0000
@@ -22,7 +22,10 @@
 #include <Bitmask.hpp>
 
 /*
- * Log part id is from DBDIH.  Number of log parts is fixed as 4.
+ * Log part id is from DBDIH.  The number of log parts is configurable,
+ * with a minimum of 4 parts and a compile-time maximum. The description
+ * below assumes 4 parts.
+ *
  * A log part is identified by log part number (0-3)
  *
  *   log part number = log part id % 4
@@ -38,12 +41,12 @@
  * instance (main instance 0 or worker instances 1-4).
  */
 struct NdbLogPartInfo {
-  enum { LogParts = 4 };
+  Uint32 LogParts;
   NdbLogPartInfo(Uint32 instanceNo);
   Uint32 lqhWorkers;
   Uint32 partCount;
-  Uint16 partNo[LogParts];
-  Bitmask<(LogParts+31)/32> partMask;
+  Uint16 partNo[NDB_MAX_LOG_PARTS];
+  Bitmask<(NDB_MAX_LOG_PARTS+31)/32> partMask;
   Uint32 partNoFromId(Uint32 lpid) const;
   bool partNoOwner(Uint32 lpno) const;
   bool partNoOwner(Uint32 tabId, Uint32 fragId);

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp	2011-11-16 05:47:02 +0000
@@ -36,9 +36,7 @@ void Dblqh::initData() 
   clcpFileSize = ZNO_CONCURRENT_LCP;
   clfoFileSize = 0;
   clogFileFileSize = 0;
-
-  NdbLogPartInfo lpinfo(instance());
-  clogPartFileSize = lpinfo.partCount;
+  clogPartFileSize = 0; // Not valid until READ_CONFIG
 
   cpageRefFileSize = ZPAGE_REF_FILE_SIZE;
   cscanrecFileSize = 0;
@@ -117,7 +115,7 @@ void Dblqh::initRecords() 
 
   logPartRecord = (LogPartRecord*)allocRecord("LogPartRecord",
 					      sizeof(LogPartRecord), 
-					      clogPartFileSize);
+					      NDB_MAX_LOG_PARTS);
 
   logFileRecord = (LogFileRecord*)allocRecord("LogFileRecord",
 					      sizeof(LogFileRecord),

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-11-15 13:13:38 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-11-16 09:29:49 +0000
@@ -1219,7 +1219,40 @@ void Dblqh::execREAD_CONFIG_REQ(Signal* 
   const ndb_mgm_configuration_iterator * p = 
     m_ctx.m_config.getOwnConfigIterator();
   ndbrequire(p != 0);
-  
+
+  clogPartFileSize = 4;
+
+  Uint32 nodeLogParts = 4;
+  ndb_mgm_get_int_parameter(p, CFG_DB_NO_REDOLOG_PARTS,
+                            &nodeLogParts);
+  globalData.ndbLogParts = nodeLogParts;
+  ndbrequire(nodeLogParts <= NDB_MAX_LOG_PARTS);
+  {
+    NdbLogPartInfo lpinfo(instance());
+    clogPartFileSize = lpinfo.partCount; // How many log parts this instance is responsible for...
+  }
+
+  if (globalData.ndbMtLqhWorkers > nodeLogParts)
+  {
+    char buf[255];
+    BaseString::snprintf(buf, sizeof(buf),
+      "Trying to start %d LQH workers with only %d log parts, try initial"
+      " node restart to be able to use more LQH workers.",
+      globalData.ndbMtLqhWorkers, nodeLogParts);
+    progError(__LINE__, NDBD_EXIT_INVALID_CONFIG, buf);
+  }
+  if (nodeLogParts != 4 &&
+      nodeLogParts != 8 &&
+      nodeLogParts != 16)
+  {
+    char buf[255];
+    BaseString::snprintf(buf, sizeof(buf),
+      "Trying to start with %d log parts, number of log parts can"
+      " only be set to 4, 8 or 16.",
+      nodeLogParts);
+    progError(__LINE__, NDBD_EXIT_INVALID_CONFIG, buf);
+  }
+
   cnoLogFiles = 8;
   ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_REDOLOG_FILES, 
 					&cnoLogFiles));
@@ -1889,7 +1922,7 @@ void Dblqh::execLQHFRAGREQ(Signal* signa
     ptrCheckGuard(tTablePtr, ctabrecFileSize, tablerec);
     FragrecordPtr tFragPtr;
     tFragPtr.i = RNIL;
-    for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+    for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tTablePtr.p->fragid); i++) {
       if (tTablePtr.p->fragid[i] == fragptr.p->fragId) {
         jam();
         tFragPtr.i = tTablePtr.p->fragrec[i];
@@ -2633,7 +2666,7 @@ void Dblqh::removeTable(Uint32 tableId)
   tabptr.i = tableId;
   ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
   
-  for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+  for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tabptr.p->fragid); i++) {
     jam();
     if (tabptr.p->fragid[i] != ZNIL) {
       jam();
@@ -2778,7 +2811,7 @@ Dblqh::wait_reorg_suma_filter_enabled(Si
 void
 Dblqh::commit_reorg(TablerecPtr tablePtr)
 {
-  for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++)
+  for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tablePtr.p->fragrec); i++)
   {
     jam();
     Ptr<Fragrecord> fragPtr;
@@ -14644,7 +14677,7 @@ void Dblqh::initGcpRecLab(Signal* signal
   }//for
   // initialize un-used part
   Uint32 Ti;
-  for (Ti = clogPartFileSize; Ti < ZLOG_PART_FILE_SIZE; Ti++) {
+  for (Ti = clogPartFileSize; Ti < NDB_MAX_LOG_PARTS; Ti++) {
     gcpPtr.p->gcpFilePtr[Ti] = ZNIL;
     gcpPtr.p->gcpPageNo[Ti] = ZNIL;
     gcpPtr.p->gcpSyncReady[Ti] = FALSE;
@@ -15698,7 +15731,10 @@ void Dblqh::initWriteEndLab(Signal* sign
 /*---------------------------------------------------------------------------*/
 /* PAGE ZERO IN FILE ZERO MUST SET LOG LAP TO ONE SINCE IT HAS STARTED       */
 /* WRITING TO THE LOG, ALSO GLOBAL CHECKPOINTS ARE SET TO ZERO.              */
+/* Set number of log parts used to ensure we use correct number of log parts */
+/* at system restart. Was previously hardcoded to 4.                         */
 /*---------------------------------------------------------------------------*/
+    logPagePtr.p->logPageWord[ZPOS_NO_LOG_PARTS]= globalData.ndbLogParts;
     logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = 1;
     logPagePtr.p->logPageWord[ZPOS_MAX_GCI_STARTED] = 0;
     logPagePtr.p->logPageWord[ZPOS_MAX_GCI_COMPLETED] = 0;
@@ -15881,6 +15917,8 @@ void Dblqh::initLogpage(Signal* signal) 
 {
   TcConnectionrecPtr ilpTcConnectptr;
 
+  /* Ensure all non-used header words are zero */
+  bzero(logPagePtr.p, sizeof(Uint32) * ZPAGE_HEADER_SIZE);
   logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = logPartPtr.p->logLap;
   logPagePtr.p->logPageWord[ZPOS_MAX_GCI_COMPLETED] = 
         logPartPtr.p->logPartNewestCompletedGCI;
@@ -16423,6 +16461,35 @@ void Dblqh::openSrFrontpageLab(Signal* s
  * -------------------------------------------------------------------------- */
 void Dblqh::readSrFrontpageLab(Signal* signal) 
 {
+  Uint32 num_parts_used;
+  if (!ndb_configurable_log_parts(logPagePtr.p->logPageWord[ZPOS_VERSION])) {
+    jam();
+    num_parts_used= 4;
+  }
+  else
+  {
+    jam();
+    num_parts_used = logPagePtr.p->logPageWord[ZPOS_NO_LOG_PARTS];
+  }
+  /* Verify that number of log parts >= number of LQH workers */
+  if (globalData.ndbMtLqhWorkers > num_parts_used) {
+    char buf[255];
+    BaseString::snprintf(buf, sizeof(buf),
+      "Trying to start %d LQH workers with only %d log parts, try initial"
+      " node restart to be able to use more LQH workers.",
+      globalData.ndbMtLqhWorkers, num_parts_used);
+    progError(__LINE__, NDBD_EXIT_INVALID_CONFIG, buf);
+  }
+  if (num_parts_used != globalData.ndbLogParts)
+  {
+    char buf[255];
+    BaseString::snprintf(buf, sizeof(buf),
+      "Can only change NoOfLogParts through initial node restart, old"
+      " value of NoOfLogParts = %d, tried using %d",
+      num_parts_used, globalData.ndbLogParts);
+    progError(__LINE__, NDBD_EXIT_INVALID_CONFIG, buf);
+  }
+
   Uint32 fileNo = logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO];
   if (fileNo == 0) {
     jam();
@@ -20050,7 +20117,7 @@ void Dblqh::deleteFragrec(Uint32 fragId)
 {
   Uint32 indexFound= RNIL;
   fragptr.i = RNIL;
-  for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+  for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tabptr.p->fragid); i++) {
     jam();
     if (tabptr.p->fragid[i] == fragId) {
       fragptr.i = tabptr.p->fragrec[i];
@@ -20265,7 +20332,7 @@ Dblqh::getFirstInLogQueue(Signal* signal
 /* ---------------------------------------------------------------- */
 bool Dblqh::getFragmentrec(Signal* signal, Uint32 fragId) 
 {
-  for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+  for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tabptr.p->fragid); i++) {
     jam();
     if (tabptr.p->fragid[i] == fragId) {
       fragptr.i = tabptr.p->fragrec[i];
@@ -20328,7 +20395,7 @@ void Dblqh::initialiseGcprec(Signal* sig
   if (cgcprecFileSize != 0) {
     for (gcpPtr.i = 0; gcpPtr.i < cgcprecFileSize; gcpPtr.i++) {
       ptrAss(gcpPtr, gcpRecord);
-      for (tigpIndex = 0; tigpIndex < ZLOG_PART_FILE_SIZE; tigpIndex++) {
+      for (tigpIndex = 0; tigpIndex < NDB_MAX_LOG_PARTS; tigpIndex++) {
         gcpPtr.p->gcpLogPartState[tigpIndex] = ZIDLE;
         gcpPtr.p->gcpSyncReady[tigpIndex] = ZFALSE;
       }//for
@@ -20616,7 +20683,7 @@ void Dblqh::initialiseTabrec(Signal* sig
       tabptr.p->tableStatus = Tablerec::NOT_DEFINED;
       tabptr.p->usageCountR = 0;
       tabptr.p->usageCountW = 0;
-      for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+      for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tabptr.p->fragid); i++) {
         tabptr.p->fragid[i] = ZNIL;
         tabptr.p->fragrec[i] = RNIL;
       }//for
@@ -20886,7 +20953,7 @@ bool Dblqh::insertFragrec(Signal* signal
     terrorCode = ZNO_FREE_FRAGMENTREC;
     return false;
   }
-  for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+  for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tabptr.p->fragid); i++) {
     jam();
     if (tabptr.p->fragid[i] == ZNIL) {
       jam();
@@ -22579,7 +22646,7 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal
 		  i, tabPtr.p->tableStatus,
                   tabPtr.p->usageCountR, tabPtr.p->usageCountW);
 
-	for (Uint32 j = 0; j<MAX_FRAG_PER_NODE; j++)
+	for (Uint32 j = 0; j<NDB_ARRAY_SIZE(tabPtr.p->fragrec); j++)
 	{
 	  FragrecordPtr fragPtr;
 	  if ((fragPtr.i = tabPtr.p->fragrec[j]) != RNIL)

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-11-10 20:35:28 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-11-16 08:17:17 +0000
@@ -1532,7 +1532,17 @@ Dbspj::releaseNodeRows(Ptr<Request> requ
       releaseRow(requestPtr, pos);
       cnt++;
     }
-    treeNodePtr.p->m_row_map.init();
+
+    // Release the (now empty) RowMap
+    RowMap& map = treeNodePtr.p->m_row_map;
+    if (!map.isNull())
+    {
+      jam();
+      RowRef ref;
+      map.copyto(ref);
+      releaseRow(requestPtr, ref);  // Map was allocated in row memory
+      map.init();
+    }
     DEBUG("RowMapIterator: released " << cnt << " rows!");
   }
 }

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-10-20 19:52:11 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-11-16 08:17:17 +0000
@@ -1135,8 +1135,8 @@ ArrayPool<TupTriggerData> c_triggerPool;
     // List of ordered indexes
     DLList<TupTriggerData> tuxCustomTriggers;
     
-    Uint32 fragid[MAX_FRAG_PER_NODE];
-    Uint32 fragrec[MAX_FRAG_PER_NODE];
+    Uint32 fragid[MAX_FRAG_PER_LQH];
+    Uint32 fragrec[MAX_FRAG_PER_LQH];
 
     union {
       struct {

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp	2011-09-06 12:43:05 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp	2011-11-16 09:29:49 +0000
@@ -1546,7 +1546,7 @@ Dbtup::disk_restart_undo(Signal* signal,
     Ptr<Tablerec> tabPtr;
     tabPtr.i= rec->m_table;
     ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
-    for(Uint32 i = 0; i<MAX_FRAG_PER_NODE; i++)
+    for(Uint32 i = 0; i<NDB_ARRAY_SIZE(tabPtr.p->fragrec); i++)
       if (tabPtr.p->fragrec[i] != RNIL)
 	disk_restart_undo_lcp(tabPtr.i, tabPtr.p->fragid[i], 
 			      Fragrecord::UC_CREATE, 0);
@@ -1566,7 +1566,7 @@ Dbtup::disk_restart_undo(Signal* signal,
     Ptr<Tablerec> tabPtr;
     tabPtr.i= rec->m_table;
     ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
-    for(Uint32 i = 0; i<MAX_FRAG_PER_NODE; i++)
+    for(Uint32 i = 0; i<NDB_ARRAY_SIZE(tabPtr.p->fragrec); i++)
       if (tabPtr.p->fragrec[i] != RNIL)
 	disk_restart_undo_lcp(tabPtr.i, tabPtr.p->fragid[i], 
 			      Fragrecord::UC_CREATE, 0);

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2011-10-20 19:52:11 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2011-11-16 08:17:17 +0000
@@ -3920,7 +3920,7 @@ Dbtup::validate_page(Tablerec* regTabPtr
   if(mm_vars == 0)
     return;
   
-  for(Uint32 F= 0; F<MAX_FRAG_PER_NODE; F++)
+  for(Uint32 F= 0; F<NDB_ARRAY_SIZE(regTabPtr->fragrec); F++)
   {
     FragrecordPtr fragPtr;
 

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp	2011-10-07 16:12:13 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp	2011-11-14 09:18:48 +0000
@@ -43,9 +43,10 @@ extern EventLogger * g_eventLogger;
 
 void Dbtup::initData() 
 {
-  cnoOfFragrec = MAX_FRAG_PER_NODE;
-  cnoOfFragoprec = MAX_FRAG_PER_NODE;
-  cnoOfAlterTabOps = MAX_FRAG_PER_NODE;
+  TablerecPtr tablePtr;
+  cnoOfFragrec = NDB_ARRAY_SIZE(tablePtr.p->fragrec);
+  cnoOfFragoprec = NDB_ARRAY_SIZE(tablePtr.p->fragrec);
+  cnoOfAlterTabOps = NDB_ARRAY_SIZE(tablePtr.p->fragrec);
   c_maxTriggersPerTable = ZDEFAULT_MAX_NO_TRIGGERS_PER_TABLE;
   c_noOfBuildIndexRec = 32;
 
@@ -772,7 +773,7 @@ void Dbtup::initializeTablerec() 
 void
 Dbtup::initTab(Tablerec* const regTabPtr)
 {
-  for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+  for (Uint32 i = 0; i < NDB_ARRAY_SIZE(regTabPtr->fragid); i++) {
     regTabPtr->fragid[i] = RNIL;
     regTabPtr->fragrec[i] = RNIL;
   }//for
@@ -870,7 +871,7 @@ void Dbtup::execTUPSEIZEREQ(Signal* sign
   return;
 }//Dbtup::execTUPSEIZEREQ()
 
-#define printFragment(t){ for(Uint32 i = 0; i < MAX_FRAG_PER_NODE;i++){\
+#define printFragment(t){ for(Uint32 i = 0; i < NDB_ARRAY_SIZE(t.p->fragid);i++){ \
   ndbout_c("table = %d fragid[%d] = %d fragrec[%d] = %d", \
            t.i, t.p->fragid[i], i, t.p->fragrec[i]); }}
 

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp	2011-11-16 05:47:02 +0000
@@ -552,14 +552,14 @@ Dbtup::buildIndex(Signal* signal, Uint32
   do {
     // get fragment
     FragrecordPtr fragPtr;
-    if (buildPtr.p->m_fragNo == MAX_FRAG_PER_NODE) {
+    if (buildPtr.p->m_fragNo == NDB_ARRAY_SIZE(tablePtr.p->fragrec)) {
       jam();
       // build ready
       buildIndexReply(signal, buildPtr.p);
       c_buildIndexList.release(buildPtr);
       return;
     }
-    ndbrequire(buildPtr.p->m_fragNo < MAX_FRAG_PER_NODE);
+    ndbrequire(buildPtr.p->m_fragNo < NDB_ARRAY_SIZE(tablePtr.p->fragrec));
     fragPtr.i= tablePtr.p->fragrec[buildPtr.p->m_fragNo];
     if (fragPtr.i == RNIL) {
       jam();
@@ -809,7 +809,8 @@ Dbtup::execALTER_TAB_CONF(Signal* signal
   else
   {
     jam();
-    ndbrequire(buildPtr.p->m_fragNo >= MAX_FRAG_PER_NODE);
+    TablerecPtr tablePtr;
+    ndbrequire(buildPtr.p->m_fragNo >= NDB_ARRAY_SIZE(tablePtr.p->fragid));
     buildIndexReply(signal, buildPtr.p);
     c_buildIndexList.release(buildPtr);
     return;
@@ -830,7 +831,7 @@ Dbtup::buildIndexOffline_table_readonly(
   tablePtr.i= buildReq->tableId;
   ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
 
-  for (;buildPtr.p->m_fragNo < MAX_FRAG_PER_NODE;
+  for (;buildPtr.p->m_fragNo < NDB_ARRAY_SIZE(tablePtr.p->fragrec);
        buildPtr.p->m_fragNo++)
   {
     jam();
@@ -906,7 +907,7 @@ Dbtup::mt_scan_init(Uint32 tableId, Uint
 
   FragrecordPtr fragPtr;
   fragPtr.i = RNIL;
-  for (Uint32 i = 0; i<MAX_FRAG_PER_NODE; i++)
+  for (Uint32 i = 0; i<NDB_ARRAY_SIZE(tablePtr.p->fragid); i++)
   {
     if (tablePtr.p->fragid[i] == fragId)
     {
@@ -1011,8 +1012,10 @@ Dbtup::execBUILD_INDX_IMPL_REF(Signal* s
   ndbrequire(buildPtr.p->m_outstanding);
   buildPtr.p->m_outstanding--;
 
+  TablerecPtr tablePtr;
   buildPtr.p->m_errorCode = (BuildIndxImplRef::ErrorCode)err;
-  buildPtr.p->m_fragNo = MAX_FRAG_PER_NODE; // No point in starting any more
+  // No point in starting any more
+  buildPtr.p->m_fragNo = NDB_ARRAY_SIZE(tablePtr.p->fragrec);
   buildIndexOffline_table_readonly(signal, ptr);
 }
 

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp	2011-09-02 07:40:42 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp	2011-11-16 05:47:02 +0000
@@ -910,7 +910,7 @@ bool Dbtup::addfragtotab(Tablerec* const
                          Uint32 fragId,
                          Uint32 fragIndex)
 {
-  for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+  for (Uint32 i = 0; i < NDB_ARRAY_SIZE(regTabPtr->fragid); i++) {
     jam();
     if (regTabPtr->fragid[i] == RNIL) {
       jam();
@@ -926,7 +926,7 @@ void Dbtup::getFragmentrec(FragrecordPtr
                            Uint32 fragId,
                            Tablerec* const regTabPtr)
 {
-  for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+  for (Uint32 i = 0; i < NDB_ARRAY_SIZE(regTabPtr->fragid); i++) {
     jam();
     if (regTabPtr->fragid[i] == fragId) {
       jam();
@@ -1015,7 +1015,7 @@ Dbtup::execALTER_TAB_REQ(Signal *signal)
   case AlterTabReq::AlterTableSumaEnable:
   {
     FragrecordPtr regFragPtr;
-    for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++)
+    for (Uint32 i = 0; i < NDB_ARRAY_SIZE(regTabPtr.p->fragrec); i++)
     {
       jam();
       if ((regFragPtr.i = regTabPtr.p->fragrec[i]) != RNIL)
@@ -1044,7 +1044,7 @@ Dbtup::execALTER_TAB_REQ(Signal *signal)
     Uint32 gci = signal->theData[signal->getLength() - 1];
     regTabPtr.p->m_reorg_suma_filter.m_gci_hi = gci;
     FragrecordPtr regFragPtr;
-    for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++)
+    for (Uint32 i = 0; i < NDB_ARRAY_SIZE(regTabPtr.p->fragrec); i++)
     {
       jam();
       if ((regFragPtr.i = regTabPtr.p->fragrec[i]) != RNIL)
@@ -1320,7 +1320,7 @@ Dbtup::handleAlterTableCommit(Signal *si
   if (AlterTableReq::getReorgFragFlag(req->changeMask))
   {
     FragrecordPtr regFragPtr;
-    for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++)
+    for (Uint32 i = 0; i < NDB_ARRAY_SIZE(regTabPtr->fragrec); i++)
     {
       jam();
       if ((regFragPtr.i = regTabPtr->fragrec[i]) != RNIL)
@@ -1363,7 +1363,7 @@ Dbtup::handleAlterTableComplete(Signal *
   if (AlterTableReq::getReorgCompleteFlag(req->changeMask))
   {
     FragrecordPtr regFragPtr;
-    for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++)
+    for (Uint32 i = 0; i < NDB_ARRAY_SIZE(regTabPtr->fragrec); i++)
     {
       jam();
       if ((regFragPtr.i = regTabPtr->fragrec[i]) != RNIL)
@@ -1892,7 +1892,7 @@ void Dbtup::releaseAlterTabOpRec(AlterTa
 
 void Dbtup::deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId) 
 {
-  for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+  for (Uint32 i = 0; i < NDB_ARRAY_SIZE(regTabPtr->fragid); i++) {
     jam();
     if (regTabPtr->fragid[i] == fragId) {
       jam();
@@ -1991,7 +1991,7 @@ void Dbtup::releaseFragment(Signal* sign
   Uint32 fragIndex = RNIL;
   Uint32 fragId = RNIL;
   Uint32 i = 0;
-  for (i = 0; i < MAX_FRAG_PER_NODE; i++) {
+  for (i = 0; i < NDB_ARRAY_SIZE(tabPtr.p->fragid); i++) {
     jam();
     if (tabPtr.p->fragid[i] != RNIL) {
       jam();
@@ -2464,11 +2464,11 @@ Dbtup::drop_fragment_fsremove_done(Signa
   Uint32 logfile_group_id = fragPtr.p->m_logfile_group_id ;
 
   Uint32 i;
-  for(i= 0; i<MAX_FRAG_PER_NODE; i++)
+  for(i= 0; i<NDB_ARRAY_SIZE(tabPtr.p->fragrec); i++)
     if(tabPtr.p->fragrec[i] == fragPtr.i)
       break;
 
-  ndbrequire(i != MAX_FRAG_PER_NODE);
+  ndbrequire(i != NDB_ARRAY_SIZE(tabPtr.p->fragrec));
   tabPtr.p->fragid[i]= RNIL;
   tabPtr.p->fragrec[i]= RNIL;
   releaseFragrec(fragPtr);
@@ -2694,7 +2694,7 @@ Dbtup::execDROP_FRAG_REQ(Signal* signal)
   tabPtr.p->m_dropTable.tabUserPtr = req->senderData;
 
   Uint32 fragIndex = RNIL;
-  for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++)
+  for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tabPtr.p->fragid); i++)
   {
     jam();
     if (tabPtr.p->fragid[i] == req->fragId)

=== modified file 'storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp	2011-10-13 17:13:02 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp	2011-11-16 05:47:02 +0000
@@ -120,7 +120,7 @@ public:
 
 private:
   // sizes are in words (Uint32)
-  STATIC_CONST( MaxIndexFragments = MAX_FRAG_PER_NODE );
+  STATIC_CONST( MaxIndexFragments = MAX_FRAG_PER_LQH );
   STATIC_CONST( MaxIndexAttributes = MAX_ATTRIBUTES_IN_INDEX );
   STATIC_CONST( MaxAttrDataSize = 2 * MAX_ATTRIBUTES_IN_INDEX + MAX_KEY_SIZE_IN_WORDS );
   STATIC_CONST( MaxXfrmDataSize = MaxAttrDataSize * MAX_XFRM_MULTIPLY);

=== modified file 'storage/ndb/src/kernel/vm/GlobalData.hpp'
--- a/storage/ndb/src/kernel/vm/GlobalData.hpp	2011-09-15 20:21:59 +0000
+++ b/storage/ndb/src/kernel/vm/GlobalData.hpp	2011-11-14 12:02:56 +0000
@@ -75,6 +75,7 @@ struct GlobalData {
   Uint32     ndbMtLqhWorkers;
   Uint32     ndbMtLqhThreads;
   Uint32     ndbMtTcThreads;
+  Uint32     ndbLogParts;
   
   GlobalData(){ 
     theSignalId = 0; 
@@ -85,6 +86,7 @@ struct GlobalData {
     ndbMtLqhWorkers = 0;
     ndbMtLqhThreads = 0;
     ndbMtTcThreads = 0;
+    ndbLogParts = 0;
 #ifdef GCP_TIMER_HACK
     gcp_timer_limit = 0;
 #endif

=== modified file 'storage/ndb/src/kernel/vm/pc.hpp'
--- a/storage/ndb/src/kernel/vm/pc.hpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/kernel/vm/pc.hpp	2011-11-16 05:47:02 +0000
@@ -165,7 +165,7 @@
 // need large value.
 /* ------------------------------------------------------------------------- */
 #define NO_OF_FRAG_PER_NODE 1
-#define MAX_FRAG_PER_NODE 8
+#define MAX_FRAG_PER_LQH 8
 
 /**
 * DIH allocates fragments in chunk for fast find of fragment record.

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-trunk-cluster branch (magnus.blaudd:3409 to 3410) magnus.blaudd16 Nov