List:Commits« Previous MessageNext Message »
From:jonas oreland Date:September 2 2011 8:10am
Subject:bzr push into mysql-5.5-cluster branch (jonas.oreland:3453 to 3455)
View as plain text  
 3455 jonas oreland	2011-09-02
      ndb - fix ndb_alter_table_error which failed due to different locking in 5.5 than in 5.1...

    modified:
      sql/ha_ndbcluster.cc
 3454 jonas oreland	2011-09-02 [merge]
      ndb - merge 71 to 55

    added:
      mysql-test/suite/ndb/r/ndb_alter_table_error.result
      mysql-test/suite/ndb/t/ndb_alter_table_error.test
    modified:
      mysql-test/suite/ndb/r/ndb_basic.result
      mysql-test/suite/ndb/r/ndb_index_stat.result
      mysql-test/suite/ndb/r/ndb_statistics1.result
      mysql-test/suite/ndb/t/ndb_basic.test
      mysql-test/suite/ndb/t/ndb_index_stat.test
      mysql-test/suite/ndb_rpl/r/ndb_rpl_conflict.result
      mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict.test
      sql/ha_ndbcluster.cc
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/resources/com/mysql/clusterj/core/Bundle.properties
      storage/ndb/include/kernel/signaldata/QueryTree.hpp
      storage/ndb/include/mgmapi/mgmapi_config_parameters.h
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp
      storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
      storage/ndb/src/kernel/ndbd.cpp
      storage/ndb/src/kernel/vm/CMakeLists.txt
      storage/ndb/src/kernel/vm/Configuration.cpp
      storage/ndb/src/kernel/vm/Configuration.hpp
      storage/ndb/src/kernel/vm/mt.cpp
      storage/ndb/src/kernel/vm/mt_thr_config.cpp
      storage/ndb/src/kernel/vm/mt_thr_config.hpp
      storage/ndb/src/mgmsrv/MgmtSrvr.cpp
      storage/ndb/src/mgmsrv/Services.cpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
      storage/ndb/src/ndbapi/NdbTransaction.cpp
      storage/ndb/src/ndbapi/ndberror.c
      storage/ndb/test/ndbapi/testBlobs.cpp
      storage/ndb/test/run-test/daily-basic-tests.txt
      storage/ndb/tools/ndb_config.cpp
 3453 Jonas Oreland	2011-09-01
      ndb - wl5482 - user tables in ndb

    added:
      mysql-test/suite/ndb/r/ndb_dist_priv.result
      mysql-test/suite/ndb/t/have_ndb_dist_priv.inc
      mysql-test/suite/ndb/t/ndb_dist_priv.test
      mysql-test/suite/rpl_ndb/r/rpl_ndb_dist_priv.result
      mysql-test/suite/rpl_ndb/t/rpl_ndb_dist_priv.test
      sql/ndb_dist_priv_util.h
      storage/ndb/tools/HOWTO_distribute_privileges.txt
      storage/ndb/tools/ndb_dist_priv.sql
    modified:
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster_binlog.cc
      sql/ha_ndbcluster_binlog.h
      storage/ndb/tools/restore/restore_main.cpp
=== added file 'mysql-test/suite/ndb/r/ndb_alter_table_error.result'
--- a/mysql-test/suite/ndb/r/ndb_alter_table_error.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb/r/ndb_alter_table_error.result	2011-09-02 06:37:29 +0000
@@ -0,0 +1,19 @@
+*******************************
+* basic concurent online alter test
+*******************************
+* With Commit
+*******************************
+CREATE TABLE t1 (a INT UNSIGNED, 
+b INT UNSIGNED not null, 
+primary key(a)) ENGINE NDB;
+begin;
+update t1 set b = b + 1 where a = 1;
+ALTER OFFLINE TABLE t1 ADD c CHAR(19);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+commit;
+ALTER OFFLINE TABLE t1 ADD c CHAR(19);
+create unique index b_unq on t1(b) using hash;
+ERROR 23000: Can't write, because of unique constraint, to table 't1'
+update t1 set b = b - 1 where a = 1;
+create unique index b_unq on t1(b) using hash;
+DROP TABLE t1;

=== modified file 'mysql-test/suite/ndb/r/ndb_basic.result'
--- a/mysql-test/suite/ndb/r/ndb_basic.result	2011-07-08 15:05:28 +0000
+++ b/mysql-test/suite/ndb/r/ndb_basic.result	2011-09-02 07:40:42 +0000
@@ -570,6 +570,12 @@ length(b)
 13000
 13000
 drop table t1;
+create table t1 ( c50 char(255), c49 char(255), c48 char(255), c47 char(255), c46 char(255), c45 char(255), c44 char(255), c43 char(255), c42 char(255), c41 char(255), c40 char(255), c39 char(255), c38 char(255), c37 char(255), c36 char(255), c35 char(255), c34 char(255), c33 char(255), c32 char(255), c31 char(255), c30 char(255), c29 char(255), c28 char(255), c27 char(255), c26 char(255), c25 char(255), c24 char(255), c23 char(255), c22 char(255), c21 char(255), c20 char(255), c19 char(255), c18 char(255), c17 char(255), c16 char(255), c15 char(255), c14 char(255), c13 char(255), c12 char(255), c11 char(255), c10 char(255), c9 char(255), c8 char(255), c7 char(255), c6 char(255), c5 char(255), c4 char(255), c3 char(255), c2 char(255), c1 char(255), primary key using hash(c1)) engine=ndb;
+ERROR HY000: Can't create table 'test.t1' (errno: 851)
+show warnings;
+Level	Code	Message
+Warning	1296	Got error 851 'Maximum 8052 bytes of FIXED columns supported, use varchar or COLUMN_FORMAT DYNMIC instead' from NDB
+Error	1005	Can't create table 'test.t1' (errno: 851)
 create table `t1` (`a` int, b int, primary key (a,b)) engine=ndb partition by key(`a`,`b`,`a`);
 ERROR HY000: Duplicate partition field name 'a'
 create table t1 (

=== modified file 'mysql-test/suite/ndb/r/ndb_index_stat.result'
--- a/mysql-test/suite/ndb/r/ndb_index_stat.result	2011-08-17 10:36:01 +0000
+++ b/mysql-test/suite/ndb/r/ndb_index_stat.result	2011-09-02 07:40:42 +0000
@@ -475,6 +475,18 @@ select count(*) from t1 where f > '222';
 count(*)
 1
 drop table t1;
+create table t1 (a1 int, b1 int, primary key(b1), key(a1)) engine=ndbcluster partition by key() partitions 1;
+create table t2 (b2 int, c2 int, primary key(b2,c2)) engine=ndbcluster partition by key() partitions 1;
+# table t1 is only for forcing record by key count for table t2 that should be near 50 (not 1)
+analyze table t1, t2;
+Table	Op	Msg_type	Msg_text
+test.t1	analyze	status	OK
+test.t2	analyze	status	OK
+explain select * from t1, t2 where b2 = b1 and a1 = 1;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t1	ref	PRIMARY,a1	a1	5	const	2	#
+1	SIMPLE	t2	ref	PRIMARY	PRIMARY	4	test.t1.b1	50	#
+drop table t1, t2;
 set @is_enable = @is_enable_default;
 set @is_enable = NULL;
 # is_enable_on=0 is_enable_off=1

=== modified file 'mysql-test/suite/ndb/r/ndb_statistics1.result'
--- a/mysql-test/suite/ndb/r/ndb_statistics1.result	2011-08-17 10:36:01 +0000
+++ b/mysql-test/suite/ndb/r/ndb_statistics1.result	2011-09-02 07:40:42 +0000
@@ -77,7 +77,7 @@ SELECT * FROM t10000 AS X JOIN t10000 AS
 ON Y.I=X.I AND Y.J = X.I;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
 1	SIMPLE	X	ALL	I	NULL	NULL	NULL	10000	Parent of 2 pushed join@1
-1	SIMPLE	Y	ref	J,I	I	10	test.X.I,test.X.I	1	Child of 'X' in pushed join@1; Using where
+1	SIMPLE	Y	ref	J,I	J	5	test.X.I	1	Child of 'X' in pushed join@1; Using where
 EXPLAIN
 SELECT * FROM t100 WHERE k < 42;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
@@ -145,11 +145,11 @@ id	select_type	table	type	possible_keys
 EXPLAIN
 SELECT * FROM t10000 WHERE J = 0 AND K < 1;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
-1	SIMPLE	t10000	range	PRIMARY,J	PRIMARY	4	NULL	2	Using where with pushed condition
+1	SIMPLE	t10000	ref	PRIMARY,J	J	5	const	2	Using where with pushed condition
 EXPLAIN
 SELECT * FROM t10000 WHERE J = 0 AND K BETWEEN 1 AND 10;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
-1	SIMPLE	t10000	range	PRIMARY,J	PRIMARY	4	NULL	2	Using where with pushed condition
+1	SIMPLE	t10000	ref	PRIMARY,J	J	5	const	2	Using where with pushed condition
 EXPLAIN
 SELECT * FROM t10000 WHERE J = 0 AND K = 1;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra

=== added file 'mysql-test/suite/ndb/t/ndb_alter_table_error.test'
--- a/mysql-test/suite/ndb/t/ndb_alter_table_error.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb/t/ndb_alter_table_error.test	2011-09-02 06:37:29 +0000
@@ -0,0 +1,49 @@
+-- source include/not_embedded.inc
+-- source include/have_multi_ndb.inc
+
+--echo *******************************
+--echo * basic concurent online alter test
+--echo *******************************
+--echo * With Commit
+--echo *******************************
+
+connection server1;
+
+CREATE TABLE t1 (a INT UNSIGNED, 
+                 b INT UNSIGNED not null, 
+                 primary key(a)) ENGINE NDB;
+let $v=100;
+disable_query_log;
+while ($v)
+{
+  --eval INSERT INTO t1 VALUES($v,$v); 
+  dec $v;
+}
+enable_query_log;
+
+
+connection server2;
+begin;
+update t1 set b = b + 1 where a = 1;
+
+connection server1;
+--error 1205
+ALTER OFFLINE TABLE t1 ADD c CHAR(19);
+
+connection server2;
+commit;
+
+connection server1;
+--error 0
+ALTER OFFLINE TABLE t1 ADD c CHAR(19);
+
+connection server1;
+--error 1169
+create unique index b_unq on t1(b) using hash;
+
+update t1 set b = b - 1 where a = 1;
+
+connection server1;
+create unique index b_unq on t1(b) using hash;
+
+DROP TABLE t1;

=== modified file 'mysql-test/suite/ndb/t/ndb_basic.test'
--- a/mysql-test/suite/ndb/t/ndb_basic.test	2011-07-06 07:14:46 +0000
+++ b/mysql-test/suite/ndb/t/ndb_basic.test	2011-09-02 07:40:42 +0000
@@ -438,6 +438,21 @@ insert into t1 values (1,@v13000), (2,@v
 select length(b) from t1 order by 1;
 drop table t1;
 
+let $i=50;
+let $separator=;
+let $sql=create table t1 (;
+while ($i)
+{
+  let $sql=$sql$separator c$i char(255);
+  let $separator=,;
+  dec $i;
+}
+let $sql=$sql, primary key using hash(c1)) engine=ndb;
+
+--error 1005
+eval $sql; # eval the sql and create the table
+show warnings;
+
 #
 # test bug#53354 - crash when creating partitioned table with multiple columns in the partition key
 #

=== modified file 'mysql-test/suite/ndb/t/ndb_index_stat.test'
--- a/mysql-test/suite/ndb/t/ndb_index_stat.test	2011-07-23 14:35:37 +0000
+++ b/mysql-test/suite/ndb/t/ndb_index_stat.test	2011-08-28 13:29:22 +0000
@@ -309,5 +309,30 @@ while ($i)
 }
 drop table t1;
 
+#
+# Check estimates of records per key for partial keys using unique/primary ordered index
+#
+
+create table t1 (a1 int, b1 int, primary key(b1), key(a1)) engine=ndbcluster partition by key() partitions 1;
+create table t2 (b2 int, c2 int, primary key(b2,c2)) engine=ndbcluster partition by key() partitions 1;
+
+--disable_query_log
+let $i = 100;
+while ($i)
+{
+  eval insert into t1 (a1,b1) values ($i,$i);
+  eval insert into t2 (b2,c2) values ($i mod 2, $i div 2);
+  dec $i;
+}
+--enable_query_log
+
+--echo # table t1 is only for forcing record by key count for table t2 that should be near 50 (not 1)
+analyze table t1, t2;
+# Hide Extra column
+--replace_column 10 #
+explain select * from t1, t2 where b2 = b1 and a1 = 1;
+
+drop table t1, t2;
+
 set @is_enable = @is_enable_default;
 source ndb_index_stat_enable.inc;

=== modified file 'mysql-test/suite/ndb_rpl/r/ndb_rpl_conflict.result'
--- a/mysql-test/suite/ndb_rpl/r/ndb_rpl_conflict.result	2011-05-13 07:40:50 +0000
+++ b/mysql-test/suite/ndb_rpl/r/ndb_rpl_conflict.result	2011-09-01 15:12:11 +0000
@@ -66,6 +66,169 @@ select * from t1_max_delete_win;
 a	b	X
 delete from t1_old;
 delete from t1_max;
+drop table t1_old, t1_max, t1_max_delete_win;
+delete from t1_old$EX;
+delete from t1_max$EX;
+delete from t1_max_delete_win$EX;
+delete from t1_old$EX;
+delete from t1_max$EX;
+delete from t1_max_delete_win$EX;
+create table t1_old (a int primary key, b longtext, X int unsigned) engine = ndb;
+create table t1_max (a int primary key, b longtext, X int unsigned) engine = ndb;
+create table t1_max_delete_win (a int primary key, b longtext, X int unsigned) engine = ndb;
+"Test 3"
+insert into t1_old values (1, repeat('Initial X=1',1000), 1);
+insert into t1_max values (1, repeat('Initial X=1',1000), 1);
+insert into t1_max_delete_win values (1, repeat('Initial X=1',1000), 1);
+update t1_old set X = 2, b=repeat('Slave X=2',1001);
+update t1_max set X = 2, b=repeat('Slave X=2',1001);
+update t1_max_delete_win set X = 2, b=repeat('Slave X=2',1001);
+update t1_old set X = 3, b=repeat('Master X=3',1002);
+update t1_max set X = 3, b=repeat('Master X=3',1002);
+update t1_max_delete_win set X = 3, b=repeat('Master X=3',1002);
+"Expect t1_old to contain slave row, and t1_max* to contain master row"
+select a, left(b, 20), length(b), X from t1_old;
+a	left(b, 20)	length(b)	X
+1	Slave X=2Slave X=2Sl	9009	2
+select a, left(b, 20), length(b), X from t1_max;
+a	left(b, 20)	length(b)	X
+1	Master X=3Master X=3	10020	3
+select a, left(b, 20), length(b), X from t1_max_delete_win;
+a	left(b, 20)	length(b)	X
+1	Master X=3Master X=3	10020	3
+Expect t1_old to have 1 entry, and t1_max* to have no entries
+select server_id, master_server_id, count, a from t1_old$EX order by count;
+server_id	master_server_id	count	a
+2	1	1	1
+select server_id, master_server_id, count, a from t1_max$EX order by count;
+server_id	master_server_id	count	a
+select server_id, master_server_id, count, a from t1_max_delete_win$EX order by count;
+server_id	master_server_id	count	a
+delete from t1_max$EX;
+delete from t1_max_delete_win$EX;
+delete from t1_old$EX;
+update t1_old set X = 3, b=repeat('Master X=3', 1002);
+"Test 4"
+update t1_old set X = 4, b=repeat('Slave X=4',2000);
+update t1_max set X = 4, b=repeat('Slave X=4',2000);
+update t1_max_delete_win set X = 4, b=repeat('Slave X=4',2000);
+delete from t1_old;
+delete from t1_max;
+delete from t1_max_delete_win;
+"Expect t1_old and t1_max to contain slave row, and t1_max_delete_win to be empty(as master)"
+select a, left(b, 20), length(b), X from t1_old;
+a	left(b, 20)	length(b)	X
+1	Slave X=4Slave X=4Sl	18000	4
+select a, left(b, 20), length(b), X from t1_max;
+a	left(b, 20)	length(b)	X
+1	Slave X=4Slave X=4Sl	18000	4
+select a, left(b, 20), length(b), X from t1_max_delete_win;
+a	left(b, 20)	length(b)	X
+Expect t1_old and t1_max to contain 1 entry, and t1_max_delete_win to be empty
+select server_id, master_server_id, count, a from t1_old$EX order by count;
+server_id	master_server_id	count	a
+2	1	2	1
+select server_id, master_server_id, count, a from t1_max$EX order by count;
+server_id	master_server_id	count	a
+2	1	1	1
+select server_id, master_server_id, count, a from t1_max_delete_win$EX order by count;
+server_id	master_server_id	count	a
+delete from t1_max$EX;
+delete from t1_max_delete_win$EX;
+delete from t1_old$EX;
+delete from t1_old;
+delete from t1_max;
+delete from t1_max_delete_win;
+delete from t1_old;
+delete from t1_max;
+delete from t1_max_delete_win;
+"Test 5"
+Test that Updates affecting Blobs are rejected
+correctly on the slave
+drop table t1_max;
+create table t1_max (a int primary key, b int, c longtext, d longtext, X int unsigned) engine = ndb;
+insert into t1_max values (1, 1, repeat("B", 10000), repeat("E", 10001), 1);
+insert into t1_max values (2, 2, repeat("A", 10002), repeat("T", 10003), 1);
+update t1_max set X=20;
+Initial values on Slave
+select a,b,SHA1(c),length(c), SHA1(d), length(d), X from t1_max order by a;
+a	b	SHA1(c)	length(c)	SHA1(d)	length(d)	X
+1	1	4a222e18b539cdefbf0960eaa7f4362a4976e1e0	10000	9641d473ab1bd921263190eee074397084933e2d	10001	20
+2	2	f833241322c062495632923d74314a6a5c23034d	10002	2dad269dfa115f6c7e53e91a73251e597aab8fe9	10003	20
+Originate update which will be rejected
+update t1_max set c=repeat("Z", 10006), d=repeat("I", 10005), X=2 where a=1;
+Check slave has rejected due to lower version
+select a,b,SHA1(c),length(c), SHA1(d), length(d), X from t1_max order by a;
+a	b	SHA1(c)	length(c)	SHA1(d)	length(d)	X
+1	1	4a222e18b539cdefbf0960eaa7f4362a4976e1e0	10000	9641d473ab1bd921263190eee074397084933e2d	10001	20
+2	2	f833241322c062495632923d74314a6a5c23034d	10002	2dad269dfa115f6c7e53e91a73251e597aab8fe9	10003	20
+Originate delete which will be rejected (due to NDB-OLD) algorith
+delete from t1_max where a=1;
+Check slave has rejected due to before image mismatch
+select a,b,SHA1(c),length(c), SHA1(d), length(d), X from t1_max order by a;
+a	b	SHA1(c)	length(c)	SHA1(d)	length(d)	X
+1	1	4a222e18b539cdefbf0960eaa7f4362a4976e1e0	10000	9641d473ab1bd921263190eee074397084933e2d	10001	20
+2	2	f833241322c062495632923d74314a6a5c23034d	10002	2dad269dfa115f6c7e53e91a73251e597aab8fe9	10003	20
+Originate insert which will be rejected (as row exists)
+insert into t1_max values (1, 1, repeat("R", 10004), repeat("A", 10007), 1);
+Check slave has rejected due to row existing already
+select a,b,SHA1(c),length(c), SHA1(d), length(d), X from t1_max order by a;
+a	b	SHA1(c)	length(c)	SHA1(d)	length(d)	X
+1	1	4a222e18b539cdefbf0960eaa7f4362a4976e1e0	10000	9641d473ab1bd921263190eee074397084933e2d	10001	20
+2	2	f833241322c062495632923d74314a6a5c23034d	10002	2dad269dfa115f6c7e53e91a73251e597aab8fe9	10003	20
+Expect t1_max to have 3 entries
+select server_id, master_server_id, count, a from t1_old$EX order by count;
+server_id	master_server_id	count	a
+select server_id, master_server_id, count, a from t1_max$EX order by count;
+server_id	master_server_id	count	a
+2	1	1	1
+2	1	2	1
+2	1	3	1
+select server_id, master_server_id, count, a from t1_max_delete_win$EX order by count;
+server_id	master_server_id	count	a
+delete from t1_max$EX;
+delete from t1_max_delete_win$EX;
+delete from t1_old$EX;
+Test 6
+Check that non-Blob related operations in a batch with a Blob
+operation are still subject to conflict detection.
+
+insert into mysql.ndb_replication values ("test", "t2_max", 0, 7, "NDB$MAX(X)");
+create table `t2_max$EX`
+  (server_id int unsigned,
+master_server_id int unsigned,
+master_epoch bigint unsigned,
+count int unsigned,
+a int not null,
+primary key(server_id, master_server_id, master_epoch, count)) engine ndb;
+create table t2_max (a int primary key, b int, X bigint unsigned) engine=ndb;
+insert into t2_max values (1,1,10), (2,2,10), (3,3,10), (4,4,10), (5,5,10);
+Now issue a transaction with a successful Blob op, and unsuccessful
+non-Blob op.  Check that the Blob op succeeds, and the unsuccessful
+non-Blob op is handled as expected.
+begin;
+update t2_max set b=b+1, X=1 where a=3;
+update t1_max set c=repeat("R", 10008), d=repeat("A", 10009), X = 21 where a=1;
+commit;
+Contents on Slave
+Expect Blob data applied to t1_max, no update applied to t2_max
+select a,b,left(c,1), length(c), left(d,1), length(d), X from t1_max where a=1;
+a	b	left(c,1)	length(c)	left(d,1)	length(d)	X
+1	1	R	10008	A	10009	21
+select * from t2_max order by a;
+a	b	X
+1	1	10
+2	2	10
+3	3	10
+4	4	10
+5	5	10
+Expect No conflict in t1_max, 1 conflict in t2_max
+select server_id, master_server_id, count, a from t1_max$EX order by count;
+server_id	master_server_id	count	a
+select server_id, master_server_id, count, a from t2_max$EX order by count;
+server_id	master_server_id	count	a
+2	1	1	3
+drop table t2_max, t2_max$EX;
 "Cleanup"
 drop table mysql.ndb_replication;
 drop table t1_old, `t1_old$EX`, t1_max, `t1_max$EX`, t1_max_delete_win, `t1_max_delete_win$EX`;

=== modified file 'mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict.test'
--- a/mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict.test	2011-05-13 07:40:50 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict.test	2011-09-01 15:12:11 +0000
@@ -105,6 +105,209 @@ select * from t1_max_delete_win;
 delete from t1_old;
 delete from t1_max;
 
+--connection master
+
+# Now test with Blobs
+drop table t1_old, t1_max, t1_max_delete_win;
+delete from t1_old$EX;
+delete from t1_max$EX;
+delete from t1_max_delete_win$EX;
+
+--sync_slave_with_master
+--connection slave
+# Delete on slave, as $EX table ops don't replicate
+delete from t1_old$EX;
+delete from t1_max$EX;
+delete from t1_max_delete_win$EX;
+
+--connection master
+
+create table t1_old (a int primary key, b longtext, X int unsigned) engine = ndb;
+create table t1_max (a int primary key, b longtext, X int unsigned) engine = ndb;
+create table t1_max_delete_win (a int primary key, b longtext, X int unsigned) engine = ndb;
+
+--sync_slave_with_master
+
+###############
+--echo "Test 3"
+
+--connection master
+insert into t1_old values (1, repeat('Initial X=1',1000), 1);
+insert into t1_max values (1, repeat('Initial X=1',1000), 1);
+insert into t1_max_delete_win values (1, repeat('Initial X=1',1000), 1);
+--sync_slave_with_master
+
+--connection slave
+update t1_old set X = 2, b=repeat('Slave X=2',1001);
+update t1_max set X = 2, b=repeat('Slave X=2',1001);
+update t1_max_delete_win set X = 2, b=repeat('Slave X=2',1001);
+
+--connection master
+update t1_old set X = 3, b=repeat('Master X=3',1002);
+update t1_max set X = 3, b=repeat('Master X=3',1002);
+update t1_max_delete_win set X = 3, b=repeat('Master X=3',1002);
+--sync_slave_with_master
+
+--connection slave
+--echo "Expect t1_old to contain slave row, and t1_max* to contain master row"
+select a, left(b, 20), length(b), X from t1_old;
+select a, left(b, 20), length(b), X from t1_max;
+select a, left(b, 20), length(b), X from t1_max_delete_win;
+
+--echo Expect t1_old to have 1 entry, and t1_max* to have no entries
+select server_id, master_server_id, count, a from t1_old$EX order by count;
+select server_id, master_server_id, count, a from t1_max$EX order by count;
+select server_id, master_server_id, count, a from t1_max_delete_win$EX order by count;
+
+delete from t1_max$EX;
+delete from t1_max_delete_win$EX;
+delete from t1_old$EX;
+
+# synchronize
+update t1_old set X = 3, b=repeat('Master X=3', 1002);
+
+###############
+--echo "Test 4"
+
+--connection slave
+update t1_old set X = 4, b=repeat('Slave X=4',2000);
+update t1_max set X = 4, b=repeat('Slave X=4',2000);
+update t1_max_delete_win set X = 4, b=repeat('Slave X=4',2000);
+
+--connection master
+delete from t1_old;
+delete from t1_max;
+delete from t1_max_delete_win;
+--sync_slave_with_master
+
+--connection slave
+--echo "Expect t1_old and t1_max to contain slave row, and t1_max_delete_win to be empty(as master)"
+select a, left(b, 20), length(b), X from t1_old;
+select a, left(b, 20), length(b), X from t1_max;
+select a, left(b, 20), length(b), X from t1_max_delete_win;
+
+--echo Expect t1_old and t1_max to contain 1 entry, and t1_max_delete_win to be empty
+select server_id, master_server_id, count, a from t1_old$EX order by count;
+select server_id, master_server_id, count, a from t1_max$EX order by count;
+select server_id, master_server_id, count, a from t1_max_delete_win$EX order by count;
+
+delete from t1_max$EX;
+delete from t1_max_delete_win$EX;
+delete from t1_old$EX;
+
+delete from t1_old;
+delete from t1_max;
+delete from t1_max_delete_win;
+
+--connection master
+delete from t1_old;
+delete from t1_max;
+delete from t1_max_delete_win;
+
+#################
+--echo "Test 5"
+
+--echo Test that Updates affecting Blobs are rejected
+--echo correctly on the slave
+drop table t1_max;
+create table t1_max (a int primary key, b int, c longtext, d longtext, X int unsigned) engine = ndb;
+
+insert into t1_max values (1, 1, repeat("B", 10000), repeat("E", 10001), 1);
+insert into t1_max values (2, 2, repeat("A", 10002), repeat("T", 10003), 1);
+
+--sync_slave_with_master
+--connection slave
+
+# Bump up tuple versions
+update t1_max set X=20;
+
+--echo Initial values on Slave
+select a,b,SHA1(c),length(c), SHA1(d), length(d), X from t1_max order by a;
+
+--connection master
+--echo Originate update which will be rejected
+update t1_max set c=repeat("Z", 10006), d=repeat("I", 10005), X=2 where a=1;
+
+--sync_slave_with_master
+--connection slave
+--echo Check slave has rejected due to lower version
+select a,b,SHA1(c),length(c), SHA1(d), length(d), X from t1_max order by a;
+
+--connection master
+--echo Originate delete which will be rejected (due to NDB-OLD) algorith
+delete from t1_max where a=1;
+
+--sync_slave_with_master
+--connection slave
+--echo Check slave has rejected due to before image mismatch
+select a,b,SHA1(c),length(c), SHA1(d), length(d), X from t1_max order by a;
+
+--connection master
+--echo Originate insert which will be rejected (as row exists)
+insert into t1_max values (1, 1, repeat("R", 10004), repeat("A", 10007), 1);
+
+--sync_slave_with_master
+--connection slave
+--echo Check slave has rejected due to row existing already
+select a,b,SHA1(c),length(c), SHA1(d), length(d), X from t1_max order by a;
+
+--echo Expect t1_max to have 3 entries
+select server_id, master_server_id, count, a from t1_old$EX order by count;
+select server_id, master_server_id, count, a from t1_max$EX order by count;
+select server_id, master_server_id, count, a from t1_max_delete_win$EX order by count;
+
+delete from t1_max$EX;
+delete from t1_max_delete_win$EX;
+delete from t1_old$EX;
+
+--connection master
+
+#######
+--echo Test 6
+--echo Check that non-Blob related operations in a batch with a Blob
+--echo operation are still subject to conflict detection.
+--echo
+insert into mysql.ndb_replication values ("test", "t2_max", 0, 7, "NDB$MAX(X)");
+
+create table `t2_max$EX`
+  (server_id int unsigned,
+   master_server_id int unsigned,
+   master_epoch bigint unsigned,
+   count int unsigned,
+   a int not null,
+   primary key(server_id, master_server_id, master_epoch, count)) engine ndb;
+
+create table t2_max (a int primary key, b int, X bigint unsigned) engine=ndb;
+
+insert into t2_max values (1,1,10), (2,2,10), (3,3,10), (4,4,10), (5,5,10);
+
+--sync_slave_with_master
+
+--connection master
+--echo Now issue a transaction with a successful Blob op, and unsuccessful
+--echo non-Blob op.  Check that the Blob op succeeds, and the unsuccessful
+--echo non-Blob op is handled as expected.
+
+begin;
+update t2_max set b=b+1, X=1 where a=3; # conflicts
+update t1_max set c=repeat("R", 10008), d=repeat("A", 10009), X = 21 where a=1; # ok
+commit;
+
+--sync_slave_with_master
+
+--connection slave
+--echo Contents on Slave
+--echo Expect Blob data applied to t1_max, no update applied to t2_max
+select a,b,left(c,1), length(c), left(d,1), length(d), X from t1_max where a=1;
+select * from t2_max order by a;
+
+--echo Expect No conflict in t1_max, 1 conflict in t2_max$EX
+select server_id, master_server_id, count, a from t1_max$EX order by count;
+select server_id, master_server_id, count, a from t2_max$EX order by count;
+
+--connection master
+drop table t2_max, t2_max$EX;
+
 
 ###############
 --echo "Cleanup"

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2011-09-01 12:36:37 +0000
+++ b/sql/ha_ndbcluster.cc	2011-09-02 08:09:35 +0000
@@ -1408,19 +1408,22 @@ void ha_ndbcluster::set_rec_per_key()
   */
   for (uint i=0 ; i < table_share->keys ; i++)
   {
+    bool is_unique_index= false;
     KEY* key_info= table->key_info + i;
     switch (get_index_type(i))
     {
-    case UNIQUE_ORDERED_INDEX:
-    case PRIMARY_KEY_ORDERED_INDEX:
     case UNIQUE_INDEX:
     case PRIMARY_KEY_INDEX:
     {
       // Index is unique when all 'key_parts' are specified,
       // else distribution is unknown and not specified here.
-      key_info->rec_per_key[key_info->key_parts-1]= 1;
+      is_unique_index= true;
       break;
     }
+    case UNIQUE_ORDERED_INDEX:
+    case PRIMARY_KEY_ORDERED_INDEX:
+      is_unique_index= true;
+      // intentional fall thru to logic for ordered index
     case ORDERED_INDEX:
       // 'Records pr. key' are unknown for non-unique indexes.
       // (May change when we get better index statistics.)
@@ -1450,6 +1453,11 @@ void ha_ndbcluster::set_rec_per_key()
     default:
       DBUG_ASSERT(false);
     }
+    // set rows per key to 1 for complete key given for unique/primary index
+    if (is_unique_index)
+    {
+      key_info->rec_per_key[key_info->key_parts-1]= 1;
+    }
   }
   DBUG_VOID_RETURN;
 }
@@ -7409,7 +7417,17 @@ THR_LOCK_DATA **ha_ndbcluster::store_loc
     
     if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables)
       lock_type= TL_READ;
-    
+
+    /**
+     * We need locks on source table when
+     *   doing offline alter...
+     * In 5.1 this worked due to TL_WRITE_ALLOW_READ...
+     * but that has been removed in 5.5
+     * I simply add this to get it...
+     */
+    if (sql_command == SQLCOM_ALTER_TABLE)
+      lock_type = TL_WRITE;
+
     m_lock.type=lock_type;
   }
   *to++= &m_lock;

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java	2011-07-05 17:19:11 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java	2011-08-29 08:17:26 +0000
@@ -512,6 +512,7 @@ public class SessionImpl implements Sess
     public int deletePersistentAll(DomainTypeHandler<?> domainTypeHandler) {
         startAutoTransaction();
         Table storeTable = domainTypeHandler.getStoreTable();
+        String tableName = storeTable.getName();
         ScanOperation op = null;
         int count = 0;
         try {
@@ -521,7 +522,7 @@ public class SessionImpl implements Sess
             failAutoTransaction();
             // TODO add table name to the error message
             throw new ClusterJException(
-                    local.message("ERR_Select_Scan"), ex);
+                    local.message("ERR_Delete_All", tableName), ex);
         }
         endAutoTransaction();
         return count;
@@ -1218,7 +1219,7 @@ public class SessionImpl implements Sess
             return result;
         } catch (ClusterJException ex) {
             throw new ClusterJException(
-                    local.message("ERR_Unique_Scan", storeTable.getName(), storeIndex.getName()), ex);
+                    local.message("ERR_Unique_Index", storeTable.getName(), storeIndex.getName()), ex);
         }
     }
 
@@ -1234,7 +1235,7 @@ public class SessionImpl implements Sess
             return result;
         } catch (ClusterJException ex) {
             throw new ClusterJException(
-                    local.message("ERR_Select_Scan", storeTable), ex);
+                    local.message("ERR_Select", storeTable), ex);
         }
     }
 
@@ -1267,7 +1268,7 @@ public class SessionImpl implements Sess
            return result;
        } catch (ClusterJException ex) {
            throw new ClusterJException(
-                   local.message("ERR_Index_Delete", storeTable.getName(), storeIndex.getName()), ex);
+                   local.message("ERR_Unique_Index_Delete", storeTable.getName(), storeIndex.getName()), ex);
        }
    }
 

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/resources/com/mysql/clusterj/core/Bundle.properties'
--- a/storage/ndb/clusterj/clusterj-core/src/main/resources/com/mysql/clusterj/core/Bundle.properties	2011-06-20 23:34:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/resources/com/mysql/clusterj/core/Bundle.properties	2011-08-29 08:17:26 +0000
@@ -21,8 +21,6 @@ ERR_Create_Ndb:Failed to create Ndb for
 ERR_Get_NdbFilter:Failure getting NdbFilter.
 ERR_Get_NdbTable:Failure getting NdbTable for class {0}, table {1}. \
 Verify that the table is defined with ENGINE=NDB.
-ERR_Get_NdbDictionary:Failure getting NdbDictionary.
-ERR_Get_NdbIndex:Error getting NdbIndex for table {0} index {1} and {2}.
 ERR_NumericFormat:Property {0} with value {1} must be numeric.
 ERR_Unmatched_Methods:Unmatched method(s) {0}, {1}.
 ERR_Not_A_Member:The property {0} is not a member of {1}.
@@ -38,17 +36,6 @@ ERR_Get_Constructor:Cannot get Construct
 ERR_Annotate_Set_Method:Property {0}: Cannot annotate set methods with {1}.
 ERR_Primary_Field_Type:For class {0}, primary key column {1}: field type {2} is not supported.
 ERR_Primary_Column_Type:For class {0}, primary key column {1}: column type {2} is not supported.
-ERR_No_Primary_Key:For class {0}, there is no @PrimaryKey annotation \
-and no field annotated with PrimaryKey.
-ERR_Multiple_Primary_Key:For class {0}, there is no @PrimaryKey annotation \
-on the class but @PrimaryKey annotations on fields {1}.
-ERR_Compound_Primary_Key:For class {0}, compound primary key fields must \
-all be declared on the class and optionally declared on each field. \
-The class @PrimaryKey annotation has columns {1}. \
-The fields {2} are annotated as primary key fields.
-ERR_PrimaryKey_Column_And_Columns:For class {0}, the @PrimaryKey annotation \
-must contain either column or columns but not both. Column is {1} \
-and columns length is {2}.
 ERR_Value_Delegate:For field {0} column {1} valueDelegate {2}, error executing {3}.
 ERR_Filter_Value:Error performing filter operation for field {0} column {1} \
 valueDelegate {2}, value {3}.
@@ -62,21 +49,21 @@ ERR_Exception_On_Method:Exception on met
 ERR_Only_Parameters:Operation {0} is implemented only for parameters.
 ERR_Operation_Not_Supported:Operation {0} is not supported for {1}.
 ERR_Unsupported_Field_Type:Unsupported field type {0} for {1}.
-ERR_Index_Scan:Error executing getSelectIndexScanOperation on table {0} using index {1}.
-ERR_Index_Delete:Error executing getIndexDeleteOperation on table {0} using index {1}.
-ERR_Table_Scan:Error executing getSelectScanOperation on table {0}.
-ERR_Select_Scan:Error executing getSelectOperation on table {0}.
-ERR_Unique_Scan:Error executing getSelectUniqueOperation on table {0} using index {1}.
+ERR_Index_Scan:Error executing getIndexScanOperation on table {0} using index {1}.
+ERR_Unique_Index_Delete:Error executing getUniqueIndexDeleteOperation on table {0} using index {1}.
+ERR_Table_Scan:Error executing getTableScanOperation on table {0}.
+ERR_Select:Error executing getSelectOperation on table {0}.
+ERR_Unique_Index:Error executing getUniqueIndexOperation on table {0} using index {1}.
 ERR_Insert:Error executing getInsertOperation on table {0}.
 ERR_Update:Error executing getUpdateOperation on table {0}.
 ERR_Write:Error executing getWriteOperation on table {0}.
+ERR_Delete_All:Error deleting all on table {0}.
 ERR_Illegal_Scan_Type:Illegal scan type {0}.
 ERR_Session_Closed:The session has been closed; no operations can be performed.
 ERR_Create_Query:Parameter to CreateQuery must be ClusterJ DomainObject.
 ERR_Ndb_Start:Error starting NdbTransaction.
 ERR_Ndb_Commit:Error committing NdbTransaction.
 ERR_Next_Result_Illegal:Error: nextResult() returned an illegal value: {0}
-ERR_Select_Scan:Error on SelectScanOperation.
 ERR_Transaction_Must_Not_Be_Active_For_Method:A transaction must not be active \
 for method {0}.
 ERR_Transaction_Must_Be_Active_For_Method:A transaction must be active \

=== modified file 'storage/ndb/include/kernel/signaldata/QueryTree.hpp'
--- a/storage/ndb/include/kernel/signaldata/QueryTree.hpp	2011-05-04 11:45:33 +0000
+++ b/storage/ndb/include/kernel/signaldata/QueryTree.hpp	2011-09-01 11:46:45 +0000
@@ -271,9 +271,11 @@ struct QN_ScanIndexParameters
 {
   Uint32 len;
   Uint32 requestInfo;
-  Uint32 batchSize;    // (bytes << 16) | (rows)
+  Uint32 batchSize;    // (bytes << 11) | (rows)
   Uint32 resultData;   // Api connect ptr
   STATIC_CONST ( NodeSize = 4 );
+  // Number of bits for representing row count in 'batchSize'.
+  STATIC_CONST ( BatchRowBits = 11 );
 
   enum ScanIndexParamBits
   {

=== modified file 'storage/ndb/include/mgmapi/mgmapi_config_parameters.h'
--- a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h	2011-09-02 07:40:42 +0000
@@ -195,6 +195,7 @@
 #define CFG_DB_INDEX_STAT_UPDATE_DELAY   626
 
 #define CFG_DB_MAX_DML_OPERATIONS_PER_TRANSACTION 627
+#define CFG_DB_MT_THREAD_CONFIG          628
 
 #define CFG_NODE_ARBIT_RANK           200
 #define CFG_NODE_ARBIT_DELAY          201

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-08-25 06:35:39 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-09-02 07:40:42 +0000
@@ -4326,15 +4326,17 @@ Dbspj::scanIndex_build(Build_context& ct
     treeNodePtr.p->m_info = &g_ScanIndexOpInfo;
     treeNodePtr.p->m_bits |= TreeNode::T_ATTR_INTERPRETED;
     treeNodePtr.p->m_bits |= TreeNode::T_NEED_REPORT_BATCH_COMPLETED;
-    treeNodePtr.p->m_batch_size = batchSize & 0xFFFF;
+    treeNodePtr.p->m_batch_size = 
+      batchSize & ~(0xFFFFFFFF << QN_ScanIndexParameters::BatchRowBits);
 
     ScanFragReq*dst=(ScanFragReq*)treeNodePtr.p->m_scanindex_data.m_scanFragReq;
     dst->senderData = treeNodePtr.i;
     dst->resultRef = reference();
     dst->resultData = treeNodePtr.i;
     dst->savePointId = ctx.m_savepointId;
-    dst->batch_size_rows  = batchSize & 0xFFFF;
-    dst->batch_size_bytes = batchSize >> 16;
+    dst->batch_size_rows  = 
+      batchSize & ~(0xFFFFFFFF << QN_ScanIndexParameters::BatchRowBits);
+    dst->batch_size_bytes = batchSize >> QN_ScanIndexParameters::BatchRowBits;
 
     Uint32 transId1 = requestPtr.p->m_transId[0];
     Uint32 transId2 = requestPtr.p->m_transId[1];
@@ -5020,12 +5022,13 @@ Dbspj::scanIndex_parent_batch_complete(S
    * When parent's batch is complete, we send our batch
    */
   const ScanFragReq * org = (const ScanFragReq*)data.m_scanFragReq;
-  ndbassert(org->batch_size_rows >= data.m_fragCount - data.m_frags_complete);
+  ndbrequire(org->batch_size_rows > 0);
 
   if (treeNodePtr.p->m_bits & TreeNode::T_SCAN_PARALLEL)
   {
     jam();
-    data.m_parallelism = data.m_fragCount - data.m_frags_complete;
+    data.m_parallelism = MIN(data.m_fragCount - data.m_frags_complete, 
+                             org->batch_size_rows);
   }
   else if (data.m_firstExecution)
   {
@@ -5051,8 +5054,9 @@ Dbspj::scanIndex_parent_batch_complete(S
      * in the other direction is more costly).
      */
     Int32 parallelism = 
-      static_cast<Int32>(data.m_parallelismStat.getMean()
-                         - 2 * data.m_parallelismStat.getStdDev());
+      static_cast<Int32>(MIN(data.m_parallelismStat.getMean()
+                             - 2 * data.m_parallelismStat.getStdDev(),
+                             org->batch_size_rows));
 
     if (parallelism < 1)
     {
@@ -5117,17 +5121,9 @@ Dbspj::scanIndex_parent_batch_complete(S
 
   data.m_firstExecution = false;
 
-  if (treeNodePtr.p->m_bits & TreeNode::T_SCAN_PARALLEL)
-  {
-    ndbrequire((data.m_frags_outstanding + data.m_frags_complete) ==
-               data.m_fragCount);
-  }
-  else
-  {
-    ndbrequire(static_cast<Uint32>(data.m_frags_outstanding + 
-                                   data.m_frags_complete) <=
-               data.m_fragCount);
-  }
+  ndbrequire(static_cast<Uint32>(data.m_frags_outstanding + 
+                                 data.m_frags_complete) <=
+             data.m_fragCount);
 
   data.m_batch_chunks = 1;
   requestPtr.p->m_cnt_active++;
@@ -5575,7 +5571,8 @@ Dbspj::scanIndex_execSCAN_NEXTREQ(Signal
         data.m_largestBatchBytes < org->batch_size_bytes/data.m_parallelism)
     {
       jam();
-      data.m_parallelism = data.m_fragCount - data.m_frags_complete;
+      data.m_parallelism = MIN(data.m_fragCount - data.m_frags_complete,
+                               org->batch_size_rows);
       if (data.m_largestBatchRows > 0)
       {
         jam();
@@ -5624,7 +5621,8 @@ Dbspj::scanIndex_execSCAN_NEXTREQ(Signal
   else
   {
     jam();
-    data.m_parallelism = data.m_fragCount - data.m_frags_complete;
+    data.m_parallelism = MIN(data.m_fragCount - data.m_frags_complete,
+                             org->batch_size_rows);
   }
 
   const Uint32 bs_rows = org->batch_size_rows/data.m_parallelism;

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp	2011-09-01 18:42:31 +0000
@@ -34,7 +34,9 @@ public:
   static Uint32 getNullFlagOffset(const Uint32 &);
   static Uint32 getNullFlagByteOffset(const Uint32 & desc);
   static Uint32 getNullFlagBitOffset(const Uint32 &);
-  
+
+  static Uint32 getMaxOffset();
+
   Uint32 m_data;
 
   friend class NdbOut& operator<<(class NdbOut&, const AttributeOffset&);
@@ -143,6 +145,13 @@ AttributeOffset::getNullFlagBitOffset(co
   return (getNullFlagPos(desc) & AO_NULL_FLAG_WORD_MASK);
 }
 
+inline
+Uint32
+AttributeOffset::getMaxOffset()
+{
+  return AO_ATTRIBUTE_OFFSET_MASK;
+}
+
 class NdbOut&
 operator<<(class NdbOut&, const AttributeOffset&);
 

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-06-30 12:19:14 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-09-02 07:40:42 +0000
@@ -2537,7 +2537,7 @@ private:
   void handleCharsetPos(Uint32 csNumber, CHARSET_INFO** charsetArray,
                         Uint32 noOfCharsets,
                         Uint32 & charsetIndex, Uint32 & attrDes2);
-  void computeTableMetaData(Tablerec *regTabPtr);
+  Uint32 computeTableMetaData(Tablerec *regTabPtr);
 
 //------------------------------------------------------------------
 //------------------------------------------------------------------

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp	2011-09-02 07:40:42 +0000
@@ -434,7 +434,12 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal*
   }
 
   /* Compute table aggregate metadata. */
-  computeTableMetaData(regTabPtr.p);
+  terrorCode = computeTableMetaData(regTabPtr.p);
+  if (terrorCode)
+  {
+    jam();
+    goto error;
+  }
 
 #if 0
   ndbout << *regTabPtr.p << endl;
@@ -1453,7 +1458,7 @@ Dbtup::handleCharsetPos(Uint32 csNumber,
   This function (re-)computes aggregated metadata. It is called for
   both ALTER TABLE and CREATE TABLE.
  */
-void
+Uint32
 Dbtup::computeTableMetaData(Tablerec *regTabPtr)
 {
   Uint32 dyn_null_words[2];
@@ -1610,6 +1615,10 @@ Dbtup::computeTableMetaData(Tablerec *re
         BitmaskImpl::set(dyn_null_words[ind], regTabPtr->dynVarSizeMask[ind], null_pos);
       }
     }
+    if (off > AttributeOffset::getMaxOffset())
+    {
+      return ZTOO_LARGE_TUPLE_ERROR;
+    }
     AttributeOffset::setOffset(attrDes2, off);
     *tabDesc++= attrDes2;
   }
@@ -1681,6 +1690,7 @@ Dbtup::computeTableMetaData(Tablerec *re
 
   setUpQueryRoutines(regTabPtr);
   setUpKeyArray(regTabPtr);
+  return 0;
 }
 
 void

=== modified file 'storage/ndb/src/kernel/ndbd.cpp'
--- a/storage/ndb/src/kernel/ndbd.cpp	2011-01-30 23:13:49 +0000
+++ b/storage/ndb/src/kernel/ndbd.cpp	2011-08-30 12:00:48 +0000
@@ -297,69 +297,45 @@ get_multithreaded_config(EmulatorData& e
 {
   // multithreaded is compiled in ndbd/ndbmtd for now
   globalData.isNdbMt = SimulatedBlock::isMultiThreaded();
-  if (!globalData.isNdbMt) {
+  if (!globalData.isNdbMt)
+  {
     ndbout << "NDBMT: non-mt" << endl;
     return 0;
   }
 
-  ndb_mgm_configuration * conf = ed.theConfiguration->getClusterConfig();
-  if (conf == 0)
-  {
-    abort();
-  }
-
-  ndb_mgm_configuration_iterator * p =
-    ndb_mgm_create_configuration_iterator(conf, CFG_SECTION_NODE);
-  if (ndb_mgm_find(p, CFG_NODE_ID, globalData.ownId))
-  {
-    abort();
-  }
+  THRConfig & conf = ed.theConfiguration->m_thr_config;
 
-  Uint32 mtthreads = 0;
-  ndb_mgm_get_int_parameter(p, CFG_DB_MT_THREADS, &mtthreads);
-  ndbout << "NDBMT: MaxNoOfExecutionThreads=" << mtthreads << endl;
+  Uint32 threadcount = conf.getThreadCount();
+  ndbout << "NDBMT: MaxNoOfExecutionThreads=" << threadcount << endl;
 
   globalData.isNdbMtLqh = true;
 
   {
-    Uint32 classic = 0;
-    ndb_mgm_get_int_parameter(p, CFG_NDBMT_CLASSIC, &classic);
-    if (classic)
-      globalData.isNdbMtLqh = false;
-
-    const char* p = NdbEnv_GetEnv("NDB_MT_LQH", (char*)0, 0);
-    if (p != 0)
+    if (conf.getMtClassic())
     {
-      if (strstr(p, "NOPLEASE") != 0)
-        globalData.isNdbMtLqh = false;
-      else
-        globalData.isNdbMtLqh = true;
+      globalData.isNdbMtLqh = false;
     }
   }
 
   if (!globalData.isNdbMtLqh)
     return 0;
 
-  Uint32 threads = 0;
-  switch(mtthreads){
-  case 0:
-  case 1:
-  case 2:
-  case 3:
-    threads = 1; // TC + receiver + SUMA + LQH
-    break;
-  case 4:
-  case 5:
-  case 6:
-    threads = 2; // TC + receiver + SUMA + 2 * LQH
-    break;
-  default:
-    threads = 4; // TC + receiver + SUMA + 4 * LQH
-  }
-
-  ndb_mgm_get_int_parameter(p, CFG_NDBMT_LQH_THREADS, &threads);
+  Uint32 threads = conf.getThreadCount(THRConfig::T_LDM);
   Uint32 workers = threads;
-  ndb_mgm_get_int_parameter(p, CFG_NDBMT_LQH_WORKERS, &workers);
+  {
+    ndb_mgm_configuration * conf = ed.theConfiguration->getClusterConfig();
+    if (conf == 0)
+    {
+      abort();
+    }
+    ndb_mgm_configuration_iterator * p =
+      ndb_mgm_create_configuration_iterator(conf, CFG_SECTION_NODE);
+    if (ndb_mgm_find(p, CFG_NODE_ID, globalData.ownId))
+    {
+      abort();
+    }
+    ndb_mgm_get_int_parameter(p, CFG_NDBMT_LQH_WORKERS, &workers);
+  }
 
 #ifdef VM_TRACE
   // testing
@@ -368,9 +344,6 @@ get_multithreaded_config(EmulatorData& e
     p = NdbEnv_GetEnv("NDBMT_LQH_WORKERS", (char*)0, 0);
     if (p != 0)
       workers = atoi(p);
-    p = NdbEnv_GetEnv("NDBMT_LQH_THREADS", (char*)0, 0);
-    if (p != 0)
-      threads = atoi(p);
   }
 #endif
 
@@ -654,10 +627,11 @@ ndbd_run(bool foreground, int report_fd,
     // Ignore error
   }
 
+  theConfig->setupConfiguration();
+
   if (get_multithreaded_config(globalEmulatorData))
     ndbd_exit(-1);
 
-  theConfig->setupConfiguration();
   systemInfo(* theConfig, * theConfig->m_logLevel);
 
   NdbThread* pWatchdog = globalEmulatorData.theWatchDog->doStart();

=== modified file 'storage/ndb/src/kernel/vm/CMakeLists.txt'
--- a/storage/ndb/src/kernel/vm/CMakeLists.txt	2011-08-27 13:06:42 +0000
+++ b/storage/ndb/src/kernel/vm/CMakeLists.txt	2011-09-02 07:40:42 +0000
@@ -38,6 +38,7 @@ ADD_LIBRARY(ndbkernel STATIC
     Ndbinfo.cpp
     NdbinfoTables.cpp
     ArenaPool.cpp
+    mt_thr_config.cpp
 )
 
 ADD_LIBRARY(ndbsched STATIC

=== modified file 'storage/ndb/src/kernel/vm/Configuration.cpp'
--- a/storage/ndb/src/kernel/vm/Configuration.cpp	2011-08-27 13:06:42 +0000
+++ b/storage/ndb/src/kernel/vm/Configuration.cpp	2011-09-02 07:40:42 +0000
@@ -33,6 +33,7 @@
 #include <kernel_config_parameters.h>
 
 #include <util/ConfigValues.hpp>
+#include <NdbEnv.h>
 
 #include <ndbapi_limits.h>
 
@@ -392,7 +393,77 @@ Configuration::setupConfiguration(){
     t = globalEmulatorData.theWatchDog ->setCheckInterval(t);
     _timeBetweenWatchDogCheckInitial = t;
   }
-  
+
+  const char * thrconfigstring = NdbEnv_GetEnv("NDB_MT_THREAD_CONFIG",
+                                               (char*)0, 0);
+  if (thrconfigstring ||
+      iter.get(CFG_DB_MT_THREAD_CONFIG, &thrconfigstring) == 0)
+  {
+    int res = m_thr_config.do_parse(thrconfigstring);
+    if (res != 0)
+    {
+      ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG,
+                "Invalid configuration fetched, invalid ThreadConfig",
+                m_thr_config.getErrorMessage());
+    }
+  }
+  else
+  {
+    const char * mask;
+    if (iter.get(CFG_DB_EXECUTE_LOCK_CPU, &mask) == 0)
+    {
+      int res = m_thr_config.setLockExecuteThreadToCPU(mask);
+      if (res < 0)
+      {
+        // Could not parse LockExecuteThreadToCPU mask
+        g_eventLogger->warning("Failed to parse 'LockExecuteThreadToCPU=%s' "
+                               "(error: %d), ignoring it!",
+                               mask, res);
+      }
+    }
+
+    Uint32 maintCPU = NO_LOCK_CPU;
+    iter.get(CFG_DB_MAINT_LOCK_CPU, &maintCPU);
+    if (maintCPU == 65535)
+      maintCPU = NO_LOCK_CPU; // Ignore old default(may come from old mgmd)
+    if (maintCPU != NO_LOCK_CPU)
+      m_thr_config.setLockMaintThreadsToCPU(maintCPU);
+
+    Uint32 mtthreads = 0;
+    iter.get(CFG_DB_MT_THREADS, &mtthreads);
+
+    Uint32 classic = 0;
+    iter.get(CFG_NDBMT_CLASSIC, &classic);
+    const char* p = NdbEnv_GetEnv("NDB_MT_LQH", (char*)0, 0);
+    if (p != 0)
+    {
+      if (strstr(p, "NOPLEASE") != 0)
+        classic = 1;
+    }
+
+    Uint32 lqhthreads = 0;
+    iter.get(CFG_NDBMT_LQH_THREADS, &lqhthreads);
+
+    int res = m_thr_config.do_parse(mtthreads, lqhthreads, classic);
+    if (res != 0)
+    {
+      ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG,
+                "Invalid configuration fetched, invalid thread configuration",
+                m_thr_config.getErrorMessage());
+    }
+  }
+  if (thrconfigstring)
+  {
+    ndbout_c("ThreadConfig: input: %s parsed: %s",
+             thrconfigstring,
+             m_thr_config.getConfigString());
+  }
+  else
+  {
+    ndbout_c("ThreadConfig (old ndb_mgmd): parsed: %s",
+             m_thr_config.getConfigString());
+  }
+
   ConfigValues* cf = ConfigValuesFactory::extractCurrentSection(iter.m_config);
 
   if(m_clusterConfigIter)

=== modified file 'storage/ndb/src/kernel/vm/Configuration.hpp'
--- a/storage/ndb/src/kernel/vm/Configuration.hpp	2011-08-27 13:06:42 +0000
+++ b/storage/ndb/src/kernel/vm/Configuration.hpp	2011-09-02 07:40:42 +0000
@@ -27,6 +27,7 @@
 #include <NdbThread.h>
 #include <util/SparseBitmask.hpp>
 #include <util/UtilBuffer.hpp>
+#include "mt_thr_config.hpp"
 
 enum ThreadTypes
 {
@@ -124,6 +125,7 @@ public:
   ndb_mgm_configuration* getClusterConfig() const { return m_clusterConfig; }
   Uint32 get_config_generation() const; 
 
+  THRConfigApplier m_thr_config;
 private:
   friend class Cmvmi;
   friend class Qmgr;

=== modified file 'storage/ndb/src/kernel/vm/mt.cpp'
--- a/storage/ndb/src/kernel/vm/mt.cpp	2011-08-27 13:06:42 +0000
+++ b/storage/ndb/src/kernel/vm/mt.cpp	2011-09-02 07:40:42 +0000
@@ -3251,28 +3251,11 @@ sendprioa_STOP_FOR_CRASH(const struct th
   */
   static thr_job_buffer dummy_buffer;
 
-  /*
-   * Before we had three main threads with fixed block assignment.
-   * Now there is also worker instances (we send to LQH instance).
+  /**
+   * Pick any instance running in this thread
    */
-  Uint32 main = 0;
-  Uint32 instance = 0;
-  if (dst == 0)
-    main = NDBCNTR;
-  else if (dst == 1)
-    main = DBLQH;
-  else if (dst >= NUM_MAIN_THREADS && dst < NUM_MAIN_THREADS + num_lqh_threads)
-  {
-    main = DBLQH;
-    instance = dst - NUM_MAIN_THREADS + 1;
-  }
-  else if (dst == receiver_thread_no)
-    main = CMVMI;
-  else
-    require(false);
-  Uint32 bno = numberToBlock(main, instance);
-  require(block2ThreadId(main, instance) == dst);
   struct thr_data * dstptr = rep->m_thread + dst;
+  Uint32 bno = dstptr->m_instance_list[0];
 
   memset(&signalT.header, 0, sizeof(SignalHeader));
   signalT.header.theVerId_signalNumber   = GSN_STOP_FOR_CRASH;

=== modified file 'storage/ndb/src/kernel/vm/mt_thr_config.cpp'
--- a/storage/ndb/src/kernel/vm/mt_thr_config.cpp	2011-08-26 09:57:03 +0000
+++ b/storage/ndb/src/kernel/vm/mt_thr_config.cpp	2011-08-30 14:13:15 +0000
@@ -500,6 +500,34 @@ THRConfig::getConfigString()
   return m_cfg_string.c_str();
 }
 
+Uint32
+THRConfig::getThreadCount() const
+{
+  // Note! not counting T_MAINT
+  Uint32 cnt = 0;
+  for (Uint32 i = 0; i < NDB_ARRAY_SIZE(m_threads); i++)
+  {
+    if (i != T_MAINT)
+    {
+      cnt += m_threads[i].size();
+    }
+  }
+  return cnt;
+}
+
+Uint32
+THRConfig::getThreadCount(T_Type type) const
+{
+  for (Uint32 i = 0; i < NDB_ARRAY_SIZE(m_threads); i++)
+  {
+    if (i == (Uint32)type)
+    {
+      return m_threads[i].size();
+    }
+  }
+  return 0;
+}
+
 const char *
 THRConfig::getErrorMessage() const
 {
@@ -516,7 +544,7 @@ static
 char *
 skipblank(char * str)
 {
-  while (isblank(* str))
+  while (isspace(* str))
     str++;
   return str;
 }
@@ -564,7 +592,7 @@ parseUnsigned(char *& str, unsigned * ds
   str = skipblank(str);
   char * endptr = 0;
   errno = 0;
-  long val = strtoll(str, &endptr, 0);
+  long val = strtol(str, &endptr, 0);
   if (errno == ERANGE)
     return -1;
   if (val < 0 || Int64(val) > 0xFFFFFFFF)
@@ -585,7 +613,7 @@ parseBitmask(char *& str, SparseBitmask
   if (len == 0)
     return -1;
 
-  while (isblank(str[len-1]))
+  while (isspace(str[len-1]))
     len--;
   if (str[len-1] == ',')
     len--;
@@ -830,7 +858,7 @@ THRConfig::do_parse(const char * ThreadC
 unsigned
 THRConfig::createCpuSet(const SparseBitmask& mask)
 {
-  for (size_t i = 0; i < m_cpu_sets.size(); i++)
+  for (unsigned i = 0; i < m_cpu_sets.size(); i++)
     if (m_cpu_sets[i].equal(mask))
       return i;
 

=== modified file 'storage/ndb/src/kernel/vm/mt_thr_config.hpp'
--- a/storage/ndb/src/kernel/vm/mt_thr_config.hpp	2011-08-26 09:57:03 +0000
+++ b/storage/ndb/src/kernel/vm/mt_thr_config.hpp	2011-08-30 12:00:48 +0000
@@ -64,6 +64,9 @@ public:
   const char * getErrorMessage() const;
   const char * getInfoMessage() const;
 
+  Uint32 getThreadCount() const; // Don't count FS/MAINT thread
+  Uint32 getThreadCount(T_Type) const;
+  Uint32 getMtClassic() const { return m_classic; }
 private:
   struct T_Thread
   {

=== modified file 'storage/ndb/src/mgmsrv/MgmtSrvr.cpp'
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2011-07-04 16:30:34 +0000
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2011-09-01 13:09:24 +0000
@@ -760,7 +760,7 @@ MgmtSrvr::get_packed_config_from_node(No
 
   if (getNodeType(nodeId) != NDB_MGM_NODE_TYPE_NDB)
   {
-    error.assfmt("Node %d is not an NDB node. ", nodeId);
+    error.assfmt("Node %d is not a data node. ", nodeId);
     DBUG_RETURN(false);
   }
 

=== modified file 'storage/ndb/src/mgmsrv/Services.cpp'
--- a/storage/ndb/src/mgmsrv/Services.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/mgmsrv/Services.cpp	2011-09-02 07:40:42 +0000
@@ -623,12 +623,12 @@ MgmApiSession::getConfig(Parser_t::Conte
 
   UtilBuffer packed;
 
-  bool success =
-   (from_node == 0 || from_node == m_mgmsrv.getOwnNodeId()) ?
-                m_mgmsrv.get_packed_config((ndb_mgm_node_type)nodetype,
-                                           pack64, error) :
-                m_mgmsrv.get_packed_config_from_node(from_node,
-                                                     pack64, error);
+  bool success = (from_node > 0) ?
+                 m_mgmsrv.get_packed_config_from_node(from_node,
+                                                      pack64, error) :
+                 m_mgmsrv.get_packed_config((ndb_mgm_node_type)nodetype,
+                                            pack64, error);
+
   if (!success)
   {
     m_output->println("result: %s", error.c_str());

=== modified file 'storage/ndb/src/ndbapi/NdbQueryOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-08-25 06:35:39 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-09-02 07:40:42 +0000
@@ -47,7 +47,8 @@
  */
 #define UNUSED(x) ((void)(x))
 
-//#define TEST_NEXTREQ
+// To force usage of SCAN_NEXTREQ even for small scan result sets
+static const bool testNextReq = false;
 
 /* Various error codes that are not specific to NdbQuery. */
 static const int Err_TupleNotFound = 626;
@@ -4255,18 +4256,11 @@ NdbQueryOperationImpl
   if (myClosestScan != NULL)
   {
 
-#ifdef TEST_NEXTREQ
     // To force usage of SCAN_NEXTREQ even for small scans resultsets
-    if (this == &getRoot())
+    if (testNextReq)
     {
       m_maxBatchRows = 1;
     }
-    else
-    {
-      m_maxBatchRows =
-        myClosestScan->getQueryOperationDef().getTable().getFragmentCount();
-    }
-#endif
 
     const Ndb& ndb = *getQuery().getNdbTransaction().getNdb();
 
@@ -4312,14 +4306,6 @@ NdbQueryOperationImpl
   
   if (m_operationDef.isScanOperation())
   {
-    if (myClosestScan != &getRoot())
-    {
-      /** Each SPJ block instance will scan each fragment, so the batch size
-       * cannot be smaller than the number of fragments.*/
-      maxBatchRows = 
-        MAX(maxBatchRows, myClosestScan->getQueryOperationDef().
-            getTable().getFragmentCount());
-    }
     // Use this value for current op and all lookup descendants.
     m_maxBatchRows = maxBatchRows;
     // Return max(Unit32) to avoid interfering with batch size calculation 
@@ -4478,16 +4464,20 @@ NdbQueryOperationImpl::prepareAttrInfo(U
                                       batchRows,
                                       batchByteSize,
                                       firstBatchRows);
-    assert(batchRows==getMaxBatchRows());
-    assert(batchRows==firstBatchRows);
+    assert(batchRows == firstBatchRows);
+    assert(batchRows == getMaxBatchRows());
     assert(m_parallelism == Parallelism_max ||
            m_parallelism == Parallelism_adaptive);
     if (m_parallelism == Parallelism_max)
     {
       requestInfo |= QN_ScanIndexParameters::SIP_PARALLEL;
     }
-    param->requestInfo = requestInfo; 
-    param->batchSize = ((Uint16)batchByteSize << 16) | (Uint16)firstBatchRows;
+    param->requestInfo = requestInfo;
+    // Check that both values fit in param->batchSize.
+    assert(getMaxBatchRows() < (1<<QN_ScanIndexParameters::BatchRowBits));
+    assert(batchByteSize < (1 << (sizeof param->batchSize * 8
+                                  - QN_ScanIndexParameters::BatchRowBits)));
+    param->batchSize = (batchByteSize << QN_ScanIndexParameters::BatchRowBits)
+                       | getMaxBatchRows();
     param->resultData = getIdOfReceiver();
     QueryNodeParameters::setOpLen(param->len, paramType, length);
   }

=== modified file 'storage/ndb/src/ndbapi/NdbTransaction.cpp'
--- a/storage/ndb/src/ndbapi/NdbTransaction.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/ndbapi/NdbTransaction.cpp	2011-09-02 07:40:42 +0000
@@ -290,7 +290,8 @@ NdbTransaction::execute(ExecType aTypeOf
 			NdbOperation::AbortOption abortOption,
 			int forceSend)
 {
-  NdbError savedError= theError;
+  NdbError existingTransError = theError;
+  NdbError firstTransError;
   DBUG_ENTER("NdbTransaction::execute");
   DBUG_PRINT("enter", ("aTypeOfExec: %d, abortOption: %d", 
 		       aTypeOfExec, abortOption));
@@ -374,8 +375,8 @@ NdbTransaction::execute(ExecType aTypeOf
           if (tBlob->preExecute(tExecType, batch) == -1)
 	  {
             ret = -1;
-	    if(savedError.code==0)
-	      savedError= theError;
+	    if (firstTransError.code==0)
+	      firstTransError= theError;
 	  }
           tBlob = tBlob->theNext;
         }
@@ -413,8 +414,8 @@ NdbTransaction::execute(ExecType aTypeOf
             if (tBlob->preCommit() == -1)
 	    {
 	      ret = -1;
-	      if(savedError.code==0)
-		savedError= theError;
+	      if (firstTransError.code==0)
+		firstTransError= theError;
 	    }
             tBlob = tBlob->theNext;
           }
@@ -440,8 +441,6 @@ NdbTransaction::execute(ExecType aTypeOf
 		       NdbOperation::DefaultAbortOption,
 		       forceSend) == -1)
     {
-      if(savedError.code==0)
-	savedError= theError;
       /**
        * We abort the execute here. But we still need to put the split-off
        * operation list back into the transaction object, or we will get a
@@ -463,9 +462,13 @@ NdbTransaction::execute(ExecType aTypeOf
           theCompletedLastOp = tCompletedLastOp;
       }
 
+      /* executeNoBlobs will have set transaction error */
       DBUG_RETURN(-1);
     }
 
+    /* Capture any trans error left by the execute() in case it gets trampled */
+    if (firstTransError.code==0)
+      firstTransError= theError;
 
 #ifdef ndb_api_crash_on_complex_blob_abort
     assert(theFirstOpInList == NULL && theLastOpInList == NULL);
@@ -483,8 +486,8 @@ NdbTransaction::execute(ExecType aTypeOf
             if (tBlob->postExecute(tExecType) == -1)
 	    {
               ret = -1;
-	      if(savedError.code==0)
-		savedError= theError;
+	      if (firstTransError.code==0)
+		firstTransError= theError;
 	    }
             tBlob = tBlob->theNext;
           }
@@ -520,8 +523,37 @@ NdbTransaction::execute(ExecType aTypeOf
   }
 #endif
 
-  if(savedError.code!=0 && theError.code==4350) // Trans already aborted
-      theError= savedError;
+  /* Sometimes the original error is trampled by 'Trans already aborted',
+   * detect this case and attempt to restore the original error
+   */
+  if (theError.code == 4350) // Trans already aborted
+  {
+    DBUG_PRINT("info", ("Trans already aborted, existingTransError.code %u, "
+                        "firstTransError.code %u",
+                        existingTransError.code,
+                        firstTransError.code));
+    if (existingTransError.code != 0)
+    {
+      theError = existingTransError;
+    }
+    else if (firstTransError.code != 0)
+    {
+      theError = firstTransError;
+    }
+  }
+
+  /* Generally return the first error which we encountered as
+   * the Trans error.  Caller can traverse the op list to
+   * get the full picture
+   */
+  if (firstTransError.code != 0)
+  {
+    DBUG_PRINT("info", ("Setting error to first error.  firstTransError.code = %u, "
+                        "theError.code = %u",
+                        firstTransError.code,
+                        theError.code));
+    theError = firstTransError;
+  }
 
   DBUG_RETURN(ret);
 }

=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2011-09-02 07:40:42 +0000
@@ -324,6 +324,8 @@ ErrorBundle ErrorCodes[] = {
   { 829,  DMEC, AE, "Corrupt data received for insert/update" },
   { 831,  DMEC, AE, "Too many nullable/bitfields in table definition" },
   { 850,  DMEC, AE, "Too long or too short default value"},
+  { 851,  DMEC, AE, "Maximum 8052 bytes of FIXED columns supported"
+    ", use varchar or COLUMN_FORMAT DYNAMIC instead" },
   { 876,  DMEC, AE, "876" },
   { 877,  DMEC, AE, "877" },
   { 878,  DMEC, AE, "878" },

=== modified file 'storage/ndb/test/ndbapi/testBlobs.cpp'
--- a/storage/ndb/test/ndbapi/testBlobs.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/ndbapi/testBlobs.cpp	2011-09-01 15:12:11 +0000
@@ -184,6 +184,7 @@ printusage()
     << "  -bug 27370  Potential inconsistent blob reads for ReadCommitted reads" << endl
     << "  -bug 36756  Handling execute(.., abortOption) and Blobs " << endl
     << "  -bug 45768  execute(Commit) after failing blob batch " << endl
+    << "  -bug 62321  Blob obscures ignored error codes in batch" << endl
     ;
 }
 
@@ -3860,6 +3861,124 @@ static int bugtest_48040()
 }
 
 
+static int bugtest_62321()
+{
+  /* Having a Blob operation in a batch with other operations
+   * causes the other operation's ignored error not to be
+   * set as the transaction error code after execution.
+   * This is used (e.g in MySQLD) to check for conflicts
+   */
+  DBG("bugtest_62321 : Error code from other ops in batch obscured");
+
+  /*
+     1) Setup table : 1 row exists, another doesnt
+     2) Start transaction
+     3) Define failing before op
+     4) Define Blob op with/without post-exec part
+     5) Define failing after op
+     6) Execute
+     7) Check results
+  */
+  calcTups(true);
+
+  /* Setup table */
+  Tup& tupExists = g_tups[0];
+  Tup& notExists = g_tups[1];
+  {
+    CHK((g_con= g_ndb->startTransaction()) != 0);
+    CHK((g_opr= g_con->getNdbOperation(g_opt.m_tname)) != 0);
+    CHK(g_opr->insertTuple() == 0);
+    CHK(g_opr->equal("PK1", tupExists.m_pk1) == 0);
+    if (g_opt.m_pk2chr.m_len != 0)
+    {
+      CHK(g_opr->equal("PK2", tupExists.m_pk2) == 0);
+      CHK(g_opr->equal("PK3", tupExists.m_pk3) == 0);
+    }
+    setUDpartId(tupExists, g_opr);
+    CHK(getBlobHandles(g_opr) == 0);
+
+    CHK(setBlobValue(tupExists) == 0);
+
+    CHK(g_con->execute(Commit) == 0);
+    g_con->close();
+  }
+
+  for (int scenario = 0; scenario < 4; scenario++)
+  {
+    DBG(" Scenario : " << scenario);
+    CHK((g_con= g_ndb->startTransaction()) != 0);
+    NdbOperation* failOp = NULL;
+    if ((scenario & 0x1) == 0)
+    {
+      DBG("  Fail op before");
+      /* Define failing op in batch before Blob op */
+      failOp= g_con->getNdbOperation(g_opt.m_tname);
+      CHK(failOp != 0);
+      CHK(failOp->readTuple() == 0);
+      CHK(failOp->equal("PK1", notExists.m_pk1) == 0);
+      if (g_opt.m_pk2chr.m_len != 0)
+      {
+        CHK(failOp->equal("PK2", notExists.m_pk2) == 0);
+        CHK(failOp->equal("PK3", notExists.m_pk3) == 0);
+      }
+      setUDpartId(notExists, failOp);
+      CHK(failOp->getValue("PK1") != 0);
+      CHK(failOp->setAbortOption(NdbOperation::AO_IgnoreError) == 0);
+    }
+
+    /* Now define successful Blob op */
+    CHK((g_opr= g_con->getNdbOperation(g_opt.m_tname)) != 0);
+    CHK(g_opr->readTuple() == 0);
+    CHK(g_opr->equal("PK1", tupExists.m_pk1) == 0);
+    if (g_opt.m_pk2chr.m_len != 0)
+    {
+      CHK(g_opr->equal("PK2", tupExists.m_pk2) == 0);
+      CHK(g_opr->equal("PK3", tupExists.m_pk3) == 0);
+    }
+    setUDpartId(tupExists, g_opr);
+    CHK(getBlobHandles(g_opr) == 0);
+
+    CHK(getBlobValue(tupExists) == 0);
+
+
+    /* Define failing batch op after Blob op if not defined before */
+    if (failOp == 0)
+    {
+      DBG("  Fail op after");
+      failOp= g_con->getNdbOperation(g_opt.m_tname);
+      CHK(failOp != 0);
+      CHK(failOp->readTuple() == 0);
+      CHK(failOp->equal("PK1", notExists.m_pk1) == 0);
+      if (g_opt.m_pk2chr.m_len != 0)
+      {
+        CHK(failOp->equal("PK2", notExists.m_pk2) == 0);
+        CHK(failOp->equal("PK3", notExists.m_pk3) == 0);
+      }
+      setUDpartId(notExists, failOp);
+      CHK(failOp->getValue("PK1") != 0);
+      CHK(failOp->setAbortOption(NdbOperation::AO_IgnoreError) == 0);
+    }
+
+    /* Now execute and check rc etc */
+    NdbTransaction::ExecType et = (scenario & 0x2) ?
+      NdbTransaction::NoCommit:
+      NdbTransaction::Commit;
+
+    DBG("  Executing with execType = " << ((et == NdbTransaction::NoCommit)?
+                                           "NoCommit":"Commit"));
+    int rc = g_con->execute(et);
+
+    CHK(rc == 0);
+    CHK(g_con->getNdbError().code == 626);
+    CHK(failOp->getNdbError().code == 626);
+    CHK(g_opr->getNdbError().code == 0);
+    DBG("  Error code on transaction as expected");
+
+    g_con->close();
+  }
+
+  return 0;
+}
 
 // main
 
@@ -4824,7 +4943,8 @@ static struct {
   { 36756, bugtest_36756 },
   { 45768, bugtest_45768 },
   { 48040, bugtest_48040 },
-  { 28116, bugtest_28116 }
+  { 28116, bugtest_28116 },
+  { 62321, bugtest_62321 }
 };
 
 NDB_COMMAND(testOdbcDriver, "testBlobs", "testBlobs", "testBlobs", 65535)

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2011-06-30 12:19:14 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2011-09-02 07:40:42 +0000
@@ -1740,3 +1740,8 @@ max-time: 300
 cmd: testIndexStat
 args:
 
+max-time: 300
+cmd: testBlobs
+args: -bug 62321 -skip p
+
+

=== modified file 'storage/ndb/tools/ndb_config.cpp'
--- a/storage/ndb/tools/ndb_config.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/tools/ndb_config.cpp	2011-09-02 07:40:42 +0000
@@ -21,11 +21,15 @@
  * in xml format (--xml).
  *
  * Config can be retrieved from only one of the following sources:
- ** config stored at mgmd (default. The options --config_from_node=0,
- ** or --config_from_node=1 also give the same results.)
- ** config stored at a data node (--config_from_node)
- ** my.cnf (--mycnf=<fullPath/mycnfFileName>)
- ** config.file  (--config_file=<fullPath/configFileName>
+ ** 1) config stored at mgmd (default)
+ ** 2) config stored at a data node (--config_from_node=<data node id>)
+ *** (Note:
+ ***  Node numbers less than 1 give error:
+ ***  "Given value <node id> is not a valid node number." 
+ ***  Non-data node numbers give error:
+ ***  "Node <node id> is not a data node.")
+ ** 3) my.cnf (--mycnf=<fullPath/mycnfFileName>)
+ ** 4) config.file (--config_file=<fullPath/configFileName>)
  *
  * Config variables are displayed from only one of the following
  * sections of the retrieved config:
@@ -53,7 +57,7 @@
  ** ndb_config --config_from_node=2 --connections --query=type
  ** ndb_config --config_from_node=2 --query=id,NoOfFragmentLogFiles
  *
- ** Display results for only node 2:
+ **  Get config from e.g. node 2 and display results for node 2 only:
  *** ndb_config --config_from_node=2 --query=id,NoOfFragmentLogFiles --nodeid=2
  */
 
@@ -139,7 +143,7 @@ static struct my_option my_long_options[
     0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
   { "config_from_node", NDB_OPT_NOSHORT, "Use current config from node with given nodeid",
     (uchar**) &g_config_from_node, (uchar**) &g_config_from_node,
-    0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+    0, GET_INT, REQUIRED_ARG, INT_MIN, INT_MIN, 0, 0, 0, 0},
   { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
 };
 
@@ -224,7 +228,7 @@ main(int argc, char** argv){
   }
 
   if ((g_nodes && g_connections) ||
-       g_system && (g_nodes || g_connections))
+      (g_system && (g_nodes || g_connections)))
   {
     fprintf(stderr,
 	    "Error: Only one of the section-options: --nodes, --connections, --system is allowed.\n");
@@ -237,7 +241,7 @@ main(int argc, char** argv){
    */
 
   if ((g_config_file && g_mycnf) ||
-       g_config_from_node && (g_config_file || g_mycnf))
+      ((g_config_from_node != INT_MIN) && (g_config_file || g_mycnf)))
   {
     fprintf(stderr,
 	    "Error: Config should be retrieved from only one of the following sources:\n");
@@ -602,13 +606,23 @@ fetch_configuration(int from_node)
 	    ndb_mgm_get_connected_host(mgm),
 	    ndb_mgm_get_connected_port(mgm));
   }
-	  
-  if (from_node > 1)
+	 
+  if (from_node == INT_MIN)
   {
-    conf = ndb_mgm_get_configuration_from_node(mgm, from_node);
+    // from_node option is not requested.
+    // Retrieve config from the default src: mgmd
+    conf = ndb_mgm_get_configuration(mgm, 0);
+  }
+  else if (from_node < 1)
+  {
+    fprintf(stderr, "Invalid node number %d is given for --config_from_node.\n", from_node);
+    goto noconnect;
   }
   else
-     conf = ndb_mgm_get_configuration(mgm, 0);
+  {
+    // Retrieve config from the given data node
+     conf = ndb_mgm_get_configuration_from_node(mgm, from_node);
+  }
 
   if(conf == 0)
   {

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-5.5-cluster branch (jonas.oreland:3453 to 3455) jonas oreland2 Sep