List:Commits« Previous MessageNext Message »
From:magnus.blaudd Date:May 8 2012 9:24am
Subject:bzr push into mysql-5.5-cluster-7.3 branch (magnus.blaudd:3886 to 3887)
View as plain text  
 3887 magnus.blaudd@stripped	2012-05-08 [merge]
      Merge 7.2 -> 7.3

    modified:
      mysql-test/suite/ndb/r/ndb_join_pushdown_default.result
      mysql-test/suite/ndb/t/ndb_join_pushdown.inc
      storage/ndb/src/dummy.cpp
      storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilder.cpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
 3886 magnus.blaudd@stripped	2012-05-07 [merge]
      Merge 7.2 -> 7.3

    modified:
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/Operation.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DbImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordDeleteOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordIndexScanOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordKeyOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordScanOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordTableScanOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordUniqueKeyOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/OperationImpl.java
      storage/ndb/include/kernel/signaldata/ScanFrag.hpp
      storage/ndb/include/util/SocketAuthenticator.hpp
      storage/ndb/src/common/transporter/Transporter.cpp
      storage/ndb/src/common/transporter/Transporter.hpp
      storage/ndb/src/common/util/SocketAuthenticator.cpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
      storage/ndb/test/include/HugoTransactions.hpp
      storage/ndb/test/include/NdbTimer.hpp
      storage/ndb/test/ndbapi/testUpgrade.cpp
      storage/ndb/test/src/HugoTransactions.cpp
      storage/ndb/test/tools/connect.cpp
=== modified file 'mysql-test/suite/ndb/r/ndb_join_pushdown_default.result'
--- a/mysql-test/suite/ndb/r/ndb_join_pushdown_default.result	2012-05-03 10:42:49 +0000
+++ b/mysql-test/suite/ndb/r/ndb_join_pushdown_default.result	2012-05-08 08:03:29 +0000
@@ -12,6 +12,7 @@ where variable_name in 
 'Ndb_pushed_queries_defined',
 'Ndb_pushed_queries_dropped',
 'Ndb_pushed_queries_executed');
+set @save_debug = @@global.debug;
 set @save_ndb_join_pushdown = @@session.ndb_join_pushdown;
 set ndb_join_pushdown = true;
 create table t1 (
@@ -2105,6 +2106,7 @@ PRIMARY KEY (`a`)
 delete from t1;
 insert into tx values (0), (1), (2), (3), (4), (5), (6), (7), (8), (9);
 insert into t1 select 1, x1.a * 10+x2.a, 1, 1 from tx as x1 cross join tx as x2;
+set global debug='+d,max_64rows_in_spj_batches';
 explain select count(*) from t1 as x1
 join t1 as x2 on x2.a = x1.c and x1.b < 2 
 join t1 as x3 on x3.a = x1.d;
@@ -2117,6 +2119,7 @@ join t1 as x2 on x2.a = x1.c and x1.b < 
 join t1 as x3 on x3.a = x1.d;
 count(*)
 20000
+set global debug='-d,max_64rows_in_spj_batches';
 drop table t1;
 drop table tx;
 create table t1 (
@@ -3936,6 +3939,7 @@ k	b	k	b	k	b	k	b
 3	1	1	1	1	1	1	1
 4	1	1	1	1	1	1	1
 drop table t1;
+set global debug='+d,max_64rows_in_spj_batches';
 create table t1 (
 a int not null auto_increment,
 b char(255) not null,
@@ -4013,6 +4017,7 @@ join t1 as t3 on t3.a = t2.c and t3.b = 
 count(*)
 8990
 drop table t1;
+set global debug='-d,max_64rows_in_spj_batches';
 create logfile group lg1
 add undofile 'undofile.dat'
 initial_size 1m
@@ -4461,6 +4466,7 @@ insert into t1 values (0,-1), (1,-1), (2
 (118,-1), (119,-1), (120,-1), (121,-1), (122,-1), (123,-1), (124,-1), 
 (125,-1), (126,-1), (127,-1), (128,-1), (129,-1), (130,-1), (131,-1), 
 (132,-1), (133,-1), (134,-1), (135,-1), (136,-1), (137,-1), (138,-1), (139,-1);
+set global debug='+d,max_64rows_in_spj_batches';
 explain extended select * from t1 as x join t1 as y on x.u=y.pk order by(x.pk);
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
 1	SIMPLE	x	index	NULL	PRIMARY	4	NULL	140	100.00	Parent of 2 pushed join@1
@@ -4469,6 +4475,7 @@ Warnings:
 Note	1003	select `test`.`x`.`pk` AS `pk`,`test`.`x`.`u` AS `u`,`test`.`y`.`pk` AS `pk`,`test`.`y`.`u` AS `u` from `test`.`t1` `x` join `test`.`t1` `y` where (`test`.`y`.`pk` = `test`.`x`.`u`) order by `test`.`x`.`pk`
 select * from t1 as x join t1 as y on x.u=y.pk order by(x.pk);
 pk	u	pk	u
+set global debug='-d,max_64rows_in_spj_batches';
 drop table t1;
 create table t1 (pk int primary key, u int not null, a int, b int) engine=ndb;
 create index ix1 on t1(b,a);
@@ -4594,6 +4601,7 @@ join t1 as x2 on x1.a=x2.b
 where x1.pk = 1 or x1.u=1;
 count(*)
 13
+set global debug='+d,max_4rows_in_spj_batches';
 set ndb_join_pushdown=on;
 explain extended
 select straight_join * from t1 as table1
@@ -4638,6 +4646,7 @@ pk	u	a	b	pk	u	a	b	pk	u	a	b
 7	7	10	10	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 8	8	10	10	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 9	9	10	10	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
+set global debug='-d,max_4rows_in_spj_batches';
 explain extended select straight_join * from t1 as x1 
 inner join t1 as x2 on x2.b = x1.a;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
@@ -4721,6 +4730,7 @@ id	select_type	table	type	possible_keys	
 1	SIMPLE	x7	ref	ix1	ix1	5	test.x2.b	2	100.00	Child of 'x1' in pushed join@1; Using where
 Warnings:
 Note	1003	select straight_join count(0) AS `count(*)` from `test`.`t1` `x1` join `test`.`t1` `x2` join `test`.`t1` `x3` join `test`.`t1` `x4` join `test`.`t1` `x5` join `test`.`t1` `x6` join `test`.`t1` `x7` where ((`test`.`x2`.`b` = `test`.`x1`.`a`) and (`test`.`x3`.`b` = `test`.`x1`.`a`) and (`test`.`x4`.`b` = `test`.`x1`.`a`) and (`test`.`x5`.`b` = `test`.`x1`.`a`) and (`test`.`x6`.`b` = `test`.`x1`.`a`) and (`test`.`x7`.`b` = `test`.`x1`.`a`) and (`test`.`x3`.`a` < `test`.`x2`.`pk`) and (`test`.`x4`.`a` < `test`.`x3`.`pk`))
+set global debug='+d,max_64rows_in_spj_batches';
 select straight_join count(*) from t1 as x1 
 join t1 as x2 on x2.b = x1.a
 join t1 as x3 on x3.b = x1.a
@@ -4731,6 +4741,7 @@ join t1 as x7 on x7.b = x1.a
 where x3.a < x2.pk and x4.a < x3.pk;
 count(*)
 632736
+set global debug='-d,max_64rows_in_spj_batches';
 explain extended select straight_join count(*) from t1 as x1 
 left join t1 as x2 on x2.b = x1.a
 join t1 as x3 on x3.b = x1.b;
@@ -4822,6 +4833,7 @@ count(*)
 update t1 set b=b-10;
 update t1 set u=u+100;
 set ndb_join_pushdown=on;
+set global debug='+d,max_64rows_in_spj_batches';
 explain extended select straight_join count(*) from 
 (t1 as x join t1 as y on y.b = x.a)
 left outer join t1 as z on z.u = x.a;
@@ -4836,6 +4848,7 @@ select straight_join count(*) from 
 left outer join t1 as z on z.u = x.a;
 count(*)
 156
+set global debug='-d,max_64rows_in_spj_batches';
 update t1 set u=u-100;
 drop index ix2 on t1;
 create unique index ix2 on t1(a,u);
@@ -4893,6 +4906,7 @@ PRIMARY KEY (`a`,`b`)
 ) ENGINE=ndbcluster;
 insert into t2 values (0), (1), (2), (3), (4), (5), (6), (7), (8), (9);
 insert into t3 select 1, x1.a * 10+x2.a from t2 as x1 cross join t2 as x2;
+set global debug='+d,max_64rows_in_spj_batches';
 explain select straight_join count(*) from t1 as x0  
 join t3 as x1 on x1.a=x0.c
 join t1 as x2 on x2.a=x0.d
@@ -4925,6 +4939,7 @@ count(*)
 300
 Local_range_scans
 4
+set global debug='-d,max_64rows_in_spj_batches';
 drop table t1;
 drop table t2;
 drop table t3;
@@ -5656,6 +5671,44 @@ k1	i	name
 Warnings:
 Warning	4294	Scan filter is too large, discarded
 drop table t1;
+create table t(
+pk int primary key auto_increment,
+i int, 
+j int,
+k int,
+index(i,j),
+index(i),
+index(j),
+index(k)
+) engine = ndb;
+insert into t(i,j,k) values
+(1,1,1), (1,1,1), (1,1,1),
+(2,2,2), (2,2,2), (2,2,2);
+set global debug='+d,max_4rows_in_spj_batches';
+explain
+select straight_join count(*) from 
+t as t1
+join t as t2 on t2.i = t1.i
+join (t as t3 join t as t4 on t4.k=t3.k join t as t5 on t5.i=t4.i and t5.j=t3.j) on t3.pk=t1.j
+join t as t6 on t6.k = t1.k
+where t1.i < 2;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t1	range	i,i_2,j,k	i	5	NULL	3	Parent of 6 pushed join@1; Using where with pushed condition
+1	SIMPLE	t2	ref	i,i_2	i	5	test.t1.i	1	Child of 't1' in pushed join@1; Using where
+1	SIMPLE	t3	eq_ref	PRIMARY,j,k	PRIMARY	4	test.t1.j	1	Child of 't1' in pushed join@1
+1	SIMPLE	t4	ref	i,i_2,k	k	5	test.t3.k	2	Child of 't3' in pushed join@1; Using where
+1	SIMPLE	t5	ref	i,i_2,j	i	10	test.t4.i,test.t3.j	2	Child of 't4' in pushed join@1; Using where
+1	SIMPLE	t6	ref	k	k	5	test.t1.k	2	Child of 't1' in pushed join@1; Using where
+select straight_join count(*) from 
+t as t1
+join t as t2 on t2.i = t1.i
+join (t as t3 join t as t4 on t4.k=t3.k join t as t5 on t5.i=t4.i and t5.j=t3.j) on t3.pk=t1.j
+join t as t6 on t6.k = t1.k
+where t1.i < 2;
+count(*)
+243
+set global debug='-d,max_4rows_in_spj_batches';
+drop table t;
 create temporary table spj_counts_at_end
 select counter_name, sum(val) as val 
 from ndbinfo.counters 
@@ -5676,7 +5729,7 @@ counter_name	spj_counts_at_end.val - spj
 CONST_PRUNED_RANGE_SCANS_RECEIVED	8
 LOCAL_TABLE_SCANS_SENT	254
 PRUNED_RANGE_SCANS_RECEIVED	27
-RANGE_SCANS_RECEIVED	736
+RANGE_SCANS_RECEIVED	738
 READS_RECEIVED	47
 TABLE_SCANS_RECEIVED	254
 drop table spj_counts_at_startup;
@@ -5694,3 +5747,4 @@ NDB_PUSHED_QUERIES_EXECUTED	541
 NDB_SORTED_SCAN_COUNT	11
 drop table server_counts_at_startup;
 set ndb_join_pushdown = @save_ndb_join_pushdown;
+set global debug=@save_debug;

=== modified file 'mysql-test/suite/ndb/t/ndb_join_pushdown.inc'
--- a/mysql-test/suite/ndb/t/ndb_join_pushdown.inc	2012-04-25 06:24:54 +0000
+++ b/mysql-test/suite/ndb/t/ndb_join_pushdown.inc	2012-05-08 08:03:29 +0000
@@ -39,6 +39,8 @@ create temporary table server_counts_at_
 ##############
 # Test start
 
+--error 0,1193
+set @save_debug = @@global.debug;
 set @save_ndb_join_pushdown = @@session.ndb_join_pushdown;
 set ndb_join_pushdown = true;
 
@@ -1092,6 +1094,9 @@ insert into tx values (0), (1), (2), (3)
 
 insert into t1 select 1, x1.a * 10+x2.a, 1, 1 from tx as x1 cross join tx as x2;
 
+--error 0,1193
+set global debug='+d,max_64rows_in_spj_batches';
+
 explain select count(*) from t1 as x1
   join t1 as x2 on x2.a = x1.c and x1.b < 2 
   join t1 as x3 on x3.a = x1.d;
@@ -1099,6 +1104,9 @@ select count(*) from t1 as x1
   join t1 as x2 on x2.a = x1.c and x1.b < 2 
   join t1 as x3 on x3.a = x1.d;
 
+--error 0,1193
+set global debug='-d,max_64rows_in_spj_batches';
+
 connection ddl;
 drop table t1;
 drop table tx;
@@ -2521,6 +2529,9 @@ drop table t1;
 ##
 # Try with higher row-count to test batching/flow control
 #
+--error 0,1193
+set global debug='+d,max_64rows_in_spj_batches';
+
 connection ddl;
 create table t1 (
   a int not null auto_increment,
@@ -2588,6 +2599,9 @@ join t1 as t3 on t3.a = t2.c and t3.b = 
 connection ddl;
 drop table t1;
 
+--error 0,1193
+set global debug='-d,max_64rows_in_spj_batches';
+
 # Pushed join accessing disk data.
 
 connection ddl;
@@ -3094,9 +3108,15 @@ insert into t1 values (0,-1), (1,-1), (2
 (125,-1), (126,-1), (127,-1), (128,-1), (129,-1), (130,-1), (131,-1), 
 (132,-1), (133,-1), (134,-1), (135,-1), (136,-1), (137,-1), (138,-1), (139,-1);
 
+--error 0,1193
+set global debug='+d,max_64rows_in_spj_batches';
+
 explain extended select * from t1 as x join t1 as y on x.u=y.pk order by(x.pk);
 select * from t1 as x join t1 as y on x.u=y.pk order by(x.pk);
 
+--error 0,1193
+set global debug='-d,max_64rows_in_spj_batches';
+
 connection ddl;
 drop table t1;
 
@@ -3206,6 +3226,9 @@ select count(*) from t1 as x1
 # which correctly preserves the dependency between the parent subscans 
 # and its child(s).
 
+--error 0,1193
+set global debug='+d,max_4rows_in_spj_batches';
+
 set ndb_join_pushdown=on;
 explain extended
 select straight_join * from t1 as table1
@@ -3218,6 +3241,9 @@ select straight_join * from t1 as table1
    (t1 as table2  join t1 as table3 on table2.pk = table3.b)
  on table1.pk = table2.b;
 
+--error 0,1193
+set global debug='-d,max_4rows_in_spj_batches';
+
 #############
 # Testcase for 'sledgehammer' fix for scan -> outer join scan:
 # Pushing of outer joined has to be dissabled as incomplete child batches
@@ -3281,6 +3307,12 @@ explain extended select straight_join co
   join t1 as x7 on x7.b = x1.a
 where x3.a < x2.pk and x4.a < x3.pk; 
 
+# set '64rows' in order to avoid too small batches which will
+# cause all subscans to be repeated... and repeated... and
+
+--error 0,1193
+set global debug='+d,max_64rows_in_spj_batches';
+
 select straight_join count(*) from t1 as x1 
   join t1 as x2 on x2.b = x1.a
   join t1 as x3 on x3.b = x1.a
@@ -3290,6 +3322,9 @@ select straight_join count(*) from t1 as
   join t1 as x7 on x7.b = x1.a
 where x3.a < x2.pk and x4.a < x3.pk; 
 
+--error 0,1193
+set global debug='-d,max_64rows_in_spj_batches';
+
 #############
 # If we have an outer join, we can't create an artificial dep. 'through' the outer join.
 # In this case the child scan can't be part of the pushed query.
@@ -3380,6 +3415,10 @@ update t1 set b=b-10;
 update t1 set u=u+100;
 
 set ndb_join_pushdown=on;
+
+--error 0,1193
+set global debug='+d,max_64rows_in_spj_batches';
+
 explain extended select straight_join count(*) from 
   (t1 as x join t1 as y on y.b = x.a)
  left outer join t1 as z on z.u = x.a;
@@ -3387,6 +3426,9 @@ select straight_join count(*) from 
   (t1 as x join t1 as y on y.b = x.a)
  left outer join t1 as z on z.u = x.a;
 
+--error 0,1193
+set global debug='-d,max_64rows_in_spj_batches';
+
 #Undo update
 update t1 set u=u-100;
 
@@ -3450,6 +3492,9 @@ connection spj;
 # Make t3 so big that it takes multiple batches to scan it.
 insert into t3 select 1, x1.a * 10+x2.a from t2 as x1 cross join t2 as x2;
 
+--error 0,1193
+set global debug='+d,max_64rows_in_spj_batches';
+
 explain select straight_join count(*) from t1 as x0  
    join t3 as x1 on x1.a=x0.c
    join t1 as x2 on x2.a=x0.d
@@ -3483,6 +3528,8 @@ select straight_join count(*) from t1 as
 --eval select sum(val) - $scan_rows as Local_range_scans from ndbinfo.counters where block_name='DBSPJ' and counter_name='LOCAL_RANGE_SCANS_SENT';
 --enable_query_log
 
+--error 0,1193
+set global debug='-d,max_64rows_in_spj_batches';
 
 connection ddl;
 drop table t1;
@@ -4086,6 +4133,57 @@ eval $query;
 
 drop table t1;
 
+############################################################
+# Bug#14010406 LARGE PUSHED JOIN HIT ASSERT IN SPJ BLOCK
+#
+# Buffered rows (caused by PARENT refs) was prematurely
+# released when there was no TN_ACTIVE treeNodes childs.
+# However, in bushy scans, a scan branch can be 'repeated'
+# even if its previous execution was 'complete'. 
+#
+# Thus we have to use a less eager release strategy where
+# we don't release any buffered rows until we prepare for
+# a NEXTREQ which will fetch more rows into the treeNode.
+############################################################
+
+create table t(
+  pk int primary key auto_increment,
+  i int, 
+  j int,
+  k int,
+  index(i,j),
+  index(i),
+  index(j),
+  index(k)
+) engine = ndb;
+
+insert into t(i,j,k) values
+   (1,1,1), (1,1,1), (1,1,1),
+   (2,2,2), (2,2,2), (2,2,2);
+
+--error 0,1193
+set global debug='+d,max_4rows_in_spj_batches';
+
+explain
+select straight_join count(*) from 
+  t as t1
+  join t as t2 on t2.i = t1.i
+  join (t as t3 join t as t4 on t4.k=t3.k join t as t5 on t5.i=t4.i and t5.j=t3.j) on t3.pk=t1.j
+  join t as t6 on t6.k = t1.k
+  where t1.i < 2;
+
+select straight_join count(*) from 
+  t as t1
+  join t as t2 on t2.i = t1.i
+  join (t as t3 join t as t4 on t4.k=t3.k join t as t5 on t5.i=t4.i and t5.j=t3.j) on t3.pk=t1.j
+  join t as t6 on t6.k = t1.k
+  where t1.i < 2;
+
+--error 0,1193
+set global debug='-d,max_4rows_in_spj_batches';
+
+drop table t;
+
 ########################################
 # Verify DBSPJ counters for entire test:
 # Note: These tables are 'temporary' withing 'connection spj'
@@ -4133,4 +4231,6 @@ drop table server_counts_at_startup;
 --source ndbinfo_drop.inc
 
 set ndb_join_pushdown = @save_ndb_join_pushdown;
+--error 0,1193
+set global debug=@save_debug;
 

=== modified file 'storage/ndb/src/dummy.cpp'
--- a/storage/ndb/src/dummy.cpp	2011-09-19 09:26:01 +0000
+++ b/storage/ndb/src/dummy.cpp	2012-05-08 09:18:50 +0000
@@ -4,5 +4,6 @@ typedef void (Ndb_cluster_connection::* 
 
 NDB_EXPORT fptr functions[] = {
   (fptr)&Ndb_cluster_connection::set_name,
+  (fptr)&NdbScanFilter::isfalse,
   0
 };

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp	2012-04-25 06:24:54 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp	2012-05-08 08:03:29 +0000
@@ -1087,8 +1087,6 @@ private:
   void releaseRow(Ptr<Request>, RowRef ref);
   void registerActiveCursor(Ptr<Request>, Ptr<TreeNode>);
   void nodeFail_checkRequests(Signal*);
-
-  void cleanupChildBranch(Ptr<Request>, Ptr<TreeNode>);
   void cleanup_common(Ptr<Request>, Ptr<TreeNode>);
 
   /**

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2012-04-25 09:22:21 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2012-05-08 08:03:29 +0000
@@ -1160,7 +1160,7 @@ Dbspj::batchComplete(Signal* signal, Ptr
   {
     jam();
     /**
-     * release unneeded buffers and position cursor for SCAN_NEXTREQ
+     * release unneeded buffers as preparation for later SCAN_NEXTREQ
      */
     releaseScanBuffers(requestPtr);
   }
@@ -1193,6 +1193,8 @@ Dbspj::prepareNextBatch(Signal* signal, 
     return;
   }
 
+  DEBUG("prepareNextBatch, request: " << requestPtr.i);
+
   if (requestPtr.p->m_bits & Request::RT_REPEAT_SCAN_RESULT)
   {
     /**
@@ -1427,43 +1429,36 @@ Dbspj::releaseScanBuffers(Ptr<Request> r
 {
   Ptr<TreeNode> treeNodePtr;
   Local_TreeNode_list list(m_treenode_pool, requestPtr.p->m_nodes);
-  TreeNodeBitMask ancestors_of_active;
 
-  for (list.last(treeNodePtr); !treeNodePtr.isNull(); list.prev(treeNodePtr))
+  for (list.first(treeNodePtr); !treeNodePtr.isNull(); list.next(treeNodePtr))
   {
     /**
-     * If there are no active children,
-     *   then we can cleanup in our sub-branch
+     * Release buffered rows for all treeNodes getting more rows
+     * in the following NEXTREQ, including all its childs.
      */
-    if (!ancestors_of_active.get(treeNodePtr.p->m_node_no))
+    if (requestPtr.p->m_active_nodes.get(treeNodePtr.p->m_node_no) ||
+        requestPtr.p->m_active_nodes.overlaps(treeNodePtr.p->m_ancestors))
     {
       if (treeNodePtr.p->m_bits & TreeNode::T_ROW_BUFFER)
       {
         jam();
         releaseNodeRows(requestPtr, treeNodePtr);
       }
-      
-      /**
-       * Cleanup ACTIVE nodes fetching more rows in a NEXTREQ,
-       * or nodes being in 'm_active_nodes' as they will 'repeat'.
-       * (and then become active)
-       */
-      if (treeNodePtr.p->m_state == TreeNode::TN_ACTIVE ||
-          requestPtr.p->m_active_nodes.get(treeNodePtr.p->m_node_no))
-      {
-        jam();
-        cleanupChildBranch(requestPtr, treeNodePtr);
-      }
     }
 
     /**
-      * Collect ancestors of all nodes which are, or will
-      * become active in NEXTREQ (possibly repeated)
-      */
-    if (treeNodePtr.p->m_state == TreeNode::TN_ACTIVE ||
-        requestPtr.p->m_active_nodes.get(treeNodePtr.p->m_node_no))
+     * Do further cleanup in treeNodes having ancestor getting more rows.
+     * (Which excludes the restarted treeNode itself)
+     */
+    if (requestPtr.p->m_active_nodes.overlaps(treeNodePtr.p->m_ancestors))
     {
-      ancestors_of_active.bitOR(treeNodePtr.p->m_ancestors);
+      jam();
+      if (treeNodePtr.p->m_info->m_parent_batch_cleanup != 0)
+      {
+        jam();
+        (this->*(treeNodePtr.p->m_info->m_parent_batch_cleanup))(requestPtr,
+                                                                 treeNodePtr);
+      }
     }
   }
   /**
@@ -1494,32 +1489,15 @@ Dbspj::registerActiveCursor(Ptr<Request>
 }
 
 void
-Dbspj::cleanupChildBranch(Ptr<Request> requestPtr, Ptr<TreeNode> treeNodePtr)
-{
-  LocalArenaPoolImpl pool(requestPtr.p->m_arena, m_dependency_map_pool);
-  Local_dependency_map list(pool, treeNodePtr.p->m_dependent_nodes);
-  Dependency_map::ConstDataBufferIterator it;
-  for (list.first(it); !it.isNull(); list.next(it))
-  {
-    jam();
-    Ptr<TreeNode> childPtr;
-    m_treenode_pool.getPtr(childPtr, *it.data);
-    if (childPtr.p->m_info->m_parent_batch_cleanup != 0)
-    {
-      jam();
-      (this->*(childPtr.p->m_info->m_parent_batch_cleanup))(requestPtr,
-                                                            childPtr);
-    }
-    cleanupChildBranch(requestPtr,childPtr);
-  }
-}
-
-void
 Dbspj::releaseNodeRows(Ptr<Request> requestPtr, Ptr<TreeNode> treeNodePtr)
 {
   /**
    * Release all rows associated with tree node
    */
+  DEBUG("releaseNodeRows"
+     << ", node: " << treeNodePtr.p->m_node_no
+     << ", request: " << requestPtr.i
+  );
 
   // only when var-alloc, or else stack will be popped wo/ consideration
   // to individual rows
@@ -1630,6 +1608,9 @@ Dbspj::releaseRow(Ptr<Request> requestPt
 void
 Dbspj::releaseRequestBuffers(Ptr<Request> requestPtr, bool reset)
 {
+  DEBUG("releaseRequestBuffers"
+     << ", request: " << requestPtr.i
+  );
   /**
    * Release all pages for request
    */
@@ -1952,13 +1933,18 @@ Dbspj::execLQHKEYREF(Signal* signal)
 
   const LqhKeyRef* ref = reinterpret_cast<const LqhKeyRef*>(signal->getDataPtr());
 
-  DEBUG("execLQHKEYREF, errorCode:" << ref->errorCode);
   Ptr<TreeNode> treeNodePtr;
   m_treenode_pool.getPtr(treeNodePtr, ref->connectPtr);
 
   Ptr<Request> requestPtr;
   m_request_pool.getPtr(requestPtr, treeNodePtr.p->m_requestPtrI);
 
+  DEBUG("execLQHKEYREF"
+     << ", node: " << treeNodePtr.p->m_node_no
+     << ", request: " << requestPtr.i
+     << ", errorCode: " << ref->errorCode
+  );
+
   ndbrequire(treeNodePtr.p->m_info && treeNodePtr.p->m_info->m_execLQHKEYREF);
   (this->*(treeNodePtr.p->m_info->m_execLQHKEYREF))(signal,
                                                     requestPtr,
@@ -1970,8 +1956,6 @@ Dbspj::execLQHKEYCONF(Signal* signal)
 {
   jamEntry();
 
-  DEBUG("execLQHKEYCONF");
-
   const LqhKeyConf* conf = reinterpret_cast<const LqhKeyConf*>(signal->getDataPtr());
   Ptr<TreeNode> treeNodePtr;
   m_treenode_pool.getPtr(treeNodePtr, conf->opPtr);
@@ -1979,6 +1963,11 @@ Dbspj::execLQHKEYCONF(Signal* signal)
   Ptr<Request> requestPtr;
   m_request_pool.getPtr(requestPtr, treeNodePtr.p->m_requestPtrI);
 
+  DEBUG("execLQHKEYCONF"
+     << ", node: " << treeNodePtr.p->m_node_no
+     << ", request: " << requestPtr.i
+  );
+
   ndbrequire(treeNodePtr.p->m_info && treeNodePtr.p->m_info->m_execLQHKEYCONF);
   (this->*(treeNodePtr.p->m_info->m_execLQHKEYCONF))(signal,
                                                      requestPtr,
@@ -1991,8 +1980,6 @@ Dbspj::execSCAN_FRAGREF(Signal* signal)
   jamEntry();
   const ScanFragRef* ref = reinterpret_cast<const ScanFragRef*>(signal->getDataPtr());
 
-  DEBUG("execSCAN_FRAGREF, errorCode:" << ref->errorCode);
-
   Ptr<ScanFragHandle> scanFragHandlePtr;
   m_scanfraghandle_pool.getPtr(scanFragHandlePtr, ref->senderData);
   Ptr<TreeNode> treeNodePtr;
@@ -2000,6 +1987,12 @@ Dbspj::execSCAN_FRAGREF(Signal* signal)
   Ptr<Request> requestPtr;
   m_request_pool.getPtr(requestPtr, treeNodePtr.p->m_requestPtrI);
 
+  DEBUG("execSCAN_FRAGREF"
+     << ", node: " << treeNodePtr.p->m_node_no
+     << ", request: " << requestPtr.i
+     << ", errorCode: " << ref->errorCode
+  );
+
   ndbrequire(treeNodePtr.p->m_info&&treeNodePtr.p->m_info->m_execSCAN_FRAGREF);
   (this->*(treeNodePtr.p->m_info->m_execSCAN_FRAGREF))(signal,
                                                        requestPtr,
@@ -2021,6 +2014,10 @@ Dbspj::execSCAN_HBREP(Signal* signal)
   m_treenode_pool.getPtr(treeNodePtr, scanFragHandlePtr.p->m_treeNodePtrI);
   Ptr<Request> requestPtr;
   m_request_pool.getPtr(requestPtr, treeNodePtr.p->m_requestPtrI);
+  DEBUG("execSCAN_HBREP"
+     << ", node: " << treeNodePtr.p->m_node_no
+     << ", request: " << requestPtr.i
+  );
 
   Uint32 ref = requestPtr.p->m_senderRef;
   signal->theData[0] = requestPtr.p->m_senderData;
@@ -2031,7 +2028,6 @@ void
 Dbspj::execSCAN_FRAGCONF(Signal* signal)
 {
   jamEntry();
-  DEBUG("execSCAN_FRAGCONF");
 
   const ScanFragConf* conf = reinterpret_cast<const ScanFragConf*>(signal->getDataPtr());
 
@@ -2048,6 +2044,10 @@ Dbspj::execSCAN_FRAGCONF(Signal* signal)
   m_treenode_pool.getPtr(treeNodePtr, scanFragHandlePtr.p->m_treeNodePtrI);
   Ptr<Request> requestPtr;
   m_request_pool.getPtr(requestPtr, treeNodePtr.p->m_requestPtrI);
+  DEBUG("execSCAN_FRAGCONF"
+     << ", node: " << treeNodePtr.p->m_node_no
+     << ", request: " << requestPtr.i
+  );
 
   ndbrequire(treeNodePtr.p->m_info&&treeNodePtr.p->m_info->m_execSCAN_FRAGCONF);
   (this->*(treeNodePtr.p->m_info->m_execSCAN_FRAGCONF))(signal,
@@ -2062,8 +2062,8 @@ Dbspj::execSCAN_NEXTREQ(Signal* signal)
   jamEntry();
   const ScanFragNextReq * req = (ScanFragNextReq*)&signal->theData[0];
 
-  DEBUG("Incomming SCAN_NEXTREQ");
 #ifdef DEBUG_SCAN_FRAGREQ
+  DEBUG("Incomming SCAN_NEXTREQ");
   printSCANFRAGNEXTREQ(stdout, &signal->theData[0],
                        ScanFragNextReq::SignalLength, DBLQH);
 #endif
@@ -2080,6 +2080,7 @@ Dbspj::execSCAN_NEXTREQ(Signal* signal)
     ndbrequire(req->requestInfo == ScanFragNextReq::ZCLOSE);
     return;
   }
+  DEBUG("execSCAN_NEXTREQ, request: " << requestPtr.i);
 
 #ifdef SPJ_TRACE_TIME
   Uint64 now = spj_now();
@@ -2132,7 +2133,7 @@ Dbspj::execSCAN_NEXTREQ(Signal* signal)
       if (treeNodePtr.p->m_state == TreeNode::TN_ACTIVE)
       {
         jam();
-        DEBUG("SCAN_NEXTREQ on TreeNode: " << treeNodePtr.i
+        DEBUG("SCAN_NEXTREQ on TreeNode: "
            << ",  m_node_no: " << treeNodePtr.p->m_node_no
            << ", w/ m_parentPtrI: " << treeNodePtr.p->m_parentPtrI);
 
@@ -2151,7 +2152,7 @@ Dbspj::execSCAN_NEXTREQ(Signal* signal)
          */
         jam();
         ndbrequire(requestPtr.p->m_bits & Request::RT_REPEAT_SCAN_RESULT);
-        DEBUG("  Restart TreeNode: " << treeNodePtr.i
+        DEBUG("Restart TreeNode "
            << ",  m_node_no: " << treeNodePtr.p->m_node_no
            << ", w/ m_parentPtrI: " << treeNodePtr.p->m_parentPtrI);
 
@@ -2172,7 +2173,6 @@ void
 Dbspj::execTRANSID_AI(Signal* signal)
 {
   jamEntry();
-  DEBUG("execTRANSID_AI");
   TransIdAI * req = (TransIdAI *)signal->getDataPtr();
   Uint32 ptrI = req->connectPtr;
   //Uint32 transId[2] = { req->transId[0], req->transId[1] };
@@ -2182,6 +2182,11 @@ Dbspj::execTRANSID_AI(Signal* signal)
   Ptr<Request> requestPtr;
   m_request_pool.getPtr(requestPtr, treeNodePtr.p->m_requestPtrI);
 
+  DEBUG("execTRANSID_AI"
+     << ", node: " << treeNodePtr.p->m_node_no
+     << ", request: " << requestPtr.i
+  );
+
   ndbrequire(signal->getNoOfSections() != 0);
 
   SegmentedSectionPtr dataPtr;
@@ -2249,6 +2254,11 @@ Dbspj::storeRow(Ptr<Request> requestPtr,
   Uint32 * headptr = (Uint32*)row.m_row_data.m_section.m_header;
   Uint32 headlen = 1 + row.m_row_data.m_section.m_header->m_len;
 
+  DEBUG("storeRow"
+     << ", node: " << treeNodePtr.p->m_node_no
+     << ", request: " << requestPtr.i
+  );
+
   /**
    * If rows are not in map, then they are kept in linked list
    */
@@ -3478,7 +3488,8 @@ Dbspj::lookup_parent_row(Signal* signal,
   const Uint32 tableId = LqhKeyReq::getTableId(src->tableSchemaVersion);
   const Uint32 corrVal = rowRef.m_src_correlation;
 
-  DEBUG("::lookup_parent_row");
+  DEBUG("::lookup_parent_row"
+     << ", node: " << treeNodePtr.p->m_node_no);
 
   do
   {
@@ -5117,6 +5128,8 @@ Dbspj::scanIndex_parent_row(Signal* sign
                             const RowPtr & rowRef)
 {
   jam();
+  DEBUG("::scanIndex_parent_row"
+     << ", node: " << treeNodePtr.p->m_node_no);
 
   Uint32 err;
   ScanIndexData& data = treeNodePtr.p->m_scanindex_data;
@@ -6885,6 +6898,8 @@ Dbspj::appendFromParent(Uint32 & dst, Lo
   m_treenode_pool.getPtr(treeNodePtr, rowptr.m_src_node_ptrI);
   Uint32 corrVal = rowptr.m_src_correlation;
   RowPtr targetRow;
+  DEBUG("appendFromParent-of"
+     << " node: " << treeNodePtr.p->m_node_no);
   while (levels--)
   {
     jam();
@@ -6894,6 +6909,8 @@ Dbspj::appendFromParent(Uint32 & dst, Lo
       return DbspjErr::InvalidPattern;
     }
     m_treenode_pool.getPtr(treeNodePtr, treeNodePtr.p->m_parentPtrI);
+    DEBUG("appendFromParent"
+       << ", node: " << treeNodePtr.p->m_node_no);
     if (unlikely((treeNodePtr.p->m_bits & TreeNode::T_ROW_BUFFER_MAP) == 0))
     {
       DEBUG_CRASH();

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilder.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2012-04-26 13:05:42 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2012-05-08 09:23:25 +0000
@@ -2181,7 +2181,8 @@ NdbQueryOperationDefImpl::printTree(Uint
   ndbout << NdbQueryOperationDef::getTypeName(getType()) << endl;
   printMargin(depth, hasMoreSiblingsMask, false);
   // Print attributes.
-  ndbout << " opNo: " << getOpNo() << endl;
+  ndbout << " opNo: " << getOpNo()
+         << " (internal: " << getInternalOpNo() << ")" << endl;
   printMargin(depth, hasMoreSiblingsMask, false);
   ndbout << " table: " << getTable().getName() << endl;
   if (getIndex() != NULL)

=== modified file 'storage/ndb/src/ndbapi/NdbQueryOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2012-04-26 13:05:42 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2012-05-08 09:23:25 +0000
@@ -47,8 +47,10 @@
  */
 #define UNUSED(x) ((void)(x))
 
-// To force usage of SCAN_NEXTREQ even for small scans resultsets
-static const bool testNextReq = false;
+// To force usage of SCAN_NEXTREQ even for small scans resultsets:
+// - '0' is default (production) value
+// - '4' is normally a good value for testing batch related problems
+static const int enforcedBatchSize = 0;
 
 // Use double buffered ResultSets, may later change 
 // to be more adaptive based on query type
@@ -4374,11 +4376,18 @@ NdbQueryOperationImpl
   Uint32 maxBatchRows = 0;
   if (myClosestScan != NULL)
   {
-
     // To force usage of SCAN_NEXTREQ even for small scans resultsets
-    if (testNextReq)
+    if (DBUG_EVALUATE_IF("max_4rows_in_spj_batches", true, false))
+    {
+      m_maxBatchRows = 4;
+    }
+    else if (DBUG_EVALUATE_IF("max_64rows_in_spj_batches", true, false))
+    {
+      m_maxBatchRows = 64;
+    }
+    else if (enforcedBatchSize)
     {
-      m_maxBatchRows = 1;
+      m_maxBatchRows = enforcedBatchSize;
     }
 
     const Ndb& ndb = *getQuery().getNdbTransaction().getNdb();

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-5.5-cluster-7.3 branch (magnus.blaudd:3886 to 3887) magnus.blaudd8 May