From: Ole John Aske  Date: May 7 2012 11:08am
Subject: bzr push into mysql-5.5-cluster-7.2 branch (ole.john.aske:3912 to 3913)
 3913 Ole John Aske	2012-05-07
      Some of the SPJ MTR tests are written to detect failures (previously) caused by
      incorrect handling when the result was retrieved in multiple batches.
      Thus, these tests depend on a specific 'batch size' being used.
      
      As the default batch size was increased from 64 to 256 rows in cluster-7.2,
      we believe these tests are now broken.
      
      This fix introduces the debug flags 'max_4rows_in_spj_batches' and
      'max_64rows_in_spj_batches', which enforce the batch size indicated by
      their names (see the usage sketch after the file list below).
      
      This fix is also preparation for a fix and test case for:
      Bug# 14010406 LARGE PUSHED JOIN HIT ASSERT IN SPJ BLOCK

    modified:
      mysql-test/suite/ndb/r/ndb_join_pushdown_default.result
      mysql-test/suite/ndb/t/ndb_join_pushdown.inc
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
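
      For illustration only (not part of this commit): a minimal mysqltest-style
      sketch of how a test is expected to wrap its statements in one of the new
      debug flags, assuming a debug build of mysqld where the 'debug' system
      variable is available.

        # Save the current debug settings so they can be restored afterwards.
        set @save_debug = @@global.debug;

        # Cap SPJ batches at 64 rows (the pre-7.2 default these tests assume).
        set global debug='+d,max_64rows_in_spj_batches';

        # ... run the pushed-join queries that depend on multi-batch results ...

        # Drop the flag and restore the original debug settings.
        set global debug='-d,max_64rows_in_spj_batches';
        set global debug=@save_debug;
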
 3912 magnus.blaudd@stripped	2012-05-07 [merge]
      Merge 7.1 -> 7.2

    modified:
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/Operation.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DbImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordDeleteOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordIndexScanOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordKeyOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordScanOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordTableScanOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordUniqueKeyOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/OperationImpl.java
      storage/ndb/include/kernel/signaldata/ScanFrag.hpp
      storage/ndb/include/util/SocketAuthenticator.hpp
      storage/ndb/src/common/transporter/Transporter.cpp
      storage/ndb/src/common/transporter/Transporter.hpp
      storage/ndb/src/common/util/SocketAuthenticator.cpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
      storage/ndb/test/include/HugoTransactions.hpp
      storage/ndb/test/include/NdbTimer.hpp
      storage/ndb/test/ndbapi/testUpgrade.cpp
      storage/ndb/test/src/HugoTransactions.cpp
      storage/ndb/test/tools/connect.cpp
=== modified file 'mysql-test/suite/ndb/r/ndb_join_pushdown_default.result'
--- a/mysql-test/suite/ndb/r/ndb_join_pushdown_default.result	2012-05-03 10:42:49 +0000
+++ b/mysql-test/suite/ndb/r/ndb_join_pushdown_default.result	2012-05-07 11:07:32 +0000
@@ -12,6 +12,7 @@ where variable_name in 
 'Ndb_pushed_queries_defined',
 'Ndb_pushed_queries_dropped',
 'Ndb_pushed_queries_executed');
+set @save_debug = @@global.debug;
 set @save_ndb_join_pushdown = @@session.ndb_join_pushdown;
 set ndb_join_pushdown = true;
 create table t1 (
@@ -2105,6 +2106,7 @@ PRIMARY KEY (`a`)
 delete from t1;
 insert into tx values (0), (1), (2), (3), (4), (5), (6), (7), (8), (9);
 insert into t1 select 1, x1.a * 10+x2.a, 1, 1 from tx as x1 cross join tx as x2;
+set global debug='+d,max_64rows_in_spj_batches';
 explain select count(*) from t1 as x1
 join t1 as x2 on x2.a = x1.c and x1.b < 2 
 join t1 as x3 on x3.a = x1.d;
@@ -2117,6 +2119,7 @@ join t1 as x2 on x2.a = x1.c and x1.b < 
 join t1 as x3 on x3.a = x1.d;
 count(*)
 20000
+set global debug='-d,max_64rows_in_spj_batches';
 drop table t1;
 drop table tx;
 create table t1 (
@@ -3936,6 +3939,7 @@ k	b	k	b	k	b	k	b
 3	1	1	1	1	1	1	1
 4	1	1	1	1	1	1	1
 drop table t1;
+set global debug='+d,max_64rows_in_spj_batches';
 create table t1 (
 a int not null auto_increment,
 b char(255) not null,
@@ -4013,6 +4017,7 @@ join t1 as t3 on t3.a = t2.c and t3.b = 
 count(*)
 8990
 drop table t1;
+set global debug='-d,max_64rows_in_spj_batches';
 create logfile group lg1
 add undofile 'undofile.dat'
 initial_size 1m
@@ -4461,6 +4466,7 @@ insert into t1 values (0,-1), (1,-1), (2
 (118,-1), (119,-1), (120,-1), (121,-1), (122,-1), (123,-1), (124,-1), 
 (125,-1), (126,-1), (127,-1), (128,-1), (129,-1), (130,-1), (131,-1), 
 (132,-1), (133,-1), (134,-1), (135,-1), (136,-1), (137,-1), (138,-1), (139,-1);
+set global debug='+d,max_64rows_in_spj_batches';
 explain extended select * from t1 as x join t1 as y on x.u=y.pk order by(x.pk);
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
 1	SIMPLE	x	index	NULL	PRIMARY	4	NULL	140	100.00	Parent of 2 pushed join@1
@@ -4469,6 +4475,7 @@ Warnings:
 Note	1003	select `test`.`x`.`pk` AS `pk`,`test`.`x`.`u` AS `u`,`test`.`y`.`pk` AS `pk`,`test`.`y`.`u` AS `u` from `test`.`t1` `x` join `test`.`t1` `y` where (`test`.`y`.`pk` = `test`.`x`.`u`) order by `test`.`x`.`pk`
 select * from t1 as x join t1 as y on x.u=y.pk order by(x.pk);
 pk	u	pk	u
+set global debug='-d,max_64rows_in_spj_batches';
 drop table t1;
 create table t1 (pk int primary key, u int not null, a int, b int) engine=ndb;
 create index ix1 on t1(b,a);
@@ -4594,6 +4601,7 @@ join t1 as x2 on x1.a=x2.b
 where x1.pk = 1 or x1.u=1;
 count(*)
 13
+set global debug='+d,max_4rows_in_spj_batches';
 set ndb_join_pushdown=on;
 explain extended
 select straight_join * from t1 as table1
@@ -4638,6 +4646,7 @@ pk	u	a	b	pk	u	a	b	pk	u	a	b
 7	7	10	10	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 8	8	10	10	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
 9	9	10	10	NULL	NULL	NULL	NULL	NULL	NULL	NULL	NULL
+set global debug='-d,max_4rows_in_spj_batches';
 explain extended select straight_join * from t1 as x1 
 inner join t1 as x2 on x2.b = x1.a;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	filtered	Extra
@@ -4721,6 +4730,7 @@ id	select_type	table	type	possible_keys	
 1	SIMPLE	x7	ref	ix1	ix1	5	test.x2.b	2	100.00	Child of 'x1' in pushed join@1; Using where
 Warnings:
 Note	1003	select straight_join count(0) AS `count(*)` from `test`.`t1` `x1` join `test`.`t1` `x2` join `test`.`t1` `x3` join `test`.`t1` `x4` join `test`.`t1` `x5` join `test`.`t1` `x6` join `test`.`t1` `x7` where ((`test`.`x2`.`b` = `test`.`x1`.`a`) and (`test`.`x3`.`b` = `test`.`x1`.`a`) and (`test`.`x4`.`b` = `test`.`x1`.`a`) and (`test`.`x5`.`b` = `test`.`x1`.`a`) and (`test`.`x6`.`b` = `test`.`x1`.`a`) and (`test`.`x7`.`b` = `test`.`x1`.`a`) and (`test`.`x3`.`a` < `test`.`x2`.`pk`) and (`test`.`x4`.`a` < `test`.`x3`.`pk`))
+set global debug='+d,max_64rows_in_spj_batches';
 select straight_join count(*) from t1 as x1 
 join t1 as x2 on x2.b = x1.a
 join t1 as x3 on x3.b = x1.a
@@ -4731,6 +4741,7 @@ join t1 as x7 on x7.b = x1.a
 where x3.a < x2.pk and x4.a < x3.pk;
 count(*)
 632736
+set global debug='-d,max_64rows_in_spj_batches';
 explain extended select straight_join count(*) from t1 as x1 
 left join t1 as x2 on x2.b = x1.a
 join t1 as x3 on x3.b = x1.b;
@@ -4822,6 +4833,7 @@ count(*)
 update t1 set b=b-10;
 update t1 set u=u+100;
 set ndb_join_pushdown=on;
+set global debug='+d,max_64rows_in_spj_batches';
 explain extended select straight_join count(*) from 
 (t1 as x join t1 as y on y.b = x.a)
 left outer join t1 as z on z.u = x.a;
@@ -4836,6 +4848,7 @@ select straight_join count(*) from 
 left outer join t1 as z on z.u = x.a;
 count(*)
 156
+set global debug='-d,max_64rows_in_spj_batches';
 update t1 set u=u-100;
 drop index ix2 on t1;
 create unique index ix2 on t1(a,u);
@@ -4893,6 +4906,7 @@ PRIMARY KEY (`a`,`b`)
 ) ENGINE=ndbcluster;
 insert into t2 values (0), (1), (2), (3), (4), (5), (6), (7), (8), (9);
 insert into t3 select 1, x1.a * 10+x2.a from t2 as x1 cross join t2 as x2;
+set global debug='+d,max_64rows_in_spj_batches';
 explain select straight_join count(*) from t1 as x0  
 join t3 as x1 on x1.a=x0.c
 join t1 as x2 on x2.a=x0.d
@@ -4925,6 +4939,7 @@ count(*)
 300
 Local_range_scans
 4
+set global debug='-d,max_64rows_in_spj_batches';
 drop table t1;
 drop table t2;
 drop table t3;
@@ -5694,3 +5709,4 @@ NDB_PUSHED_QUERIES_EXECUTED	541
 NDB_SORTED_SCAN_COUNT	11
 drop table server_counts_at_startup;
 set ndb_join_pushdown = @save_ndb_join_pushdown;
+set global debug=@save_debug;

=== modified file 'mysql-test/suite/ndb/t/ndb_join_pushdown.inc'
--- a/mysql-test/suite/ndb/t/ndb_join_pushdown.inc	2012-04-25 06:24:54 +0000
+++ b/mysql-test/suite/ndb/t/ndb_join_pushdown.inc	2012-05-07 11:07:32 +0000
@@ -39,6 +39,7 @@ create temporary table server_counts_at_
 ##############
 # Test start
 
+set @save_debug = @@global.debug;
 set @save_ndb_join_pushdown = @@session.ndb_join_pushdown;
 set ndb_join_pushdown = true;
 
@@ -1092,12 +1093,14 @@ insert into tx values (0), (1), (2), (3)
 
 insert into t1 select 1, x1.a * 10+x2.a, 1, 1 from tx as x1 cross join tx as x2;
 
+set global debug='+d,max_64rows_in_spj_batches';
 explain select count(*) from t1 as x1
   join t1 as x2 on x2.a = x1.c and x1.b < 2 
   join t1 as x3 on x3.a = x1.d;
 select count(*) from t1 as x1
   join t1 as x2 on x2.a = x1.c and x1.b < 2 
   join t1 as x3 on x3.a = x1.d;
+set global debug='-d,max_64rows_in_spj_batches';
 
 connection ddl;
 drop table t1;
@@ -2521,6 +2524,7 @@ drop table t1;
 ##
 # Try with higher row-count to test batching/flow control
 #
+set global debug='+d,max_64rows_in_spj_batches';
 connection ddl;
 create table t1 (
   a int not null auto_increment,
@@ -2587,6 +2591,7 @@ join t1 as t3 on t3.a = t2.c and t3.b = 
 
 connection ddl;
 drop table t1;
+set global debug='-d,max_64rows_in_spj_batches';
 
 # Pushed join accessing disk data.
 
@@ -3094,8 +3099,10 @@ insert into t1 values (0,-1), (1,-1), (2
 (125,-1), (126,-1), (127,-1), (128,-1), (129,-1), (130,-1), (131,-1), 
 (132,-1), (133,-1), (134,-1), (135,-1), (136,-1), (137,-1), (138,-1), (139,-1);
 
+set global debug='+d,max_64rows_in_spj_batches';
 explain extended select * from t1 as x join t1 as y on x.u=y.pk order by(x.pk);
 select * from t1 as x join t1 as y on x.u=y.pk order by(x.pk);
+set global debug='-d,max_64rows_in_spj_batches';
 
 connection ddl;
 drop table t1;
@@ -3206,6 +3213,7 @@ select count(*) from t1 as x1
 # which correctly preserves the dependency between the parent subscans 
 # and its child(s).
 
+set global debug='+d,max_4rows_in_spj_batches';
 set ndb_join_pushdown=on;
 explain extended
 select straight_join * from t1 as table1
@@ -3217,6 +3225,7 @@ select straight_join * from t1 as table1
   left join 
    (t1 as table2  join t1 as table3 on table2.pk = table3.b)
  on table1.pk = table2.b;
+set global debug='-d,max_4rows_in_spj_batches';
 
 #############
 # Testcase for 'sledgehammer' fix for scan -> outer join scan:
@@ -3281,6 +3290,10 @@ explain extended select straight_join co
   join t1 as x7 on x7.b = x1.a
 where x3.a < x2.pk and x4.a < x3.pk; 
 
+# Use '64rows' in order to avoid too-small batches, which would
+# cause all subscans to be repeated... and repeated...
+set global debug='+d,max_64rows_in_spj_batches';
+
 select straight_join count(*) from t1 as x1 
   join t1 as x2 on x2.b = x1.a
   join t1 as x3 on x3.b = x1.a
@@ -3289,6 +3302,7 @@ select straight_join count(*) from t1 as
   join t1 as x6 on x6.b = x1.a
   join t1 as x7 on x7.b = x1.a
 where x3.a < x2.pk and x4.a < x3.pk; 
+set global debug='-d,max_64rows_in_spj_batches';
 
 #############
 # If we have an outer join, we can't create an artificial dep. 'through' the outer join.
@@ -3380,6 +3394,8 @@ update t1 set b=b-10;
 update t1 set u=u+100;
 
 set ndb_join_pushdown=on;
+set global debug='+d,max_64rows_in_spj_batches';
+
 explain extended select straight_join count(*) from 
   (t1 as x join t1 as y on y.b = x.a)
  left outer join t1 as z on z.u = x.a;
@@ -3387,6 +3403,8 @@ select straight_join count(*) from 
   (t1 as x join t1 as y on y.b = x.a)
  left outer join t1 as z on z.u = x.a;
 
+set global debug='-d,max_64rows_in_spj_batches';
+
 #Undo update
 update t1 set u=u-100;
 
@@ -3449,6 +3467,7 @@ insert into t2 values (0), (1), (2), (3)
 connection spj;
 # Make t3 so big that it takes multiple batches to scan it.
 insert into t3 select 1, x1.a * 10+x2.a from t2 as x1 cross join t2 as x2;
+set global debug='+d,max_64rows_in_spj_batches';
 
 explain select straight_join count(*) from t1 as x0  
    join t3 as x1 on x1.a=x0.c
@@ -3483,6 +3502,7 @@ select straight_join count(*) from t1 as
 --eval select sum(val) - $scan_rows as Local_range_scans from ndbinfo.counters where block_name='DBSPJ' and counter_name='LOCAL_RANGE_SCANS_SENT';
 --enable_query_log
 
+set global debug='-d,max_64rows_in_spj_batches';
 
 connection ddl;
 drop table t1;
@@ -4133,4 +4153,5 @@ drop table server_counts_at_startup;
 --source ndbinfo_drop.inc
 
 set ndb_join_pushdown = @save_ndb_join_pushdown;
+set global debug=@save_debug;
 

=== modified file 'storage/ndb/src/ndbapi/NdbQueryOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2012-04-25 09:22:21 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2012-05-07 11:07:32 +0000
@@ -47,8 +47,10 @@
  */
 #define UNUSED(x) ((void)(x))
 
-// To force usage of SCAN_NEXTREQ even for small scans resultsets
-static const bool testNextReq = false;
+// To force usage of SCAN_NEXTREQ even for small scan result sets:
+// - '0' is the default (production) value
+// - '4' is normally a good value for testing batch-related problems
+static const int enforcedBatchSize = 0;
 
 // Use double buffered ResultSets, may later change 
 // to be more adaptive based on query type
@@ -4374,11 +4376,18 @@ NdbQueryOperationImpl
   Uint32 maxBatchRows = 0;
   if (myClosestScan != NULL)
   {
-
     // To force usage of SCAN_NEXTREQ even for small scans resultsets
-    if (testNextReq)
+    if (DBUG_EVALUATE_IF("max_4rows_in_spj_batches", true, false))
+    {
+      m_maxBatchRows = 4;
+    }
+    else if (DBUG_EVALUATE_IF("max_64rows_in_spj_batches", true, false))
+    {
+      m_maxBatchRows = 64;
+    }
+    else if (enforcedBatchSize)
     {
-      m_maxBatchRows = 1;
+      m_maxBatchRows = enforcedBatchSize;
     }
 
     const Ndb& ndb = *getQuery().getNdbTransaction().getNdb();
