List: Commits
From: magnus.blaudd  Date: March 5 2012 2:04pm
Subject: bzr push into mysql-trunk-cluster branch (magnus.blaudd:3434 to 3435)
 3435 magnus.blaudd@stripped	2012-03-05 [merge]
      Merge 7.2 -> trunk-cluster

    modified:
      mysql-test/suite/ndb/r/ndb_join_pushdown_default.result
      mysql-test/suite/ndb/r/ndb_join_pushdown_nobnl.result
      mysql-test/suite/ndb/r/ndb_join_pushdown_none.result
      mysql-test/suite/ndb/t/ndb_join_pushdown.inc
      storage/ndb/include/kernel/signaldata/DbspjErr.hpp
      storage/ndb/include/ndb_version.h.in
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/kernel/blocks/pgman.cpp
      storage/ndb/src/mgmsrv/ConfigManager.cpp
      storage/ndb/src/mgmsrv/MgmtSrvr.cpp
      storage/ndb/src/ndbapi/NdbOperationSearch.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilder.cpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
      storage/ndb/src/ndbapi/Ndbif.cpp
      storage/ndb/src/ndbapi/ndb_cluster_connection.cpp
      storage/ndb/src/ndbapi/ndberror.c
      storage/ndb/test/include/HugoQueryBuilder.hpp
      storage/ndb/test/ndbapi/testMgmd.cpp
      storage/ndb/test/ndbapi/testSpj.cpp
      storage/ndb/test/run-test/CMakeLists.txt
      storage/ndb/test/src/HugoQueryBuilder.cpp
 3434 Ole John Aske	2012-02-29
      Updated AQP interface after WL5558 has been merged:
      
      As all test_if_skip_sort_order() calls have been evaluated prior to
      building any 'pushed joins', we can now use JOIN::ordered_index_usage
      to determine whether GROUP/ORDER BY is skippable, instead of
      calculating this inside the AQP interface.

    modified:
      sql/abstract_query_plan.cc
      sql/abstract_query_plan.h
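
For reference, the change in 3434 relies on the optimizer already having
recorded whether an ordered index makes the GROUP BY / ORDER BY sort
redundant. A minimal sketch of the idea, assuming the enum values of
JOIN::ordered_index_usage from the server source of that era; the
Join_plan accessors are illustrative, not the actual
abstract_query_plan.h API:

#include <cassert>

// Assumed to mirror JOIN::ordered_index_usage; illustrative only.
enum enum_ordered_index_usage {
  ordered_index_void,       // no ordered index used for grouping/ordering
  ordered_index_group_by,   // ordered index already satisfies GROUP BY
  ordered_index_order_by    // ordered index already satisfies ORDER BY
};

struct JOIN {
  enum_ordered_index_usage ordered_index_usage;
};

// Hypothetical AQP-side accessors: read the precomputed answer instead
// of re-deriving it inside the AQP interface.
struct Join_plan {
  const JOIN* m_join;
  bool group_by_skippable() const
  { return m_join->ordered_index_usage == ordered_index_group_by; }
  bool order_by_skippable() const
  { return m_join->ordered_index_usage == ordered_index_order_by; }
};

int main()
{
  JOIN join = { ordered_index_group_by };
  Join_plan plan = { &join };
  assert(plan.group_by_skippable());
  assert(!plan.order_by_skippable());
  return 0;
}
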
=== modified file 'mysql-test/suite/ndb/r/ndb_join_pushdown_default.result'
--- a/mysql-test/suite/ndb/r/ndb_join_pushdown_default.result	2012-02-29 10:51:31 +0000
+++ b/mysql-test/suite/ndb/r/ndb_join_pushdown_default.result	2012-03-05 14:02:05 +0000
@@ -4,6 +4,14 @@ select counter_name, sum(val) as val 
 from ndbinfo.counters 
 where block_name='DBSPJ' 
 group by counter_name;
+create temporary table server_counts_at_startup
+select * from information_schema.global_status 
+where variable_name in 
+('Ndb_pruned_scan_count',
+'Ndb_sorted_scan_count',
+'Ndb_pushed_queries_defined',
+'Ndb_pushed_queries_dropped',
+'Ndb_pushed_queries_executed');
 set @save_ndb_join_pushdown = @@session.ndb_join_pushdown;
 set ndb_join_pushdown = true;
 create table t1 (
@@ -4071,6 +4079,16 @@ where t1.a = 10 and t1.b = 10 and 
 t2.a = t1.c and t2.b = t1.c;
 a	b	c	a	b	c
 10	10	11	11	11	12
+create temporary table server_counts
+select * from information_schema.global_status 
+where variable_name in 
+('Ndb_scan_count',
+'Ndb_pruned_scan_count',
+'Ndb_sorted_scan_count',
+'Ndb_pushed_queries_defined',
+'Ndb_pushed_queries_dropped',
+'Ndb_pushed_queries_executed',
+'Ndb_pushed_reads');
 select * from t1 t1, t1 t2 
 where t1.a = 11 and t1.b = 11 and 
 t2.a = t1.c and t2.b = t1.c;
@@ -4087,20 +4105,20 @@ where t1.a = 11 and 
 t2.a = t1.c and t2.b = t1.c;
 count(*)
 1
-scan_count
-2
-pruned_scan_count
-1
-sorted_scan_count
-1
-pushed_queries_defined
-3
-pushed_queries_dropped
-0
-pushed_queries_executed
-3
-pushed_reads
-4
+select new.variable_name, new.variable_value - old.variable_value
+from server_counts as old,
+information_schema.global_status as new
+where new.variable_name = old.variable_name
+order by new.variable_name;
+variable_name	new.variable_value - old.variable_value
+NDB_PRUNED_SCAN_COUNT	1
+NDB_PUSHED_QUERIES_DEFINED	3
+NDB_PUSHED_QUERIES_DROPPED	0
+NDB_PUSHED_QUERIES_EXECUTED	3
+NDB_PUSHED_READS	4
+NDB_SCAN_COUNT	2
+NDB_SORTED_SCAN_COUNT	1
+drop table server_counts;
 drop table t1;
 create table t1(
 d int not null,
@@ -5486,13 +5504,14 @@ group by counter_name;
 select spj_counts_at_end.counter_name, spj_counts_at_end.val - spj_counts_at_startup.val 
 from spj_counts_at_end, spj_counts_at_startup 
 where spj_counts_at_end.counter_name = spj_counts_at_startup.counter_name
-and spj_counts_at_end.counter_name <> 'READS_NOT_FOUND'
-       and spj_counts_at_end.counter_name <> 'LOCAL_READS_SENT'
-       and spj_counts_at_end.counter_name <> 'REMOTE_READS_SENT'
-       and spj_counts_at_end.counter_name <> 'LOCAL_RANGE_SCANS_SENT'
-       and spj_counts_at_end.counter_name <> 'REMOTE_RANGE_SCANS_SENT'
-       and spj_counts_at_end.counter_name <> 'SCAN_ROWS_RETURNED'
-       and spj_counts_at_end.counter_name <> 'SCAN_BATCHES_RETURNED';
+and spj_counts_at_end.counter_name not in 
+('READS_NOT_FOUND',
+'LOCAL_READS_SENT',
+'REMOTE_READS_SENT',
+'LOCAL_RANGE_SCANS_SENT',
+'REMOTE_RANGE_SCANS_SENT',
+'SCAN_ROWS_RETURNED',
+'SCAN_BATCHES_RETURNED');
 counter_name	spj_counts_at_end.val - spj_counts_at_startup.val
 CONST_PRUNED_RANGE_SCANS_RECEIVED	8
 LOCAL_TABLE_SCANS_SENT	256
@@ -5502,14 +5521,16 @@ READS_RECEIVED	54
 TABLE_SCANS_RECEIVED	256
 drop table spj_counts_at_startup;
 drop table spj_counts_at_end;
-pruned_scan_count
-8
-sorted_scan_count
-10
-pushed_queries_defined
-401
-pushed_queries_dropped
-7
-pushed_queries_executed
-545
+select new.variable_name, new.variable_value - old.variable_value
+from server_counts_at_startup as old,
+information_schema.global_status as new
+where new.variable_name = old.variable_name
+order by new.variable_name;
+variable_name	new.variable_value - old.variable_value
+NDB_PRUNED_SCAN_COUNT	8
+NDB_PUSHED_QUERIES_DEFINED	401
+NDB_PUSHED_QUERIES_DROPPED	7
+NDB_PUSHED_QUERIES_EXECUTED	545
+NDB_SORTED_SCAN_COUNT	10
+drop table server_counts_at_startup;
 set ndb_join_pushdown = @save_ndb_join_pushdown;

=== modified file 'mysql-test/suite/ndb/r/ndb_join_pushdown_nobnl.result'
--- a/mysql-test/suite/ndb/r/ndb_join_pushdown_nobnl.result	2012-02-29 10:51:31 +0000
+++ b/mysql-test/suite/ndb/r/ndb_join_pushdown_nobnl.result	2012-03-05 14:02:05 +0000
@@ -5,6 +5,14 @@ select counter_name, sum(val) as val 
 from ndbinfo.counters 
 where block_name='DBSPJ' 
 group by counter_name;
+create temporary table server_counts_at_startup
+select * from information_schema.global_status 
+where variable_name in 
+('Ndb_pruned_scan_count',
+'Ndb_sorted_scan_count',
+'Ndb_pushed_queries_defined',
+'Ndb_pushed_queries_dropped',
+'Ndb_pushed_queries_executed');
 set @save_ndb_join_pushdown = @@session.ndb_join_pushdown;
 set ndb_join_pushdown = true;
 create table t1 (
@@ -4068,6 +4076,16 @@ where t1.a = 10 and t1.b = 10 and 
 t2.a = t1.c and t2.b = t1.c;
 a	b	c	a	b	c
 10	10	11	11	11	12
+create temporary table server_counts
+select * from information_schema.global_status 
+where variable_name in 
+('Ndb_scan_count',
+'Ndb_pruned_scan_count',
+'Ndb_sorted_scan_count',
+'Ndb_pushed_queries_defined',
+'Ndb_pushed_queries_dropped',
+'Ndb_pushed_queries_executed',
+'Ndb_pushed_reads');
 select * from t1 t1, t1 t2 
 where t1.a = 11 and t1.b = 11 and 
 t2.a = t1.c and t2.b = t1.c;
@@ -4084,20 +4102,20 @@ where t1.a = 11 and 
 t2.a = t1.c and t2.b = t1.c;
 count(*)
 1
-scan_count
-2
-pruned_scan_count
-1
-sorted_scan_count
-1
-pushed_queries_defined
-3
-pushed_queries_dropped
-0
-pushed_queries_executed
-3
-pushed_reads
-4
+select new.variable_name, new.variable_value - old.variable_value
+from server_counts as old,
+information_schema.global_status as new
+where new.variable_name = old.variable_name
+order by new.variable_name;
+variable_name	new.variable_value - old.variable_value
+NDB_PRUNED_SCAN_COUNT	1
+NDB_PUSHED_QUERIES_DEFINED	3
+NDB_PUSHED_QUERIES_DROPPED	0
+NDB_PUSHED_QUERIES_EXECUTED	3
+NDB_PUSHED_READS	4
+NDB_SCAN_COUNT	2
+NDB_SORTED_SCAN_COUNT	1
+drop table server_counts;
 drop table t1;
 create table t1(
 d int not null,
@@ -5481,13 +5499,14 @@ group by counter_name;
 select spj_counts_at_end.counter_name, spj_counts_at_end.val - spj_counts_at_startup.val 
 from spj_counts_at_end, spj_counts_at_startup 
 where spj_counts_at_end.counter_name = spj_counts_at_startup.counter_name
-and spj_counts_at_end.counter_name <> 'READS_NOT_FOUND'
-       and spj_counts_at_end.counter_name <> 'LOCAL_READS_SENT'
-       and spj_counts_at_end.counter_name <> 'REMOTE_READS_SENT'
-       and spj_counts_at_end.counter_name <> 'LOCAL_RANGE_SCANS_SENT'
-       and spj_counts_at_end.counter_name <> 'REMOTE_RANGE_SCANS_SENT'
-       and spj_counts_at_end.counter_name <> 'SCAN_ROWS_RETURNED'
-       and spj_counts_at_end.counter_name <> 'SCAN_BATCHES_RETURNED';
+and spj_counts_at_end.counter_name not in 
+('READS_NOT_FOUND',
+'LOCAL_READS_SENT',
+'REMOTE_READS_SENT',
+'LOCAL_RANGE_SCANS_SENT',
+'REMOTE_RANGE_SCANS_SENT',
+'SCAN_ROWS_RETURNED',
+'SCAN_BATCHES_RETURNED');
 counter_name	spj_counts_at_end.val - spj_counts_at_startup.val
 CONST_PRUNED_RANGE_SCANS_RECEIVED	8
 LOCAL_TABLE_SCANS_SENT	298
@@ -5497,15 +5516,17 @@ READS_RECEIVED	54
 TABLE_SCANS_RECEIVED	298
 drop table spj_counts_at_startup;
 drop table spj_counts_at_end;
-pruned_scan_count
-8
-sorted_scan_count
-10
-pushed_queries_defined
-409
-pushed_queries_dropped
-7
-pushed_queries_executed
-569
+select new.variable_name, new.variable_value - old.variable_value
+from server_counts_at_startup as old,
+information_schema.global_status as new
+where new.variable_name = old.variable_name
+order by new.variable_name;
+variable_name	new.variable_value - old.variable_value
+NDB_PRUNED_SCAN_COUNT	8
+NDB_PUSHED_QUERIES_DEFINED	409
+NDB_PUSHED_QUERIES_DROPPED	7
+NDB_PUSHED_QUERIES_EXECUTED	569
+NDB_SORTED_SCAN_COUNT	10
+drop table server_counts_at_startup;
 set ndb_join_pushdown = @save_ndb_join_pushdown;
 set @@global.optimizer_switch=default;

=== modified file 'mysql-test/suite/ndb/r/ndb_join_pushdown_none.result'
--- a/mysql-test/suite/ndb/r/ndb_join_pushdown_none.result	2012-02-29 10:51:31 +0000
+++ b/mysql-test/suite/ndb/r/ndb_join_pushdown_none.result	2012-03-05 14:02:05 +0000
@@ -4,6 +4,14 @@ select counter_name, sum(val) as val 
 from ndbinfo.counters 
 where block_name='DBSPJ' 
 group by counter_name;
+create temporary table server_counts_at_startup
+select * from information_schema.global_status 
+where variable_name in 
+('Ndb_pruned_scan_count',
+'Ndb_sorted_scan_count',
+'Ndb_pushed_queries_defined',
+'Ndb_pushed_queries_dropped',
+'Ndb_pushed_queries_executed');
 set @save_ndb_join_pushdown = @@session.ndb_join_pushdown;
 set ndb_join_pushdown = true;
 create table t1 (
@@ -4071,6 +4079,16 @@ where t1.a = 10 and t1.b = 10 and 
 t2.a = t1.c and t2.b = t1.c;
 a	b	c	a	b	c
 10	10	11	11	11	12
+create temporary table server_counts
+select * from information_schema.global_status 
+where variable_name in 
+('Ndb_scan_count',
+'Ndb_pruned_scan_count',
+'Ndb_sorted_scan_count',
+'Ndb_pushed_queries_defined',
+'Ndb_pushed_queries_dropped',
+'Ndb_pushed_queries_executed',
+'Ndb_pushed_reads');
 select * from t1 t1, t1 t2 
 where t1.a = 11 and t1.b = 11 and 
 t2.a = t1.c and t2.b = t1.c;
@@ -4087,20 +4105,20 @@ where t1.a = 11 and 
 t2.a = t1.c and t2.b = t1.c;
 count(*)
 1
-scan_count
-2
-pruned_scan_count
-1
-sorted_scan_count
-1
-pushed_queries_defined
-3
-pushed_queries_dropped
-0
-pushed_queries_executed
-3
-pushed_reads
-4
+select new.variable_name, new.variable_value - old.variable_value
+from server_counts as old,
+information_schema.global_status as new
+where new.variable_name = old.variable_name
+order by new.variable_name;
+variable_name	new.variable_value - old.variable_value
+NDB_PRUNED_SCAN_COUNT	1
+NDB_PUSHED_QUERIES_DEFINED	3
+NDB_PUSHED_QUERIES_DROPPED	0
+NDB_PUSHED_QUERIES_EXECUTED	3
+NDB_PUSHED_READS	4
+NDB_SCAN_COUNT	2
+NDB_SORTED_SCAN_COUNT	1
+drop table server_counts;
 drop table t1;
 create table t1(
 d int not null,
@@ -5486,13 +5504,14 @@ group by counter_name;
 select spj_counts_at_end.counter_name, spj_counts_at_end.val - spj_counts_at_startup.val 
 from spj_counts_at_end, spj_counts_at_startup 
 where spj_counts_at_end.counter_name = spj_counts_at_startup.counter_name
-and spj_counts_at_end.counter_name <> 'READS_NOT_FOUND'
-       and spj_counts_at_end.counter_name <> 'LOCAL_READS_SENT'
-       and spj_counts_at_end.counter_name <> 'REMOTE_READS_SENT'
-       and spj_counts_at_end.counter_name <> 'LOCAL_RANGE_SCANS_SENT'
-       and spj_counts_at_end.counter_name <> 'REMOTE_RANGE_SCANS_SENT'
-       and spj_counts_at_end.counter_name <> 'SCAN_ROWS_RETURNED'
-       and spj_counts_at_end.counter_name <> 'SCAN_BATCHES_RETURNED';
+and spj_counts_at_end.counter_name not in 
+('READS_NOT_FOUND',
+'LOCAL_READS_SENT',
+'REMOTE_READS_SENT',
+'LOCAL_RANGE_SCANS_SENT',
+'REMOTE_RANGE_SCANS_SENT',
+'SCAN_ROWS_RETURNED',
+'SCAN_BATCHES_RETURNED');
 counter_name	spj_counts_at_end.val - spj_counts_at_startup.val
 CONST_PRUNED_RANGE_SCANS_RECEIVED	8
 LOCAL_TABLE_SCANS_SENT	254
@@ -5502,15 +5521,17 @@ READS_RECEIVED	58
 TABLE_SCANS_RECEIVED	254
 drop table spj_counts_at_startup;
 drop table spj_counts_at_end;
-pruned_scan_count
-11
-sorted_scan_count
-10
-pushed_queries_defined
-401
-pushed_queries_dropped
-11
-pushed_queries_executed
-553
+select new.variable_name, new.variable_value - old.variable_value
+from server_counts_at_startup as old,
+information_schema.global_status as new
+where new.variable_name = old.variable_name
+order by new.variable_name;
+variable_name	new.variable_value - old.variable_value
+NDB_PRUNED_SCAN_COUNT	11
+NDB_PUSHED_QUERIES_DEFINED	401
+NDB_PUSHED_QUERIES_DROPPED	11
+NDB_PUSHED_QUERIES_EXECUTED	553
+NDB_SORTED_SCAN_COUNT	10
+drop table server_counts_at_startup;
 set ndb_join_pushdown = @save_ndb_join_pushdown;
 set @@global.optimizer_switch=default;

=== modified file 'mysql-test/suite/ndb/t/ndb_join_pushdown.inc'
--- a/mysql-test/suite/ndb/t/ndb_join_pushdown.inc	2012-01-04 20:25:40 +0000
+++ b/mysql-test/suite/ndb/t/ndb_join_pushdown.inc	2012-03-05 14:02:05 +0000
@@ -26,14 +26,15 @@ create temporary table spj_counts_at_sta
        where block_name='DBSPJ' 
        group by counter_name;
 
-# Save old counter values.
-let $scan_count_at_startup = query_get_value(show status like 'Ndb_scan_count', Value, 1);
-let $pruned_scan_count_at_startup = query_get_value(show status like 'Ndb_pruned_scan_count', Value, 1);
-let $sorted_scan_count_at_startup = query_get_value(show status like 'Ndb_sorted_scan_count', Value, 1);
-let $pushed_queries_defined_at_startup = query_get_value(show status like 'Ndb_pushed_queries_defined', Value, 1);
-let $pushed_queries_dropped_at_startup = query_get_value(show status like 'Ndb_pushed_queries_dropped', Value, 1);
-let $pushed_queries_executed_at_startup = query_get_value(show status like 'Ndb_pushed_queries_executed', Value, 1);
-let $pushed_reads_at_startup = query_get_value(show status like 'Ndb_pushed_reads', Value, 1);
+# Save old mysqld counter values.
+create temporary table server_counts_at_startup
+       select * from information_schema.global_status 
+       where variable_name in 
+       ('Ndb_pruned_scan_count',
+        'Ndb_sorted_scan_count',
+        'Ndb_pushed_queries_defined',
+        'Ndb_pushed_queries_dropped',
+        'Ndb_pushed_queries_executed');
 
 ##############
 # Test start
@@ -2665,13 +2666,17 @@ select * from t1 t1, t1 t2 
       	     t2.a = t1.c and t2.b = t1.c; 
 
 # Save old counter values.
-let $old_scan_count = query_get_value(show status like 'Ndb_scan_count', Value, 1);
-let $old_pruned_scan_count = query_get_value(show status like 'Ndb_pruned_scan_count', Value, 1);
-let $old_sorted_scan_count = query_get_value(show status like 'Ndb_sorted_scan_count', Value, 1);
-let $old_pushed_queries_defined = query_get_value(show status like 'Ndb_pushed_queries_defined', Value, 1);
-let $old_pushed_queries_dropped = query_get_value(show status like 'Ndb_pushed_queries_dropped', Value, 1);
-let $old_pushed_queries_executed = query_get_value(show status like 'Ndb_pushed_queries_executed', Value, 1);
-let $old_pushed_reads = query_get_value(show status like 'Ndb_pushed_reads', Value, 1);
+create temporary table server_counts
+       select * from information_schema.global_status 
+       where variable_name in 
+       ('Ndb_scan_count',
+        'Ndb_pruned_scan_count',
+        'Ndb_sorted_scan_count',
+        'Ndb_pushed_queries_defined',
+        'Ndb_pushed_queries_dropped',
+        'Ndb_pushed_queries_executed',
+        'Ndb_pushed_reads');
 
 # Run some queries that should increment the counters.
 # This query should push a single read.
@@ -2690,25 +2695,14 @@ select count(*) from t1 t1, t1 t2 
        where t1.a = 11 and 
        	     t2.a = t1.c and t2.b = t1.c; 
 
-# Get the new values;
-let $new_scan_count = query_get_value(show status like 'Ndb_scan_count', Value, 1);
-let $new_pruned_scan_count = query_get_value(show status like 'Ndb_pruned_scan_count', Value, 1);
-let $new_sorted_scan_count = query_get_value(show status like 'Ndb_sorted_scan_count', Value, 1);
-let $new_pushed_queries_defined = query_get_value(show status like 'Ndb_pushed_queries_defined', Value, 1);
-let $new_pushed_queries_dropped = query_get_value(show status like 'Ndb_pushed_queries_dropped', Value, 1);
-let $new_pushed_queries_executed = query_get_value(show status like 'Ndb_pushed_queries_executed', Value, 1);
-let $new_pushed_reads = query_get_value(show status like 'Ndb_pushed_reads', Value, 1);
+# Calculate the change in mysqld counters.
+select new.variable_name, new.variable_value - old.variable_value
+       from server_counts as old,
+         information_schema.global_status as new
+       where new.variable_name = old.variable_name
+       order by new.variable_name;
 
-# Calculate the change.
---disable_query_log
---eval select $new_scan_count - $old_scan_count as scan_count
---eval select $new_pruned_scan_count - $old_pruned_scan_count as pruned_scan_count
---eval select $new_sorted_scan_count - $old_sorted_scan_count as sorted_scan_count
---eval select $new_pushed_queries_defined - $old_pushed_queries_defined as pushed_queries_defined
---eval select $new_pushed_queries_dropped - $old_pushed_queries_dropped as pushed_queries_dropped
---eval select $new_pushed_queries_executed - $old_pushed_queries_executed as pushed_queries_executed
---eval select $new_pushed_reads - $old_pushed_reads as pushed_reads
---enable_query_log
+drop table server_counts;
 
 connection ddl;
 drop table t1;
@@ -3922,41 +3916,26 @@ connection spj;
 select spj_counts_at_end.counter_name, spj_counts_at_end.val - spj_counts_at_startup.val 
        from spj_counts_at_end, spj_counts_at_startup 
        where spj_counts_at_end.counter_name = spj_counts_at_startup.counter_name
-       and spj_counts_at_end.counter_name <> 'READS_NOT_FOUND'
-       and spj_counts_at_end.counter_name <> 'LOCAL_READS_SENT'
-       and spj_counts_at_end.counter_name <> 'REMOTE_READS_SENT'
-       and spj_counts_at_end.counter_name <> 'LOCAL_RANGE_SCANS_SENT'
-       and spj_counts_at_end.counter_name <> 'REMOTE_RANGE_SCANS_SENT'
-       and spj_counts_at_end.counter_name <> 'SCAN_ROWS_RETURNED'
-       and spj_counts_at_end.counter_name <> 'SCAN_BATCHES_RETURNED';
+       and spj_counts_at_end.counter_name not in 
+       ('READS_NOT_FOUND',
+        'LOCAL_READS_SENT',
+        'REMOTE_READS_SENT',
+        'LOCAL_RANGE_SCANS_SENT',
+        'REMOTE_RANGE_SCANS_SENT',
+        'SCAN_ROWS_RETURNED',
+        'SCAN_BATCHES_RETURNED');
 
-connection spj;
 drop table spj_counts_at_startup;
 drop table spj_counts_at_end;
 
-connection spj;
-# Similar for the SPJ specific 'STATUS' counters
-let $scan_count_at_end = query_get_value(show status like 'Ndb_scan_count', Value, 1);
-let $pruned_scan_count_at_end = query_get_value(show status like 'Ndb_pruned_scan_count', Value, 1);
-let $sorted_scan_count_at_end = query_get_value(show status like 'Ndb_sorted_scan_count', Value, 1);
-let $pushed_queries_defined_at_end = query_get_value(show status like 'Ndb_pushed_queries_defined', Value, 1);
-let $pushed_queries_dropped_at_end = query_get_value(show status like 'Ndb_pushed_queries_dropped', Value, 1);
-let $pushed_queries_executed_at_end = query_get_value(show status like 'Ndb_pushed_queries_executed', Value, 1);
-let $pushed_reads_at_end = query_get_value(show status like 'Ndb_pushed_reads', Value, 1);
+# Calculate the change in mysqld counters.
+select new.variable_name, new.variable_value - old.variable_value
+       from server_counts_at_startup as old,
+         information_schema.global_status as new
+       where new.variable_name = old.variable_name
+       order by new.variable_name;
 
-# Calculate the change.
---disable_query_log
-# There is some random variation in scan_count, probably due to statistics
-# being updated at unpredictable intervals. Therefore, we only test for 
-# deviations greater than an tenth of the expected value.
-#--eval select round(($scan_count_at_end - $scan_count_at_startup)/2524, 1) as scan_count_derived
---eval select $pruned_scan_count_at_end - $pruned_scan_count_at_startup as pruned_scan_count
---eval select $sorted_scan_count_at_end - $sorted_scan_count_at_startup as sorted_scan_count
---eval select $pushed_queries_defined_at_end - $pushed_queries_defined_at_startup as pushed_queries_defined
---eval select $pushed_queries_dropped_at_end - $pushed_queries_dropped_at_startup as pushed_queries_dropped
---eval select $pushed_queries_executed_at_end - $pushed_queries_executed_at_startup as pushed_queries_executed
-#--eval select $pushed_reads_at_end - $pushed_reads_at_startup as pushed_reads
---enable_query_log
+drop table server_counts_at_startup;
 
 --source ndbinfo_drop.inc
 

=== modified file 'storage/ndb/include/kernel/signaldata/DbspjErr.hpp'
--- a/storage/ndb/include/kernel/signaldata/DbspjErr.hpp	2011-02-23 19:28:26 +0000
+++ b/storage/ndb/include/kernel/signaldata/DbspjErr.hpp	2012-03-01 15:13:54 +0000
@@ -1,3 +1,4 @@
+
 /*
    Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
 
@@ -39,6 +40,7 @@ struct DbspjErr
     ,OutOfRowMemory = 20015
     ,NodeFailure = 20016
     ,InvalidTreeNodeCount = 20017
+    ,IndexFragNotFound = 20018
   };
 };
 

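The new IndexFragNotFound code replaces a bare 'return 99; // TODO' in
Dbspj::scanIndex_findFrag() (see the DbspjMain.cpp hunks below). A
minimal sketch of the pattern with a simplified, hypothetical findFrag
signature; only the error value itself comes from the diff:

#include <cstdio>
typedef unsigned int Uint32;

struct DbspjErr {
  enum {
    IndexFragNotFound = 20018   // added by this merge
  };
};

// Simplified stand-in: return 0 on success, a named DbspjErr code on
// failure, never an unexplained magic number.
static Uint32 findFrag(Uint32 fragId, Uint32 fragCount)
{
  if (fragId >= fragCount)
    return DbspjErr::IndexFragNotFound;  // was: return 99; // TODO
  return 0;
}

int main()
{
  printf("%u\n", findFrag(3, 2));   // 20018
  printf("%u\n", findFrag(1, 2));   // 0
  return 0;
}
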
=== modified file 'storage/ndb/include/ndb_version.h.in'
--- a/storage/ndb/include/ndb_version.h.in	2012-02-03 13:37:34 +0000
+++ b/storage/ndb/include/ndb_version.h.in	2012-02-23 15:41:31 +0000
@@ -737,4 +737,23 @@ ndbd_128_instances_address(Uint32 x)
   return x >= NDBD_128_INSTANCES_ADDRESS_72;
 }
 
+#define NDBD_FIXED_LOOKUP_QUERY_ABORT_72 NDB_MAKE_VERSION(7,2,5)
+
+static
+inline
+int
+ndbd_fixed_lookup_query_abort(Uint32 x)
+{
+  const Uint32 major = (x >> 16) & 0xFF;
+  const Uint32 minor = (x >>  8) & 0xFF;
+
+  if (major == 7 && minor < 2)
+  {
+    // Only experimental support of SPJ pre 7.2.0.
+    // Assume we support 'fixed-abort' as we want it tested.
+    return 1;
+  }
+  return x >= NDBD_FIXED_LOOKUP_QUERY_ABORT_72;
+}
+
 #endif

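The version gate above works because NDB versions travel as a single
packed word. A self-contained sketch, assuming NDB_MAKE_VERSION packs
major/minor/build into bits 16-23, 8-15 and 0-7 as the bit extraction
in ndbd_fixed_lookup_query_abort() implies:

#include <cstdio>
typedef unsigned int Uint32;

// Assumption: matches the packing implied by the extraction below.
#define NDB_MAKE_VERSION(A,B,C) (((A) << 16) | ((B) << 8) | (C))
#define NDBD_FIXED_LOOKUP_QUERY_ABORT_72 NDB_MAKE_VERSION(7,2,5)

static int ndbd_fixed_lookup_query_abort(Uint32 x)
{
  const Uint32 major = (x >> 16) & 0xFF;
  const Uint32 minor = (x >>  8) & 0xFF;
  if (major == 7 && minor < 2)
    return 1;  // pre-7.2 SPJ was experimental; assume support for testing
  return x >= NDBD_FIXED_LOOKUP_QUERY_ABORT_72;
}

int main()
{
  printf("7.1.9 -> %d\n", ndbd_fixed_lookup_query_abort(NDB_MAKE_VERSION(7,1,9)));  // 1
  printf("7.2.4 -> %d\n", ndbd_fixed_lookup_query_abort(NDB_MAKE_VERSION(7,2,4)));  // 0
  printf("7.2.5 -> %d\n", ndbd_fixed_lookup_query_abort(NDB_MAKE_VERSION(7,2,5)));  // 1
  return 0;
}

A DBSPJ node therefore downgrades lookup abort errors to 'NodeFailure'
(see the Dbspj::abort() hunk below) whenever the API node is older than
7.2.5, since that is the only error code such an API stops counting on.
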
=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-11-16 08:17:17 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2012-03-05 09:38:49 +0000
@@ -18,6 +18,7 @@
 #define DBSPJ_C
 #include "Dbspj.hpp"
 
+#include <ndb_version.h>
 #include <SectionReader.hpp>
 #include <signaldata/LqhKey.hpp>
 #include <signaldata/QueryTree.hpp>
@@ -53,7 +54,7 @@
 #endif
 
 #if 1
-#define DEBUG_CRASH() ndbrequire(false)
+#define DEBUG_CRASH() { if (ERROR_INSERTED(0)) ndbrequire(false) }
 #else
 #define DEBUG_CRASH()
 #endif
@@ -397,9 +398,18 @@ void Dbspj::execLQHKEYREQ(Signal* signal
     if (unlikely(!m_arenaAllocator.seize(ah)))
       break;
 
-
-    m_request_pool.seize(ah, requestPtr);
-
+    if (ERROR_INSERTED(17001))
+    {
+      ndbout_c("Injecting OutOfQueryMem error 17001 at line %d file %s",
+                __LINE__,  __FILE__);
+      jam();
+      break;
+    }
+    if (unlikely(!m_request_pool.seize(ah, requestPtr)))
+    {
+      jam();
+      break;
+    }
     new (requestPtr.p) Request(ah);
     do_init(requestPtr.p, req, signal->getSendersBlockRef());
 
@@ -690,8 +700,18 @@ Dbspj::execSCAN_FRAGREQ(Signal* signal)
     if (unlikely(!m_arenaAllocator.seize(ah)))
       break;
 
-    m_request_pool.seize(ah, requestPtr);
-
+    if (ERROR_INSERTED(17002))
+    {
+      ndbout_c("Injecting OutOfQueryMem error 17002 at line %d file %s",
+                __LINE__,  __FILE__);
+      jam();
+      break;
+    }
+    if (unlikely(!m_request_pool.seize(ah, requestPtr)))
+    {
+      jam();
+      break;
+    }
     new (requestPtr.p) Request(ah);
     do_init(requestPtr.p, req, signal->getSendersBlockRef());
 
@@ -899,6 +919,14 @@ Dbspj::build(Build_context& ctx,
     if (unlikely(node_op != param_op))
     {
       DEBUG_CRASH();
+      jam();
+      goto error;
+    }
+    if (ERROR_INSERTED_CLEAR(17006))
+    {
+      ndbout_c("Injecting UnknowQueryOperation error 17006 at line %d file %s",
+                __LINE__,  __FILE__);
+      jam();
       goto error;
     }
 
@@ -925,9 +953,6 @@ Dbspj::build(Build_context& ctx,
      */
     ctx.m_start_signal = 0;
 
-    /**
-     * TODO handle error, by aborting request
-     */
     ndbrequire(ctx.m_cnt < NDB_ARRAY_SIZE(ctx.m_node_list));
     ctx.m_cnt++;
   }
@@ -992,6 +1017,13 @@ Dbspj::createNode(Build_context& ctx, Pt
    *   that can be setup using the Build_context
    *
    */
+  if (ERROR_INSERTED(17005))
+  {
+    ndbout_c("Injecting OutOfOperations error 17005 at line %d file %s",
+             __LINE__,  __FILE__);
+    jam();
+    return DbspjErr::OutOfOperations;
+  }
   if (m_treenode_pool.seize(requestPtr.p->m_arena, treeNodePtr))
   {
     DEBUG("createNode - seize -> ptrI: " << treeNodePtr.i);
@@ -1650,6 +1682,22 @@ Dbspj::abort(Signal* signal, Ptr<Request
 {
   jam();
 
+  /**
+   * Need to handle online upgrade as the protocol for
+   * signaling errors for Lookup-request changed in 7.2.5.
+   * If API-version is <= 7.2.4 we increase the severity 
+   * of the error to a 'NodeFailure' as this is the only
+   * errorcode for which the API will stop further
+   * 'outstanding-counting' in pre 7.2.5.
+   * (Starting from 7.2.5 we will stop counting for all 'hard errors')
+   */
+  if (requestPtr.p->isLookup() &&
+      !ndbd_fixed_lookup_query_abort(getNodeInfo(getResultRef(requestPtr)).m_version))
+  {
+    jam();
+    errCode = DbspjErr::NodeFailure;
+  }
+
   if ((requestPtr.p->m_state & Request::RS_ABORTING) != 0)
   {
     jam();
@@ -1779,6 +1827,7 @@ Dbspj::complete(Signal* signal, Ptr<Requ
 void
 Dbspj::cleanup(Ptr<Request> requestPtr)
 {
+  CLEAR_ERROR_INSERT_VALUE; // clear any injected error
   ndbrequire(requestPtr.p->m_cnt_active == 0);
   {
     Ptr<TreeNode> nodePtr;
@@ -2113,7 +2162,7 @@ Dbspj::execTRANSID_AI(Signal* signal)
   Ptr<Request> requestPtr;
   m_request_pool.getPtr(requestPtr, treeNodePtr.p->m_requestPtrI);
 
-  ndbrequire(signal->getNoOfSections() != 0); // TODO check if this can happen
+  ndbrequire(signal->getNoOfSections() != 0);
 
   SegmentedSectionPtr dataPtr;
   {
@@ -2637,11 +2686,19 @@ Dbspj::allocPage(Ptr<RowPage> & ptr)
   if (m_free_page_list.firstItem == RNIL)
   {
     jam();
+    if (ERROR_INSERTED(17003))
+    {
+      ndbout_c("Injecting failed '::allocPage', error 17003 at line %d file %s",
+               __LINE__,  __FILE__);
+      jam();
+      return false;
+    }
     ptr.p = (RowPage*)m_ctx.m_mm.alloc_page(RT_SPJ_DATABUFFER,
                                             &ptr.i,
                                             Ndbd_mem_manager::NDB_ZONE_ANY);
     if (ptr.p == 0)
     {
+      jam();
       return false;
     }
     return true;
@@ -3019,6 +3076,23 @@ Dbspj::lookup_send(Signal* signal,
   getSection(handle.m_ptr[1], attrInfoPtrI);
   handle.m_cnt = 2;
 
+  /**
+   * Inject error to test LQHKEYREF handling:
+   * Tampering with tableSchemaVersion such that LQH will 
+   * return LQHKEYREF('1227: Invalid schema version')
+   * May happen for different treeNodes in the request:
+   * - 17030: Fail on any lookup_send()
+   * - 17031: Fail on lookup_send() if 'isLeaf'
+   * - 17032: Fail on lookup_send() if treeNode not root 
+   */
+  if (ERROR_INSERTED_CLEAR(17030) ||
+      (treeNodePtr.p->isLeaf() && ERROR_INSERTED_CLEAR(17031)) ||
+      (treeNodePtr.p->m_parentPtrI != RNIL && ERROR_INSERTED_CLEAR(17032)))
+  {
+    jam();
+    req->tableSchemaVersion += (1 << 16); // Provoke 'Invalid schema version'
+  }
+
 #if defined DEBUG_LQHKEYREQ
   ndbout_c("LQHKEYREQ to %x", ref);
   printLQHKEYREQ(stdout, signal->getDataPtrSend(),
@@ -3040,6 +3114,22 @@ Dbspj::lookup_send(Signal* signal,
     c_Counters.incr_counter(CI_REMOTE_READS_SENT, 1);
   }
 
+  /**
+   * Test execution terminated due to 'NodeFailure' which
+   * may happen for different treeNodes in the request:
+   * - 17020: Fail on any lookup_send()
+   * - 17021: Fail on lookup_send() if 'isLeaf'
+   * - 17022: Fail on lookup_send() if treeNode not root 
+   */
+  if (ERROR_INSERTED_CLEAR(17020) ||
+      (treeNodePtr.p->isLeaf() && ERROR_INSERTED_CLEAR(17021)) ||
+      (treeNodePtr.p->m_parentPtrI != RNIL && ERROR_INSERTED_CLEAR(17022)))
+  {
+    jam();
+    releaseSections(handle);
+    abort(signal, requestPtr, DbspjErr::NodeFailure);
+    return;
+  }
   if (unlikely(!c_alive_nodes.get(Tnode)))
   {
     jam();
@@ -3099,14 +3189,18 @@ Dbspj::lookup_execTRANSID_AI(Signal* sig
     LocalArenaPoolImpl pool(requestPtr.p->m_arena, m_dependency_map_pool);
     Local_dependency_map list(pool, treeNodePtr.p->m_dependent_nodes);
     Dependency_map::ConstDataBufferIterator it;
+
     for (list.first(it); !it.isNull(); list.next(it))
     {
-      jam();
-      Ptr<TreeNode> childPtr;
-      m_treenode_pool.getPtr(childPtr, * it.data);
-      ndbrequire(childPtr.p->m_info != 0&&childPtr.p->m_info->m_parent_row!=0);
-      (this->*(childPtr.p->m_info->m_parent_row))(signal,
-                                                  requestPtr, childPtr,rowRef);
+      if (likely(requestPtr.p->m_state & Request::RS_RUNNING))
+      {
+        jam();
+        Ptr<TreeNode> childPtr;
+        m_treenode_pool.getPtr(childPtr, * it.data);
+        ndbrequire(childPtr.p->m_info!=0 && childPtr.p->m_info->m_parent_row!=0);
+        (this->*(childPtr.p->m_info->m_parent_row))(signal,
+                                                    requestPtr, childPtr,rowRef);
+      }
     }
   }
   ndbrequire(!(requestPtr.p->isLookup() && treeNodePtr.p->isLeaf()));
@@ -3143,85 +3237,89 @@ Dbspj::lookup_execLQHKEYREF(Signal* sign
 
   c_Counters.incr_counter(CI_READS_NOT_FOUND, 1);
 
-  if (requestPtr.p->isLookup())
+  DEBUG("lookup_execLQHKEYREF, errorCode:" << errCode);
+
+  /**
+   * If Request is still actively running: API need to
+   * be informed about error. 
+   * Error code may either indicate a 'hard error' which should
+   * terminate the query execution, or a 'soft error' which 
+   * should be signaled NDBAPI, and execution continued.
+   */
+  if (likely(requestPtr.p->m_state & Request::RS_RUNNING))
   {
-    jam();
+    switch(errCode){
+    case 626: // 'Soft error' : Row not found
+    case 899: // 'Soft error' : Interpreter_exit_nok
 
-    /* CONF/REF not requested for lookup-Leaf: */
-    ndbrequire(!treeNodePtr.p->isLeaf());
+      jam();
+      /**
+       * Only Lookup-request need to send TCKEYREF...
+       */
+      if (requestPtr.p->isLookup())
+      {
+        jam();
 
-    /**
-     * Scan-request does not need to
-     *   send TCKEYREF...
-     */
-    /**
-     * Return back to api...
-     *   NOTE: assume that signal is tampered with
-     */
-    Uint32 resultRef = treeNodePtr.p->m_lookup_data.m_api_resultRef;
-    Uint32 resultData = treeNodePtr.p->m_lookup_data.m_api_resultData;
-    TcKeyRef* ref = (TcKeyRef*)signal->getDataPtr();
-    ref->connectPtr = resultData;
-    ref->transId[0] = requestPtr.p->m_transId[0];
-    ref->transId[1] = requestPtr.p->m_transId[1];
-    ref->errorCode = errCode;
-    ref->errorData = 0;
+        /* CONF/REF not requested for lookup-Leaf: */
+        ndbrequire(!treeNodePtr.p->isLeaf());
 
-    DEBUG("lookup_execLQHKEYREF, errorCode:" << errCode);
+        /**
+         * Return back to api...
+         *   NOTE: assume that signal is tampered with
+         */
+        Uint32 resultRef = treeNodePtr.p->m_lookup_data.m_api_resultRef;
+        Uint32 resultData = treeNodePtr.p->m_lookup_data.m_api_resultData;
+        TcKeyRef* ref = (TcKeyRef*)signal->getDataPtr();
+        ref->connectPtr = resultData;
+        ref->transId[0] = requestPtr.p->m_transId[0];
+        ref->transId[1] = requestPtr.p->m_transId[1];
+        ref->errorCode = errCode;
+        ref->errorData = 0;
 
-    sendTCKEYREF(signal, resultRef, requestPtr.p->m_senderRef);
+        sendTCKEYREF(signal, resultRef, requestPtr.p->m_senderRef);
 
-    if (treeNodePtr.p->m_bits & TreeNode::T_UNIQUE_INDEX_LOOKUP)
-    {
-      /**
-       * If this is a "leaf" unique index lookup
-       *   emit extra TCKEYCONF as would have been done with ordinary
-       *   operation
-       */
-      LocalArenaPoolImpl pool(requestPtr.p->m_arena, m_dependency_map_pool);
-      Local_dependency_map list(pool, treeNodePtr.p->m_dependent_nodes);
-      Dependency_map::ConstDataBufferIterator it;
-      ndbrequire(list.first(it));
-      ndbrequire(list.getSize() == 1); // should only be 1 child
-      Ptr<TreeNode> childPtr;
-      m_treenode_pool.getPtr(childPtr, * it.data);
-      if (childPtr.p->m_bits & TreeNode::T_LEAF)
-      {
-        jam();
-        Uint32 resultRef = childPtr.p->m_lookup_data.m_api_resultRef;
-        Uint32 resultData = childPtr.p->m_lookup_data.m_api_resultData;
-        TcKeyConf* conf = (TcKeyConf*)signal->getDataPtr();
-        conf->apiConnectPtr = RNIL;
-        conf->confInfo = 0;
-        conf->gci_hi = 0;
-        TcKeyConf::setNoOfOperations(conf->confInfo, 1);
-        conf->transId1 = requestPtr.p->m_transId[0];
-        conf->transId2 = requestPtr.p->m_transId[1];
-        conf->operations[0].apiOperationPtr = resultData;
-        conf->operations[0].attrInfoLen =
-          TcKeyConf::DirtyReadBit |getOwnNodeId();
-        sendTCKEYCONF(signal, TcKeyConf::StaticLength + 2, resultRef, requestPtr.p->m_senderRef);
-      }
-    }
-  }
-  else
-  {
-    jam();
-    switch(errCode){
-    case 626: // Row not found
-    case 899: // Interpreter_exit_nok
-      jam();
+        if (treeNodePtr.p->m_bits & TreeNode::T_UNIQUE_INDEX_LOOKUP)
+        {
+          /**
+           * If this is a "leaf" unique index lookup
+           *   emit extra TCKEYCONF as would have been done with ordinary
+           *   operation
+           */
+          LocalArenaPoolImpl pool(requestPtr.p->m_arena, m_dependency_map_pool);
+          Local_dependency_map list(pool, treeNodePtr.p->m_dependent_nodes);
+          Dependency_map::ConstDataBufferIterator it;
+          ndbrequire(list.first(it));
+          ndbrequire(list.getSize() == 1); // should only be 1 child
+          Ptr<TreeNode> childPtr;
+          m_treenode_pool.getPtr(childPtr, * it.data);
+          if (childPtr.p->m_bits & TreeNode::T_LEAF)
+          {
+            jam();
+            Uint32 resultRef = childPtr.p->m_lookup_data.m_api_resultRef;
+            Uint32 resultData = childPtr.p->m_lookup_data.m_api_resultData;
+            TcKeyConf* conf = (TcKeyConf*)signal->getDataPtr();
+            conf->apiConnectPtr = RNIL;
+            conf->confInfo = 0;
+            conf->gci_hi = 0;
+            TcKeyConf::setNoOfOperations(conf->confInfo, 1);
+            conf->transId1 = requestPtr.p->m_transId[0];
+            conf->transId2 = requestPtr.p->m_transId[1];
+            conf->operations[0].apiOperationPtr = resultData;
+            conf->operations[0].attrInfoLen =
+              TcKeyConf::DirtyReadBit |getOwnNodeId();
+            sendTCKEYCONF(signal, TcKeyConf::StaticLength + 2, resultRef, requestPtr.p->m_senderRef);
+          }
+        }
+      } // isLookup()
       break;
-    default:
+
+    default: // 'Hard error' : abort query
       jam();
       abort(signal, requestPtr, errCode);
     }
   }
 
-  Uint32 cnt = 2;
-  if (treeNodePtr.p->isLeaf())  // Can't be a lookup-Leaf, asserted above
-    cnt = 1;
-
+  Uint32 cnt = (treeNodePtr.p->isLeaf()) ? 1 : 2;
   ndbassert(requestPtr.p->m_lookup_node_data[Tnode] >= cnt);
   requestPtr.p->m_lookup_node_data[Tnode] -= cnt;
 
@@ -3291,7 +3389,7 @@ Dbspj::lookup_parent_row(Signal* signal,
    *   2) compute hash     (normally TC)
    *   3) get node for row (normally TC)
    */
-  Uint32 err;
+  Uint32 err = 0;
   const LqhKeyReq* src = (LqhKeyReq*)treeNodePtr.p->m_lookup_data.m_lqhKeyReq;
   const Uint32 tableId = LqhKeyReq::getTableId(src->tableSchemaVersion);
   const Uint32 corrVal = rowRef.m_src_correlation;
@@ -3300,6 +3398,22 @@ Dbspj::lookup_parent_row(Signal* signal,
 
   do
   {
+    /**
+     * Test execution terminated due to 'OutOfQueryMemory' which
+     * may happen multiple places below:
+     * - 17040: Fail on any lookup_parent_row()
+     * - 17041: Fail on lookup_parent_row() if 'isLeaf'
+     * - 17042: Fail on lookup_parent_row() if treeNode not root 
+     */
+    if (ERROR_INSERTED_CLEAR(17040) ||
+        (treeNodePtr.p->isLeaf() && ERROR_INSERTED_CLEAR(17041)) ||
+        (treeNodePtr.p->m_parentPtrI != RNIL && ERROR_INSERTED_CLEAR(17042)))
+    {
+      jam();
+      err = DbspjErr::OutOfQueryMemory;
+      break;
+    }
+
     Uint32 ptrI = RNIL;
     if (treeNodePtr.p->m_bits & TreeNode::T_KEYINFO_CONSTRUCTED)
     {
@@ -3471,7 +3585,10 @@ Dbspj::lookup_parent_row(Signal* signal,
     return;
   } while (0);
 
-  ndbrequire(false);
+  // If we fail it will always be a 'hard error' -> abort
+  ndbrequire(err);
+  jam();
+  abort(signal, requestPtr, err);
 }
 
 void
@@ -3691,9 +3808,14 @@ Dbspj::computePartitionHash(Signal* sign
       const KeyDescriptor::KeyAttr& keyAttr = desc->keyAttr[i];
       if (AttributeDescriptor::getDKey(keyAttr.attributeDescriptor))
       {
+        Uint32 attrLen =
         xfrm_attr(keyAttr.attributeDescriptor, keyAttr.charsetInfo,
                   src, srcPos, dst, dstPos,
                   NDB_ARRAY_SIZE(signal->theData) - 24);
+        if (unlikely(attrLen == 0))
+        {
+          return 290;  // 'Corrupt key in TC, unable to xfrm'
+        }
       }
     }
     tmp64 = (Uint64*)dst;
@@ -3743,10 +3865,6 @@ Dbspj::getNodes(Signal* signal, BuildKey
   return 0;
 
 error:
-  /**
-   * TODO handle error
-   */
-  ndbrequire(false);
   return err;
 }
 
@@ -3818,10 +3936,19 @@ Dbspj::scanFrag_build(Build_context& ctx
 
     treeNodePtr.p->m_scanfrag_data.m_scanFragHandlePtrI = RNIL;
     Ptr<ScanFragHandle> scanFragHandlePtr;
+    if (ERROR_INSERTED(17004))
+    {
+      ndbout_c("Injecting OutOfQueryMemory error 17004 at line %d file %s",
+               __LINE__,  __FILE__);
+      jam();
+      err = DbspjErr::OutOfQueryMemory;
+      break;
+    }
     if (unlikely(m_scanfraghandle_pool.seize(requestPtr.p->m_arena,
                                              scanFragHandlePtr) != true))
     {
       err = DbspjErr::OutOfQueryMemory;
+      jam();
       break;
     }
 
@@ -4072,19 +4199,22 @@ Dbspj::scanFrag_execTRANSID_AI(Signal* s
   jam();
   treeNodePtr.p->m_scanfrag_data.m_rows_received++;
 
-  LocalArenaPoolImpl pool(requestPtr.p->m_arena, m_dependency_map_pool);
-  Local_dependency_map list(pool, treeNodePtr.p->m_dependent_nodes);
-  Dependency_map::ConstDataBufferIterator it;
-
   {
+    LocalArenaPoolImpl pool(requestPtr.p->m_arena, m_dependency_map_pool);
+    Local_dependency_map list(pool, treeNodePtr.p->m_dependent_nodes);
+    Dependency_map::ConstDataBufferIterator it;
+
     for (list.first(it); !it.isNull(); list.next(it))
     {
-      jam();
-      Ptr<TreeNode> childPtr;
-      m_treenode_pool.getPtr(childPtr, * it.data);
-      ndbrequire(childPtr.p->m_info != 0&&childPtr.p->m_info->m_parent_row!=0);
-      (this->*(childPtr.p->m_info->m_parent_row))(signal,
-                                                  requestPtr, childPtr,rowRef);
+      if (likely(requestPtr.p->m_state & Request::RS_RUNNING))
+      {
+        jam();
+        Ptr<TreeNode> childPtr;
+        m_treenode_pool.getPtr(childPtr, * it.data);
+        ndbrequire(childPtr.p->m_info!=0 && childPtr.p->m_info->m_parent_row!=0);
+        (this->*(childPtr.p->m_info->m_parent_row))(signal,
+                                                    requestPtr, childPtr,rowRef);
+      }
     }
   }
 
@@ -4646,128 +4776,153 @@ Dbspj::execDIH_SCAN_TAB_CONF(Signal* sig
  // the same subset of frags from all SPJ requests in case of
  // the scan not being 'T_SCAN_PARALLEL'
   Uint16 fragNoOffs = requestPtr.p->m_rootFragId % fragCount;
+  Uint32 err = 0;
 
-  Ptr<ScanFragHandle> fragPtr;
-  Local_ScanFragHandle_list list(m_scanfraghandle_pool, data.m_fragments);
-  if (likely(m_scanfraghandle_pool.seize(requestPtr.p->m_arena, fragPtr)))
-  {
-    jam();
-    fragPtr.p->init(fragNoOffs);
-    fragPtr.p->m_treeNodePtrI = treeNodePtr.i;
-    list.addLast(fragPtr);
-  }
-  else
+  do
   {
-    jam();
-    goto error1;
-  }
+    Ptr<ScanFragHandle> fragPtr;
+    Local_ScanFragHandle_list list(m_scanfraghandle_pool, data.m_fragments);
 
-  if (treeNodePtr.p->m_bits & TreeNode::T_CONST_PRUNE)
-  {
-    jam();
+    if (ERROR_INSERTED_CLEAR(17012))
+    {
+      jam();
+      ndbout_c("Injecting OutOfQueryMemory error 17012 at line %d file %s",
+               __LINE__,  __FILE__);
+      err = DbspjErr::OutOfQueryMemory;
+      break;
+    }
 
-    // TODO we need a different variant of computeHash here,
-    // since m_constPrunePtrI does not contain full primary key
-    // but only parts in distribution key
+    if (likely(m_scanfraghandle_pool.seize(requestPtr.p->m_arena, fragPtr)))
+    {
+      jam();
+      fragPtr.p->init(fragNoOffs);
+      fragPtr.p->m_treeNodePtrI = treeNodePtr.i;
+      list.addLast(fragPtr);
+    }
+    else
+    {
+      jam();
+      err = DbspjErr::OutOfQueryMemory;
+      break;
+    }
 
-    BuildKeyReq tmp;
-    Uint32 indexId = dst->tableId;
-    Uint32 tableId = g_key_descriptor_pool.getPtr(indexId)->primaryTableId;
-    Uint32 err = computePartitionHash(signal, tmp, tableId, data.m_constPrunePtrI);
-    if (unlikely(err != 0))
-      goto error;
+    if (treeNodePtr.p->m_bits & TreeNode::T_CONST_PRUNE)
+    {
+      jam();
 
-    releaseSection(data.m_constPrunePtrI);
-    data.m_constPrunePtrI = RNIL;
+      // TODO we need a different variant of computeHash here,
+      // since m_constPrunePtrI does not contain full primary key
+      // but only parts in distribution key
 
-    err = getNodes(signal, tmp, tableId);
-    if (unlikely(err != 0))
-      goto error;
+      BuildKeyReq tmp;
+      Uint32 indexId = dst->tableId;
+      Uint32 tableId = g_key_descriptor_pool.getPtr(indexId)->primaryTableId;
+      err = computePartitionHash(signal, tmp, tableId, data.m_constPrunePtrI);
+      if (unlikely(err != 0))
+      {
+        jam();
+        break;
+      }
 
-    fragPtr.p->m_fragId = tmp.fragId;
-    fragPtr.p->m_ref = tmp.receiverRef;
-    data.m_fragCount = 1;
-  }
-  else if (fragCount == 1)
-  {
-    jam();
-    /**
-     * This is roughly equivalent to T_CONST_PRUNE
-     *   pretend that it is const-pruned
-     */
-    if (treeNodePtr.p->m_bits & TreeNode::T_PRUNE_PATTERN)
-    {
-      jam();
-      LocalArenaPoolImpl pool(requestPtr.p->m_arena, m_dependency_map_pool);
-      Local_pattern_store pattern(pool, data.m_prunePattern);
-      pattern.release();
-    }
-    data.m_constPrunePtrI = RNIL;
-    Uint32 clear = TreeNode::T_PRUNE_PATTERN | TreeNode::T_SCAN_PARALLEL;
-    treeNodePtr.p->m_bits &= ~clear;
-    treeNodePtr.p->m_bits |= TreeNode::T_CONST_PRUNE;
+      releaseSection(data.m_constPrunePtrI);
+      data.m_constPrunePtrI = RNIL;
 
-    /**
-     * We must get fragPtr.p->m_ref...so set pruned=false
-     */
-    pruned = false;
-  }
-  else
-  {
-    for (Uint32 i = 1; i<fragCount; i++)
+      err = getNodes(signal, tmp, tableId);
+      if (unlikely(err != 0))
+      {
+        jam();
+        break;
+      }
+
+      fragPtr.p->m_fragId = tmp.fragId;
+      fragPtr.p->m_ref = tmp.receiverRef;
+      data.m_fragCount = 1;
+    }
+    else if (fragCount == 1)
     {
       jam();
-      Ptr<ScanFragHandle> fragPtr;
-      Uint16 fragNo = (fragNoOffs+i) % fragCount;
-      if (likely(m_scanfraghandle_pool.seize(requestPtr.p->m_arena, fragPtr)))
+      /**
+       * This is roughly equivalent to T_CONST_PRUNE
+       *   pretend that it is const-pruned
+       */
+      if (treeNodePtr.p->m_bits & TreeNode::T_PRUNE_PATTERN)
       {
         jam();
-        fragPtr.p->init(fragNo);
-        fragPtr.p->m_treeNodePtrI = treeNodePtr.i;
-        list.addLast(fragPtr);
+        LocalArenaPoolImpl pool(requestPtr.p->m_arena, m_dependency_map_pool);
+        Local_pattern_store pattern(pool, data.m_prunePattern);
+        pattern.release();
       }
-      else
+      data.m_constPrunePtrI = RNIL;
+      Uint32 clear = TreeNode::T_PRUNE_PATTERN | TreeNode::T_SCAN_PARALLEL;
+      treeNodePtr.p->m_bits &= ~clear;
+      treeNodePtr.p->m_bits |= TreeNode::T_CONST_PRUNE;
+
+      /**
+       * We must get fragPtr.p->m_ref...so set pruned=false
+       */
+      pruned = false;
+    }
+    else
+    {
+      for (Uint32 i = 1; i<fragCount; i++)
       {
-        goto error1;
+        jam();
+        Ptr<ScanFragHandle> fragPtr;
+        Uint16 fragNo = (fragNoOffs+i) % fragCount;
+        if (likely(m_scanfraghandle_pool.seize(requestPtr.p->m_arena, fragPtr)))
+        {
+          jam();
+          fragPtr.p->init(fragNo);
+          fragPtr.p->m_treeNodePtrI = treeNodePtr.i;
+          list.addLast(fragPtr);
+        }
+        else
+        {
+          jam();
+          err = DbspjErr::OutOfQueryMemory;
+          goto error;
+        }
       }
     }
-  }
-  data.m_frags_complete = data.m_fragCount;
+    data.m_frags_complete = data.m_fragCount;
 
-  if (!pruned)
-  {
-    jam();
-    Uint32 tableId = ((ScanFragReq*)data.m_scanFragReq)->tableId;
-    DihScanGetNodesReq * req = (DihScanGetNodesReq*)signal->getDataPtrSend();
-    req->senderRef = reference();
-    req->tableId = tableId;
-    req->scanCookie = cookie;
+    if (!pruned)
+    {
+      jam();
+      Uint32 tableId = ((ScanFragReq*)data.m_scanFragReq)->tableId;
+      DihScanGetNodesReq * req = (DihScanGetNodesReq*)signal->getDataPtrSend();
+      req->senderRef = reference();
+      req->tableId = tableId;
+      req->scanCookie = cookie;
 
-    Uint32 cnt = 0;
-    for (list.first(fragPtr); !fragPtr.isNull(); list.next(fragPtr))
+      Uint32 cnt = 0;
+      for (list.first(fragPtr); !fragPtr.isNull(); list.next(fragPtr))
+      {
+        jam();
+        req->senderData = fragPtr.i;
+        req->fragId = fragPtr.p->m_fragId;
+        sendSignal(DBDIH_REF, GSN_DIH_SCAN_GET_NODES_REQ, signal,
+                   DihScanGetNodesReq::SignalLength, JBB);
+        cnt++;
+      }
+      data.m_frags_outstanding = cnt;
+      requestPtr.p->m_outstanding++;
+    }
+    else
     {
       jam();
-      req->senderData = fragPtr.i;
-      req->fragId = fragPtr.p->m_fragId;
-      sendSignal(DBDIH_REF, GSN_DIH_SCAN_GET_NODES_REQ, signal,
-                 DihScanGetNodesReq::SignalLength, JBB);
-      cnt++;
+      treeNodePtr.p->m_state = TreeNode::TN_INACTIVE;
     }
-    data.m_frags_outstanding = cnt;
-    requestPtr.p->m_outstanding++;
-  }
-  else
-  {
-    jam();
-    treeNodePtr.p->m_state = TreeNode::TN_INACTIVE;
-  }
-  checkPrepareComplete(signal, requestPtr, 1);
+    checkPrepareComplete(signal, requestPtr, 1);
 
-  return;
+    return;
+  } while (0);
 
-error1:
 error:
-  ndbrequire(false);
+  ndbrequire(requestPtr.p->isScan());
+  ndbrequire(requestPtr.p->m_outstanding >= 1);
+  requestPtr.p->m_outstanding -= 1;
+  abort(signal, requestPtr, err);
 }
 
 void
@@ -4825,7 +4980,7 @@ Dbspj::scanIndex_findFrag(Local_ScanFrag
     }
   }
 
-  return 99; // TODO
+  return DbspjErr::IndexFragNotFound;
 }
 
 void
@@ -4881,16 +5036,12 @@ Dbspj::scanIndex_parent_row(Signal* sign
         return;  // Bailout, SCANREQ would have returned 0 rows anyway
       }
 
-      // TODO we need a different variant of computeHash here,
-      // since pruneKeyPtrI does not contain full primary key
-      // but only parts in distribution key
-
       BuildKeyReq tmp;
       ScanFragReq * dst = (ScanFragReq*)data.m_scanFragReq;
       Uint32 indexId = dst->tableId;
       Uint32 tableId = g_key_descriptor_pool.getPtr(indexId)->primaryTableId;
       err = computePartitionHash(signal, tmp, tableId, pruneKeyPtrI);
-      releaseSection(pruneKeyPtrI); // see ^ TODO
+      releaseSection(pruneKeyPtrI);
       if (unlikely(err != 0))
       {
         DEBUG_CRASH();
@@ -4938,6 +5089,27 @@ Dbspj::scanIndex_parent_row(Signal* sign
     {
       jam();
       Local_pattern_store pattern(pool, treeNodePtr.p->m_keyPattern);
+
+      /**
+       * Test execution terminated due to 'OutOfSectionMemory':
+       * - 17060: Fail on scanIndex_parent_row at first call
+       * - 17061: Fail on scanIndex_parent_row at a random node of the query tree
+       * - 17062: Fail on scanIndex_parent_row if 'isLeaf'
+       * - 17063: Fail on scanIndex_parent_row if treeNode not root
+       */
+
+      if (ERROR_INSERTED(17060) ||
+          ((rand() % 7) == 0 && ERROR_INSERTED(17061)) ||
+          (treeNodePtr.p->isLeaf() && ERROR_INSERTED(17062)) ||
+          (treeNodePtr.p->m_parentPtrI != RNIL && ERROR_INSERTED(17063)))
+      {
+        ndbout_c("Injecting OutOfSectionMemory error at line %d file %s",
+                 __LINE__,  __FILE__);
+        err = DbspjErr::OutOfSectionMemory;
+        break;
+      }
+
       err = expand(ptrI, pattern, rowRef, hasNull);
       if (unlikely(err != 0))
       {
@@ -4968,7 +5140,9 @@ Dbspj::scanIndex_parent_row(Signal* sign
     return;
   } while (0);
 
-  ndbrequire(false);
+  ndbrequire(err);
+  jam();
+  abort(signal, requestPtr, err);
 }
 
 
@@ -5405,19 +5579,22 @@ Dbspj::scanIndex_execTRANSID_AI(Signal* 
 {
   jam();
 
-  LocalArenaPoolImpl pool(requestPtr.p->m_arena, m_dependency_map_pool);
-  Local_dependency_map list(pool, treeNodePtr.p->m_dependent_nodes);
-  Dependency_map::ConstDataBufferIterator it;
-
   {
+    LocalArenaPoolImpl pool(requestPtr.p->m_arena, m_dependency_map_pool);
+    Local_dependency_map list(pool, treeNodePtr.p->m_dependent_nodes);
+    Dependency_map::ConstDataBufferIterator it;
+
     for (list.first(it); !it.isNull(); list.next(it))
     {
-      jam();
-      Ptr<TreeNode> childPtr;
-      m_treenode_pool.getPtr(childPtr, * it.data);
-      ndbrequire(childPtr.p->m_info != 0&&childPtr.p->m_info->m_parent_row!=0);
-      (this->*(childPtr.p->m_info->m_parent_row))(signal,
-                                                  requestPtr, childPtr,rowRef);
+      if (likely(requestPtr.p->m_state & Request::RS_RUNNING))
+      {
+        jam();
+        Ptr<TreeNode> childPtr;
+        m_treenode_pool.getPtr(childPtr, * it.data);
+        ndbrequire(childPtr.p->m_info != 0&&childPtr.p->m_info->m_parent_row!=0);
+        (this->*(childPtr.p->m_info->m_parent_row))(signal,
+                                                    requestPtr, childPtr,rowRef);
+      }
     }
   }
 
@@ -6062,7 +6239,8 @@ Dbspj::scanIndex_release_rangekeys(Ptr<R
   else
   {
     jam();
-    list.first(fragPtr);
+    if (!list.first(fragPtr))
+      return;
     if (fragPtr.p->m_rangePtrI != RNIL)
     {
       releaseSection(fragPtr.p->m_rangePtrI);
@@ -6251,8 +6429,15 @@ Dbspj::appendToPattern(Local_pattern_sto
   if (unlikely(tree.ptr + len > tree.end))
     return DbspjErr::InvalidTreeNodeSpecification;
 
+  if (ERROR_INSERTED(17008))
+  {
+    ndbout_c("Injecting OutOfQueryMemory error 17008 at line %d file %s",
+             __LINE__,  __FILE__);
+    jam();
+    return DbspjErr::OutOfQueryMemory;
+  }
   if (unlikely(pattern.append(tree.ptr, len)==0))
-    return  DbspjErr::OutOfQueryMemory;
+    return DbspjErr::OutOfQueryMemory;
 
   tree.ptr += len;
   return 0;
@@ -6271,6 +6456,15 @@ Dbspj::appendParamToPattern(Local_patter
   Uint32 len = AttributeHeader::getDataSize(* ptr ++);
   /* Param COL's converted to DATA when appended to pattern */
   Uint32 info = QueryPattern::data(len);
+
+  if (ERROR_INSERTED(17009))
+  {
+    ndbout_c("Injecting OutOfQueryMemory error 17009 at line %d file %s",
+             __LINE__,  __FILE__);
+    jam();
+    return DbspjErr::OutOfQueryMemory;
+  }
+
   return dst.append(&info,1) && dst.append(ptr,len) ? 0 : DbspjErr::OutOfQueryMemory;
 }
 
@@ -6287,6 +6481,15 @@ Dbspj::appendParamHeadToPattern(Local_pa
   Uint32 len = AttributeHeader::getDataSize(*ptr);
   /* Param COL's converted to DATA when appended to pattern */
   Uint32 info = QueryPattern::data(len+1);
+
+  if (ERROR_INSERTED(17010))
+  {
+    ndbout_c("Injecting OutOfQueryMemory error 17010 at line %d file %s",
+             __LINE__,  __FILE__);
+    jam();
+    return DbspjErr::OutOfQueryMemory;
+  }
+
   return dst.append(&info,1) && dst.append(ptr,len+1) ? 0 : DbspjErr::OutOfQueryMemory;
 }
 
@@ -6303,16 +6506,16 @@ Dbspj::appendTreeToSection(Uint32 & ptrI
   {
     jam();
     tree.getWords(tmp, SZ);
-    ndbrequire(appendToSection(ptrI, tmp, SZ));
+    if (!appendToSection(ptrI, tmp, SZ))
+      return DbspjErr::OutOfSectionMemory;
     len -= SZ;
   }
 
   tree.getWords(tmp, len);
-  return appendToSection(ptrI, tmp, len) ? 0 : /** todo error code */ 1;
-#if TODO
-err:
-  return 1;
-#endif
+  if (!appendToSection(ptrI, tmp, len))
+    return DbspjErr::OutOfSectionMemory;
+
+  return 0;
 }
 
 void
@@ -6380,9 +6583,6 @@ Dbspj::appendColToSection(Uint32 & dst, 
                           Uint32 col, bool& hasNull)
 {
   jam();
-  /**
-   * TODO handle errors
-   */
   Uint32 offset = row.m_header->m_offset[col];
   const Uint32 * ptr = row.m_data + offset;
   Uint32 len = AttributeHeader::getDataSize(* ptr ++);
@@ -6392,7 +6592,7 @@ Dbspj::appendColToSection(Uint32 & dst, 
     hasNull = true;  // NULL-value in key
     return 0;
   }
-  return appendToSection(dst, ptr, len) ? 0 : DbspjErr::InvalidPattern;
+  return appendToSection(dst, ptr, len) ? 0 : DbspjErr::OutOfSectionMemory;
 }
 
 Uint32
@@ -6400,9 +6600,6 @@ Dbspj::appendAttrinfoToSection(Uint32 & 
                                Uint32 col, bool& hasNull)
 {
   jam();
-  /**
-   * TODO handle errors
-   */
   Uint32 offset = row.m_header->m_offset[col];
   const Uint32 * ptr = row.m_data + offset;
   Uint32 len = AttributeHeader::getDataSize(* ptr);
@@ -6411,7 +6608,7 @@ Dbspj::appendAttrinfoToSection(Uint32 & 
     jam();
     hasNull = true;  // NULL-value in key
   }
-  return appendToSection(dst, ptr, 1 + len) ? 0 : DbspjErr::InvalidPattern;
+  return appendToSection(dst, ptr, 1 + len) ? 0 : DbspjErr::OutOfSectionMemory;
 }
 
 Uint32
@@ -6635,7 +6832,7 @@ Dbspj::appendDataToSection(Uint32 & ptrI
       if (!appendToSection(ptrI, tmp, dstIdx))
       {
         DEBUG_CRASH();
-        return DbspjErr::InvalidPattern;
+        return DbspjErr::OutOfSectionMemory;
       }
       dstIdx = 0;
     }
@@ -6643,7 +6840,7 @@ Dbspj::appendDataToSection(Uint32 & ptrI
   if (remaining > 0)
   {
     DEBUG_CRASH();
-    return DbspjErr::InvalidPattern;
+    return DbspjErr::OutOfSectionMemory;
   }
   else
   {
@@ -6983,6 +7180,28 @@ Dbspj::parseDA(Build_context& ctx,
 
   do
   {
+    /**
+     * Test execution terminated due to 'OutOfSectionMemory' which
+     * may happen in multiple places (e.g. appendToSection, expand) below:
+     * - 17050: Fail on parseDA at first call
+     * - 17051: Fail on parseDA if 'isLeaf'
+     * - 17052: Fail on parseDA if treeNode not root
+     * - 17053: Fail on parseDA at a random node of the query tree
+     */
+
+    if (ERROR_INSERTED(17050) ||
+        (treeNodePtr.p->isLeaf() && ERROR_INSERTED(17051)) ||
+        (treeNodePtr.p->m_parentPtrI != RNIL && ERROR_INSERTED(17052)) ||
+        ((rand() % 7) == 0 && ERROR_INSERTED(17053)))
+    {
+      ndbout_c("Injecting OutOfSectionMemory error at line %d file %s",
+                __LINE__,  __FILE__);
+      jam();
+      err = DbspjErr::OutOfSectionMemory;
+      break;
+    }
+
     if (treeBits & DABits::NI_REPEAT_SCAN_RESULT)
     {
       jam();
@@ -7011,16 +7230,16 @@ Dbspj::parseDA(Build_context& ctx,
         break;
       }
 
-      err = 0;
-
       if (unlikely(cnt!=1))
       {
         /**
          * Only a single parent supported for now, i.e only trees
          */
         DEBUG_CRASH();
+        break;
       }
 
+      err = 0;
       for (Uint32 i = 0; i<cnt; i++)
       {
         DEBUG("adding " << dst[i] << " as parent");

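A recurring pattern in the DbspjMain.cpp changes above: hard
'ndbrequire(false)' crashes are replaced by injectable errors that are
propagated through abort(), and new ERROR_INSERTED checks let tests
provoke each failure path on demand. The macros belong to the NDB
kernel's error-insert framework; the stand-ins below are a toy model of
how the diff uses them, not the real definitions:

#include <cstdio>

// Toy model (assumption): a single armed error-insert value per block.
static unsigned g_error_insert_value = 0;

#define SET_ERROR_INSERT_VALUE(n) (g_error_insert_value = (n))
#define ERROR_INSERTED(n)         (g_error_insert_value == (n))
#define ERROR_INSERTED_CLEAR(n) \
  (g_error_insert_value == (n) ? (g_error_insert_value = 0, true) : false)
#define CLEAR_ERROR_INSERT_VALUE  (g_error_insert_value = 0)

// A resource-allocation step gated like m_request_pool.seize() in
// execLQHKEYREQ: arming error 17001 makes it fail instead of crashing.
static bool seize_request(void)
{
  if (ERROR_INSERTED(17001))
  {
    printf("Injecting OutOfQueryMem error 17001\n");
    return false;   // caller breaks out and aborts the request cleanly
  }
  return true;      // normal path
}

int main()
{
  SET_ERROR_INSERT_VALUE(17001);
  printf("seize: %d\n", (int)seize_request());   // 0: error armed
  CLEAR_ERROR_INSERT_VALUE;
  printf("seize: %d\n", (int)seize_request());   // 1: normal again
  return 0;
}

The testSpj.cpp changes in this merge presumably exercise these codes
from the NDB API side.
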
=== modified file 'storage/ndb/src/kernel/blocks/pgman.cpp'
--- a/storage/ndb/src/kernel/blocks/pgman.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/kernel/blocks/pgman.cpp	2012-03-05 13:04:02 +0000
@@ -27,7 +27,7 @@
 #include <dbtup/Dbtup.hpp>
 
 #include <DebuggerNames.hpp>
-#include <sha1.h>
+#include <md5_hash.hpp>
 
 /**
  * Requests that make page dirty
@@ -2629,21 +2629,13 @@ operator<<(NdbOut& out, Ptr<Pgman::Page_
     if (pe.m_state & Pgman::Page_entry::MAPPED) {
       Ptr<GlobalPage> gptr;
       pe.m_this->m_global_page_pool.getPtr(gptr, pe.m_real_page_i);
-      SHA1_CONTEXT c;
-      uint8 digest[SHA1_HASH_SIZE];
-      mysql_sha1_reset(&c);
-      mysql_sha1_input(&c, (uchar*)gptr.p->data, sizeof(gptr.p->data));
-      mysql_sha1_result(&c, digest);
-      char buf[100];
-      int i;
-      for (i = 0; i < 20; i++) {
-        const char* const hexdigit = "0123456789abcdef";
-        uint8 x = digest[i];
-        buf[2*i + 0] = hexdigit[x >> 4];
-        buf[2*i + 1] = hexdigit[x & 0xF];
-      }
-      buf[2*i] = 0;
-      out << " sha1=" << buf;
+      Uint32 hash_result[4];
+      /* NOTE: Assuming "data" is 64-bit aligned, as required by 'md5_hash' */
+      md5_hash(hash_result,
+               (Uint64*)gptr.p->data, sizeof(gptr.p->data)/sizeof(Uint32));
+      out.print(" md5=%08x%08x%08x%08x",
+                hash_result[0], hash_result[1],
+                hash_result[2], hash_result[3]);
     }
 #endif
   }
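
The NOTE in the hunk above assumes the page data is already 64-bit aligned.
A defensive variant, assuming md5_hash() has the signature used in the call
above (result array, 64-bit-aligned input, length in 32-bit words) and with
print_page_md5() as a hypothetical helper, would copy into an aligned buffer
first:

  #include <cstddef>
  #include <cstdio>
  #include <cstring>
  #include <vector>
  #include <stdint.h>

  typedef uint32_t Uint32;
  typedef uint64_t Uint64;

  // Internal NDB helper from md5_hash.hpp; declaration shown as assumed.
  extern void md5_hash(Uint32 result[4], const Uint64* buf, Uint32 no_of_32_words);

  void print_page_md5(const void* page, size_t bytes)
  {
    // Copying into a Uint64 buffer guarantees the alignment the
    // NOTE in the patch otherwise just assumes.
    std::vector<Uint64> aligned((bytes + 7) / 8, 0);
    memcpy(&aligned[0], page, bytes);

    Uint32 h[4];
    md5_hash(h, &aligned[0], (Uint32)(bytes / 4));
    printf(" md5=%08x%08x%08x%08x\n", h[0], h[1], h[2], h[3]);
  }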

=== modified file 'storage/ndb/src/mgmsrv/ConfigManager.cpp'
--- a/storage/ndb/src/mgmsrv/ConfigManager.cpp	2011-02-22 21:29:46 +0000
+++ b/storage/ndb/src/mgmsrv/ConfigManager.cpp	2012-03-05 12:49:33 +0000
@@ -123,7 +123,8 @@ ConfigManager::find_nodeid_from_configdi
   BaseString config_name;
   NdbDir::Iterator iter;
 
-  if (iter.open(m_configdir) != 0)
+  if (!m_configdir ||
+      iter.open(m_configdir) != 0)
     return 0;
 
   const char* name;
@@ -2102,8 +2103,9 @@ ConfigManager::delete_saved_configs(void
 {
   NdbDir::Iterator iter;
 
-  if (iter.open(m_configdir) != 0)
-    return false;
+  if (!m_configdir ||
+      iter.open(m_configdir) != 0)
+    return 0;
 
   bool result = true;
   const char* name;
@@ -2144,8 +2146,9 @@ ConfigManager::saved_config_exists(BaseS
 {
   NdbDir::Iterator iter;
 
-  if (iter.open(m_configdir) != 0)
-    return false;
+  if (!m_configdir ||
+      iter.open(m_configdir) != 0)
+    return 0;
 
   const char* name;
   unsigned nodeid;
@@ -2182,8 +2185,9 @@ ConfigManager::failed_config_change_exis
 {
   NdbDir::Iterator iter;
 
-  if (iter.open(m_configdir) != 0)
-    return false;
+  if (!m_configdir ||
+      iter.open(m_configdir) != 0)
+    return 0;
 
   const char* name;
   char tmp;

=== modified file 'storage/ndb/src/mgmsrv/MgmtSrvr.cpp'
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2011-12-15 17:19:26 +0000
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2012-03-05 12:51:16 +0000
@@ -333,8 +333,18 @@ MgmtSrvr::init()
   DBUG_ENTER("MgmtSrvr::init");
 
   const char* configdir;
-  if (!(configdir= check_configdir()))
-    DBUG_RETURN(false);
+
+  if (!m_opts.config_cache)
+  {
+    g_eventLogger->info("Skipping check of config directory since "
+                        "config cache is disabled.");
+    configdir = NULL;
+  }
+  else
+  {
+    if (!(configdir= check_configdir()))
+      DBUG_RETURN(false);
+  }
 
   if (!(m_config_manager= new ConfigManager(m_opts, configdir)))
   {
@@ -4311,6 +4321,7 @@ MgmtSrvr::show_variables(NdbOut& out)
   out << "no_nodeid_checks: " << yes_no(m_opts.no_nodeid_checks) << endl;
   out << "print_full_config: " << yes_no(m_opts.print_full_config) << endl;
   out << "configdir: " << str_null(m_opts.configdir) << endl;
+  out << "config_cache: " << yes_no(m_opts.config_cache) << endl;
   out << "verbose: " << yes_no(m_opts.verbose) << endl;
   out << "reload: " << yes_no(m_opts.reload) << endl;
 

=== modified file 'storage/ndb/src/ndbapi/NdbOperationSearch.cpp'
--- a/storage/ndb/src/ndbapi/NdbOperationSearch.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/ndbapi/NdbOperationSearch.cpp	2012-03-05 13:04:02 +0000
@@ -34,7 +34,6 @@ Adjust:  971022  UABMNST   First version
 #include <AttributeHeader.hpp>
 #include <signaldata/TcKeyReq.hpp>
 #include <signaldata/KeyInfo.hpp>
-#include <md5_hash.hpp>
 
 /******************************************************************************
 CondIdType equal(const char* anAttrName, char* aValue, Uint32 aVarKeylen);

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilder.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-10-20 19:52:11 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2012-02-23 15:41:31 +0000
@@ -1040,12 +1040,15 @@ NdbQueryBuilder::scanIndex(const NdbDict
                 rootOrder == NdbQueryOptions::ScanOrdering_descending,
                 QRY_MULTIPLE_SCAN_SORTED);
 
-    // A child scan should not be sorted.
-    const NdbQueryOptions::ScanOrdering order =
-      options->getImpl().getOrdering();
-    returnErrIf(order == NdbQueryOptions::ScanOrdering_ascending ||
-                order == NdbQueryOptions::ScanOrdering_descending,
-                QRY_MULTIPLE_SCAN_SORTED);
+    if (options != NULL)
+    {
+      // A child scan should not be sorted.
+      const NdbQueryOptions::ScanOrdering order =
+        options->getImpl().getOrdering();
+      returnErrIf(order == NdbQueryOptions::ScanOrdering_ascending ||
+                  order == NdbQueryOptions::ScanOrdering_descending,
+                  QRY_MULTIPLE_SCAN_SORTED);
+    }
   }
 
   const NdbIndexImpl& indexImpl = NdbIndexImpl::getImpl(*index);
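
The guard added above matters because the options argument of
NdbQueryBuilder::scanIndex() is optional; a caller that omits it used to
crash on the unguarded options->getImpl() dereference. A hypothetical caller
exercising that path (assuming the standard NDB API client header):

  #include <NdbApi.hpp>

  // 'index', 'table' and 'bounds' are assumed to come from the caller.
  const NdbQueryOperationDef*
  addChildScan(NdbQueryBuilder& builder,
               const NdbDictionary::Index* index,
               const NdbDictionary::Table* table,
               const NdbQueryIndexBound& bounds)
  {
    // No NdbQueryOptions passed: this is exactly the NULL case the
    // new 'options != NULL' guard protects.
    return builder.scanIndex(index, table, &bounds);
  }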

=== modified file 'storage/ndb/src/ndbapi/NdbQueryOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-11-16 08:17:17 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2012-02-23 15:41:31 +0000
@@ -56,6 +56,7 @@ static const bool useDoubleBuffers = tru
 
 /* Various error codes that are not specific to NdbQuery. */
 static const int Err_TupleNotFound = 626;
+static const int Err_FalsePredicate = 899;
 static const int Err_MemoryAlloc = 4000;
 static const int Err_SendFailed = 4002;
 static const int Err_FunctionNotImplemented = 4003;
@@ -225,12 +226,24 @@ public:
 
   void incrOutstandingResults(Int32 delta)
   {
+    if (traceSignals) {
+      ndbout << "incrOutstandingResults: " << m_outstandingResults
+             << ", with: " << delta
+             << endl;
+    }
     m_outstandingResults += delta;
+    assert(!(m_confReceived && m_outstandingResults < 0));
   }
 
-  void clearOutstandingResults()
+  void throwRemainingResults()
   {
+    if (traceSignals) {
+      ndbout << "throwRemainingResults: " << m_outstandingResults
+             << endl;
+    }
     m_outstandingResults = 0;
+    m_confReceived = true; 
+    postFetchRelease();
   }
 
   void setConfReceived(Uint32 tcPtrI);
@@ -4977,23 +4990,34 @@ NdbQueryOperationImpl::execTCKEYREF(cons
 
   NdbRootFragment& rootFrag = getQuery().m_rootFrags[0];
 
-  if (ref->errorCode != DbspjErr::NodeFailure)
+  /**
+   * The error may be either a 'soft' or a 'hard' error.
+   * Soft errors are regarded as informational, and we are
+   * allowed to continue execution of the query. A hard error
+   * will terminate the query and close communication; any further
+   * incoming signals to this NdbReceiver will be discarded.
+   */
+  switch (ref->errorCode)
   {
-    // Compensate for children results not produced.
-    // (doSend() assumed all child results to be materialized)
-    Uint32 cnt = 0;
-    cnt += 1; // self
+  case Err_TupleNotFound:  // 'Soft error' : Row not found
+  case Err_FalsePredicate: // 'Soft error' : Interpreter_exit_nok
+  {
+    /**
+     * Need to update 'outstanding' count:
+     * Compensate for children results not produced.
+     * (doSend() assumed all child results to be materialized)
+     */
+    Uint32 cnt = 1; // self
     cnt += getNoOfDescendantOperations();
     if (getNoOfChildOperations() > 0)
     {
       cnt += getNoOfLeafOperations();
     }
     rootFrag.incrOutstandingResults(- Int32(cnt));
+    break;
   }
-  else
-  {
-    // consider frag-batch complete
-    rootFrag.clearOutstandingResults();
+  default:                             // 'Hard error':
+    rootFrag.throwRemainingResults();  // Terminate receive -> complete
   }
 
   bool ret = false;
@@ -5004,7 +5028,6 @@ NdbQueryOperationImpl::execTCKEYREF(cons
 
   if (traceSignals) {
     ndbout << "NdbQueryOperationImpl::execTCKEYREF(): returns:" << ret
-           << ", resultStream= {" << rootFrag.getResultStream(*this) << "}"
            << ", *this=" << *this <<  endl;
   }
   return ret;
@@ -5283,10 +5306,6 @@ NdbOut& operator<<(NdbOut& out, const Nd
   }
   out << "  m_queryImpl: " << &op.m_queryImpl;
   out << "  m_operationDef: " << &op.m_operationDef;
-  for(Uint32 i = 0; i<op.m_queryImpl.getRootFragCount(); i++){
-    NdbRootFragment& rootFrag = op.m_queryImpl.m_rootFrags[i];
-    out << "  m_resultStream[" << i << "]{" << rootFrag.getResultStream(op) << "}";
-  }
   out << " m_isRowNull " << op.m_isRowNull;
   out << " ]";
   return out;
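
The compensation arithmetic in execTCKEYREF() above can be checked with a
small worked example; compensation() below is a hypothetical mirror of the
counting, not code from the patch:

  #include <cassert>
  #include <stdint.h>

  // On a 'soft' TCKEYREF, the failed operation itself, all its descendants,
  // and (if it has children) one batch-complete per leaf were counted as
  // outstanding by doSend(); all of them must now be subtracted.
  int32_t compensation(uint32_t descendants, uint32_t children, uint32_t leaves)
  {
    uint32_t cnt = 1;        // self
    cnt += descendants;      // getNoOfDescendantOperations()
    if (children > 0)
      cnt += leaves;         // getNoOfLeafOperations()
    return -(int32_t)cnt;    // passed to incrOutstandingResults()
  }

  int main()
  {
    // e.g. a root lookup failing with 626 (row not found), having
    // two descendants, one direct child and one leaf below it:
    assert(compensation(2, 1, 1) == -4);
    return 0;
  }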

=== modified file 'storage/ndb/src/ndbapi/Ndbif.cpp'
--- a/storage/ndb/src/ndbapi/Ndbif.cpp	2011-09-09 13:33:52 +0000
+++ b/storage/ndb/src/ndbapi/Ndbif.cpp	2012-02-23 15:41:31 +0000
@@ -350,6 +350,7 @@ Ndb::handleReceivedSignal(const NdbApiSi
       else
       {
         tCon = lookupTransactionFromOperation(keyConf);
+        if (tCon == NULL) goto InvalidSignal;
       }
       const BlockReference aTCRef = aSignal->theSendersBlockRef;
 

=== modified file 'storage/ndb/src/ndbapi/ndb_cluster_connection.cpp'
--- a/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp	2011-09-19 20:03:43 +0000
+++ b/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp	2012-03-05 13:04:02 +0000
@@ -29,7 +29,6 @@
 #include <ndb_version.h>
 #include <mgmapi_debug.h>
 #include <mgmapi_internal.h>
-#include <md5_hash.hpp>
 #include "NdbImpl.hpp"
 #include "NdbDictionaryImpl.hpp"
 

=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2012-01-28 10:11:10 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2012-03-05 09:38:49 +0000
@@ -98,6 +98,7 @@ static const char* empty_string = "";
  * 4700 - "" Event
  * 4800 - API, QueryBuilder
  * 5000 - Management server
+ * 20000 - SPJ
  */
 
 static
@@ -140,7 +141,30 @@ ErrorBundle ErrorCodes[] = {
     "Transaction was committed but all read information was not "
     "received due to node crash" },
   { 4119, DMEC, NR, "Simple/dirty read failed due to node failure" },
+
+  /**
+   * SPJ error codes
+   */ 
+
+  { 20000, DMEC, IS, "Query aborted due to out of operation records" },
+  { 20001, DMEC, IE, "Query aborted due to empty query tree" },
+  { 20002, DMEC, IE, "Query aborted due to invalid request" },
+  { 20003, DMEC, IE, "Query aborted due to unknown query operation" },
+  { 20004, DMEC, IE, "Query aborted due to invalid tree node specification" },
+  { 20005, DMEC, IE, "Query aborted due to invalid tree parameter specification" },
+  { 20006, DMEC, IS, "Query aborted due to out of section memory" },
+  { 20007, DMEC, IE, "Query aborted due to invalid pattern" },
+  { 20008, DMEC, IS, "Query aborted due to out of query memory" },
+  { 20009, DMEC, IE, "Query aborted due to query node too big" },
+  { 20010, DMEC, IE, "Query aborted due to query node parameters too big" },
+  { 20011, DMEC, IE, "Query aborted due to interpreted program in both tree and parameters" },
+  { 20012, DMEC, IE, "Query aborted due to invalid tree parameter specification: Key parameter bits mismatch" },
+  { 20013, DMEC, IE, "Query aborted due to invalid tree parameter specification: Incorrect key parameter count" },
+  { 20014, DMEC, IE, "Query aborted due to internal error" },
+  { 20015, DMEC, IS, "Query aborted due to out of row memory" },
   { 20016, DMEC, NR, "Query aborted due to node failure" },
+  { 20017, DMEC, IE, "Query aborted due to invalid node count" },
+  { 20018, DMEC, IE, "Query aborted due to index fragment not found" },
   
   /**
    * Node shutdown
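
Of the new SPJ codes, those classified IS (20000, 20006, 20008, 20015) are
resource shortages, while IE marks internal errors and NR node restarts.
Assuming the usual mapping of the IS classification onto
NdbError::TemporaryError, an application could retry on them; a sketch with
shouldRetryQuery() as a hypothetical helper:

  #include <NdbApi.hpp>

  // 'trans' is an assumed live transaction whose query just failed.
  bool shouldRetryQuery(const NdbTransaction* trans)
  {
    const NdbError& err = trans->getNdbError();
    // err.code would be e.g. 20006, "Query aborted due to out of section memory"
    return err.status == NdbError::TemporaryError;
  }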

=== modified file 'storage/ndb/test/include/HugoQueryBuilder.hpp'
--- a/storage/ndb/test/include/HugoQueryBuilder.hpp	2011-10-20 19:52:11 +0000
+++ b/storage/ndb/test/include/HugoQueryBuilder.hpp	2012-03-05 09:38:49 +0000
@@ -60,9 +60,15 @@ public:
     /**
      * Query might table scan
      */
-    O_TABLE_SCAN = 0x20
+    O_TABLE_SCAN = 0x20,
+
+    /**
+     * Column references may also include grandparents (default 'on')
+     */
+    O_GRANDPARENT = 0x100
   };
-  static const OptionMask OM_RANDOM_OPTIONS = (OptionMask)(O_PK_INDEX | O_UNIQUE_INDEX | O_ORDERED_INDEX | O_TABLE_SCAN);
+  static const OptionMask OM_RANDOM_OPTIONS = 
+       (OptionMask)(O_PK_INDEX | O_UNIQUE_INDEX | O_ORDERED_INDEX | O_TABLE_SCAN | O_GRANDPARENT);
 
   HugoQueryBuilder(Ndb* ndb, const NdbDictionary::Table**tabptr, 
                    OptionMask om = OM_RANDOM_OPTIONS){
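
OptionMask is a plain bit mask, so adding O_GRANDPARENT just reserves another
bit. Illustrative equivalents of the setOption/testOption/clearOption members
used in the HugoQueryBuilder.cpp hunk further down (the free-function form
and the uint64_t width are assumptions):

  #include <stdint.h>

  typedef uint64_t OptionMask;

  static const OptionMask O_TABLE_SCAN  = 0x20;
  static const OptionMask O_GRANDPARENT = 0x100;  // the new bit, default 'on'

  inline void setOption(OptionMask& m, OptionMask o)   { m |= o; }
  inline bool testOption(OptionMask m, OptionMask o)   { return (m & o) != 0; }
  inline void clearOption(OptionMask& m, OptionMask o) { m &= ~o; }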

=== modified file 'storage/ndb/test/ndbapi/testMgmd.cpp'
--- a/storage/ndb/test/ndbapi/testMgmd.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/test/ndbapi/testMgmd.cpp	2012-03-05 12:51:16 +0000
@@ -662,7 +662,41 @@ int runTestNoConfigCache(NDBT_Context* c
                                         "ndb_1_config.bin.1", 
                                         NULL).c_str());
   CHECK(bin_conf_file == false);
-  
+
+  mgmd->stop();
+  return NDBT_OK;
+}
+
+
+/* Test for BUG#13428853 */
+int runTestNoConfigCache_DontCreateConfigDir(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NDBT_Workingdir wd("test_mgmd"); // temporary working directory
+
+  g_err << "** Create config.ini" << endl;
+  Properties config = ConfigFactory::create();
+  CHECK(ConfigFactory::write_config_ini(config,
+                                        path(wd.path(),
+                                             "config.ini",
+                                             NULL).c_str()));
+
+  // Start ndb_mgmd  from config.ini
+  Mgmd* mgmd = new Mgmd(1);
+  CHECK(mgmd->start_from_config_ini(wd.path(),
+                                    "--skip-config-cache",
+                                    "--config-dir=dir37",
+                                    NULL));
+
+  // Connect the ndb_mgmd(s)
+  CHECK(mgmd->connect(config));
+
+  // wait for confirmed config
+  CHECK(mgmd->wait_confirmed_config());
+
+  // Check configdir not created
+  bool conf_dir_exist = file_exists(path(wd.path(), "dir37", NULL).c_str());
+  CHECK(conf_dir_exist == false);
+
   mgmd->stop();  
   return NDBT_OK;
 }  
@@ -1185,7 +1219,13 @@ TESTCASE("NoCfgCache",
 {
   INITIALIZER(runTestNoConfigCache);
 }
-
+TESTCASE("NoCfgCacheOrConfigDir",
+         "Test that when an mgmd is started with --skip-config-cache, "
+         "no ndb_xx_config.xx.bin file is created, but you can "
+         "connect to the mgm node and retrieve the config.")
+{
+  INITIALIZER(runTestNoConfigCache_DontCreateConfigDir);
+}
 TESTCASE("Bug45495",
          "Test that mgmd can be restarted in any order")
 {

=== modified file 'storage/ndb/test/ndbapi/testSpj.cpp'
--- a/storage/ndb/test/ndbapi/testSpj.cpp	2011-09-14 10:30:08 +0000
+++ b/storage/ndb/test/ndbapi/testSpj.cpp	2012-03-01 15:13:54 +0000
@@ -27,6 +27,13 @@
 #include <HugoQueries.hpp>
 #include <NdbSchemaCon.hpp>
 
+static int faultToInject = 0;
+
+enum faultsToInject {
+  FI_START = 17001,
+  FI_END = 17063
+};
+
 int
 runLoadTable(NDBT_Context* ctx, NDBT_Step* step)
 {
@@ -92,6 +99,66 @@ runLookupJoin(NDBT_Context* ctx, NDBT_St
 }
 
 int
+runLookupJoinError(NDBT_Context* ctx, NDBT_Step* step){
+  int loops = ctx->getNumLoops();
+  int joinlevel = ctx->getProperty("JoinLevel", 8);
+  int records = ctx->getNumRecords();
+  int until_stopped = ctx->getProperty("UntilStopped", (Uint32)0);
+  Uint32 stepNo = step->getStepNo();
+
+  int i = 0;
+  HugoQueryBuilder qb(GETNDB(step), ctx->getTab(), HugoQueryBuilder::O_LOOKUP);
+  qb.setJoinLevel(joinlevel);
+  const NdbQueryDef * query = qb.createQuery(GETNDB(step));
+  HugoQueries hugoTrans(*query);
+
+  NdbRestarter restarter;
+  int lookupFaults[] = {
+      17001, 17005, 17006, 17008,
+      17012, // testing abort in ::execDIH_SCAN_TAB_CONF
+      17020, 17021, 17022, // lookup_send() encounters a dead node -> NodeFailure
+      17030, 17031, 17032, // LQHKEYREQ reply is LQHKEYREF('Invalid..')
+      17040, 17041, 17042, // lookup_parent_row -> OutOfQueryMemory
+      17050, 17051, 17052, 17053, // parseDA -> outOfSectionMem
+      17060, 17061, 17062, 17063 // scanIndex_parent_row -> outOfSectionMem
+  }; 
+  loops = faultToInject ? 1 : sizeof(lookupFaults)/sizeof(int);
+
+  while ((i<loops || until_stopped) && !ctx->isTestStopped())
+  {
+    g_info << i << ": ";
+
+    int inject_err = faultToInject ? faultToInject : lookupFaults[i];
+    int randomId = rand() % restarter.getNumDbNodes();
+    int nodeId = restarter.getDbNodeId(randomId);
+
+    ndbout << "LookupJoinError: Injecting error "<<  inject_err <<
+      " in node " << nodeId << " loop "<< i << endl;
+
+    if (restarter.insertErrorInNode(nodeId, inject_err) != 0)
+    {
+      ndbout << "Could not insert error in node "<< nodeId <<endl;
+      g_info << endl;
+      return NDBT_FAILED;
+    }
+
+    // It would be better if the test could differentiate failures caused
+    // by the fault injection from other failures.
+    // We expect the query to fail, and it is a test failure if it does not.
+    if (!hugoTrans.runLookupQuery(GETNDB(step), records))
+    {
+      g_info << "LookupJoinError didn't fail as expected." << endl;
+      // return NDBT_FAILED;
+    }
+
+    addMask(ctx, (1 << stepNo), "Running");
+    i++;
+  }
+  g_info << endl;
+  return NDBT_OK;
+}
+
+int
 runScanJoin(NDBT_Context* ctx, NDBT_Step* step){
   int loops = ctx->getNumLoops();
   int joinlevel = ctx->getProperty("JoinLevel", 3);
@@ -119,6 +186,65 @@ runScanJoin(NDBT_Context* ctx, NDBT_Step
 }
 
 int
+runScanJoinError(NDBT_Context* ctx, NDBT_Step* step){
+  int loops = ctx->getNumLoops();
+  int joinlevel = ctx->getProperty("JoinLevel", 3);
+  int until_stopped = ctx->getProperty("UntilStopped", (Uint32)0);
+  Uint32 stepNo = step->getStepNo();
+
+  int i = 0;
+  HugoQueryBuilder qb(GETNDB(step), ctx->getTab(), HugoQueryBuilder::O_SCAN);
+  qb.setJoinLevel(joinlevel);
+  const NdbQueryDef * query = qb.createQuery(GETNDB(step));
+  HugoQueries hugoTrans(* query);
+
+  NdbRestarter restarter;
+  int scanFaults[] = {
+      17002, 17004, 17005, 17006, 17008,
+      17012, // testing abort in ::execDIH_SCAN_TAB_CONF
+      17020, 17021, 17022, // lookup_send() encounters a dead node -> NodeFailure
+      17030, 17031, 17032, // LQHKEYREQ reply is LQHKEYREF('Invalid..')
+      17040, 17041, 17042, // lookup_parent_row -> OutOfQueryMemory
+      17050, 17051, 17052, 17053, // parseDA -> outOfSectionMem
+      17060, 17061, 17062, 17063 // scanIndex_parent_row -> outOfSectionMem
+  }; 
+  loops = faultToInject ? 1 : sizeof(scanFaults)/sizeof(int);
+
+  while ((i<loops || until_stopped) && !ctx->isTestStopped())
+  {
+    g_info << i << ": ";
+
+    int inject_err = faultToInject ? faultToInject : scanFaults[i];
+    int randomId = rand() % restarter.getNumDbNodes();
+    int nodeId = restarter.getDbNodeId(randomId);
+
+    ndbout << "ScanJoin: Injecting error "<<  inject_err <<
+              " in node " << nodeId << " loop "<< i<< endl;
+
+    if (restarter.insertErrorInNode(nodeId, inject_err) != 0)
+    {
+      ndbout << "Could not insert error in node "<< nodeId <<endl;
+      return NDBT_FAILED;
+    }
+
+    // It would be better if the test could differentiate failures caused
+    // by the fault injection from other failures.
+    // We expect the query to fail, and it is a test failure if it does not.
+    if (!hugoTrans.runScanQuery(GETNDB(step)))
+    {
+      g_info << "ScanJoinError didn't fail as expected." << endl;
+      // return NDBT_FAILED;
+    }
+
+    addMask(ctx, (1 << stepNo), "Running");
+    i++;
+  }
+
+  g_info << endl;
+  return NDBT_OK;
+}
+
+int
 runJoin(NDBT_Context* ctx, NDBT_Step* step){
   int loops = ctx->getNumLoops();
   int joinlevel = ctx->getProperty("JoinLevel", 3);
@@ -1229,12 +1355,36 @@ TESTCASE("NF_Join", ""){
   STEP(runRestarter);
   FINALIZER(runClearTable);
 }
+
+TESTCASE("LookupJoinError", ""){
+  INITIALIZER(runLoadTable);
+  STEP(runLookupJoinError);
+  VERIFIER(runClearTable);
+}
+TESTCASE("ScanJoinError", ""){
+  INITIALIZER(runLoadTable);
+  TC_PROPERTY("NodeNumber", 2);
+  STEP(runScanJoinError);
+  FINALIZER(runClearTable);
+}
 NDBT_TESTSUITE_END(testSpj);
 
 
 int main(int argc, const char** argv){
   ndb_init();
+
+  /* To inject a single fault while testing the fault injection itself,
+     add the required fault number at the end of the command line. */
+
+  if (argc > 0) sscanf(argv[argc-1], "%d", &faultToInject);
+  if (faultToInject && (faultToInject < FI_START || faultToInject > FI_END))
+  {
+    ndbout_c("Illegal fault to inject: %d. Legal range is between %d and %d",
+             faultToInject, FI_START, FI_END);
+    exit(1);
+  }
+
   NDBT_TESTSUITE_INSTANCE(testSpj);
   return testSpj.execute(argc, argv);
 }
-
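
The trailing argument is parsed with sscanf() on argv[argc-1], so a
non-numeric last argument simply leaves faultToInject at 0. A strtol-based
variant of the same parse that rejects trailing junk explicitly;
parseFaultArg() is a hypothetical helper, not part of the patch, and the
invocation shown in the comment is illustrative only:

  #include <cstdio>
  #include <cstdlib>

  // e.g.  testSpj -n LookupJoinError T1 17050
  static int parseFaultArg(int argc, const char** argv, int lo, int hi)
  {
    if (argc < 2)
      return 0;                                  // no trailing fault number
    char* end = NULL;
    long v = strtol(argv[argc - 1], &end, 10);
    if (end == argv[argc - 1] || *end != '\0' || v == 0)
      return 0;                                  // last argument is not a fault number
    if (v < lo || v > hi)
    {
      fprintf(stderr, "Illegal fault to inject: %ld. Legal range is %d..%d\n",
              v, lo, hi);
      exit(1);
    }
    return (int)v;
  }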

=== modified file 'storage/ndb/test/run-test/CMakeLists.txt'
--- a/storage/ndb/test/run-test/CMakeLists.txt	2011-12-09 09:25:48 +0000
+++ b/storage/ndb/test/run-test/CMakeLists.txt	2012-02-23 13:55:39 +0000
@@ -48,4 +48,5 @@ INSTALL(FILES   daily-basic-tests.txt da
                 conf-techra29.cnf conf-test.cnf conf-tyr64.cnf conf-upgrade.cnf
                 test-tests.txt upgrade-tests.txt release-bigmem-tests.txt
                 conf-tyr13.cnf
+                conf-blade08.cnf
         DESTINATION mysql-test/ndb)

=== modified file 'storage/ndb/test/src/HugoQueryBuilder.cpp'
--- a/storage/ndb/test/src/HugoQueryBuilder.cpp	2011-11-16 08:17:17 +0000
+++ b/storage/ndb/test/src/HugoQueryBuilder.cpp	2012-03-05 09:38:49 +0000
@@ -66,6 +66,7 @@ HugoQueryBuilder::fixOptions()
   setOption(O_UNIQUE_INDEX);
   setOption(O_TABLE_SCAN);
   setOption(O_ORDERED_INDEX);
+  setOption(O_GRANDPARENT);
   if (testOption(O_LOOKUP))
   {
     clearOption(O_TABLE_SCAN);
@@ -326,11 +327,10 @@ HugoQueryBuilder::getParents(OpIdx oi)
       continue;
     set.push_back(op);
 
-#if 0
     /**
-     * add parents
+     * Also add grandparents
      */
-    if (testOption(O_MULTI_PARENT))
+    if (testOption(O_GRANDPARENT))
     {
       while (op.m_parent != -1)
       {
@@ -338,7 +338,6 @@ HugoQueryBuilder::getParents(OpIdx oi)
         set.push_back(op);
       }
     }
-#endif
 
     if (checkBindable(cols, set, allow_bind_nullable))
       return set;
@@ -477,11 +476,17 @@ HugoQueryBuilder::createOp(NdbQueryBuild
 loop:
     OpIdx oi = getOp();
     Vector<Op> parents = getParents(oi);
+    NdbQueryOptions options;
     if (parents.size() == 0)
     {
       // no possible parents found for pTab...try another
       goto loop;
     }
+    if (parents.size() > 1)
+    {
+      // We have grandparents; 'parents[0]' is the real parent
+      options.setParent(parents[0].m_op);
+    }
     switch(oi.m_type){
     case NdbQueryOperationDef::PrimaryKeyAccess:{
       int opNo = 0;
@@ -497,7 +502,7 @@ loop:
       operands[opNo] = 0;
 
       op.m_parent = parents[0].m_idx;
-      op.m_op = builder.readTuple(oi.m_table, operands);
+      op.m_op = builder.readTuple(oi.m_table, operands, &options);
       break;
     }
     case NdbQueryOperationDef::UniqueIndexAccess: {
@@ -513,7 +518,7 @@ loop:
       operands[opNo] = 0;
 
       op.m_parent = parents[0].m_idx;
-      op.m_op = builder.readTuple(oi.m_index, oi.m_table, operands);
+      op.m_op = builder.readTuple(oi.m_index, oi.m_table, operands, &options);
       break;
     }
     case NdbQueryOperationDef::TableScan:
@@ -533,7 +538,7 @@ loop:
 
       op.m_parent = parents[0].m_idx;
       NdbQueryIndexBound bounds(operands); // Only EQ for now
-      op.m_op = builder.scanIndex(oi.m_index, oi.m_table, &bounds);
+      op.m_op = builder.scanIndex(oi.m_index, oi.m_table, &bounds, &options);
       if (op.m_op == 0)
       {
         ndbout << "Failed to add to " << endl;
