List: Commits
From: magnus.blaudd  Date: June 25 2012 1:14pm
Subject: bzr push into mysql-5.5-cluster-7.3 branch (magnus.blaudd:3893 to 3894)
 3894 magnus.blaudd@stripped	2012-06-25 [merge]
      Merge 7.2 -> 7.3

    modified:
      mysql-test/suite/ndb/r/ndb_alter_table_online.result
      mysql-test/suite/ndb/r/ndb_index_stat_restart.result
      mysql-test/suite/ndb/r/ndb_reconnect.result
      mysql-test/suite/ndb/t/ndb_alter_table_online.test
      mysql-test/suite/ndb/t/ndb_reconnect.test
      mysql-test/suite/ndb_big/my.cnf
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster_binlog.cc
      sql/ndb_thd_ndb.h
      sql/sql_show.cc
      sql/sql_show.h
      sql/sql_table.cc
      storage/ndb/include/kernel/ndb_limits.h
      storage/ndb/include/kernel/signaldata/CreateTable.hpp
      storage/ndb/include/kernel/signaldata/DictTabInfo.hpp
      storage/ndb/include/kernel/signaldata/ScanTab.hpp
      storage/ndb/include/ndb_version.h.in
      storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
      storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
      storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
      storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.hpp
      storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
      storage/ndb/src/kernel/blocks/suma/Suma.hpp
      storage/ndb/src/kernel/vm/SimulatedBlock.hpp
      storage/ndb/src/kernel/vm/mt.cpp
      storage/ndb/src/ndbapi/NdbDictionary.cpp
      storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
      storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
      storage/ndb/src/ndbapi/NdbScanOperation.cpp
      storage/ndb/test/include/NdbBackup.hpp
      storage/ndb/test/include/NdbConfig.hpp
      storage/ndb/test/ndbapi/testBackup.cpp
      storage/ndb/test/ndbapi/testBasic.cpp
      storage/ndb/test/ndbapi/testSystemRestart.cpp
      storage/ndb/test/ndbapi/testTimeout.cpp
      storage/ndb/test/ndbapi/testUpgrade.cpp
      storage/ndb/test/run-test/daily-basic-tests.txt
      support-files/mysql.spec.sh
 3893 magnus.blaudd@stripped	2012-06-13 [merge]
      Merge 7.2 -> 7.3

    removed:
      mysql-test/suite/innodb/include/innodb-index.inc
      mysql-test/t/cache_innodb-master.opt
    added:
      mysql-test/include/show_slave_hosts.inc
      mysql-test/r/blackhole.result
      mysql-test/r/bug12427262.result
      mysql-test/r/partition_cache.result
      mysql-test/suite/innodb/r/innodb_bug11766634.result
      mysql-test/suite/innodb/r/innodb_bug12902967.result
      mysql-test/suite/innodb/r/innodb_bug13635833.result
      mysql-test/suite/innodb/r/innodb_bug14007649.result
      mysql-test/suite/innodb/t/innodb_bug11766634-master.opt
      mysql-test/suite/innodb/t/innodb_bug11766634.test
      mysql-test/suite/innodb/t/innodb_bug12902967.test
      mysql-test/suite/innodb/t/innodb_bug13635833.test
      mysql-test/suite/innodb/t/innodb_bug14007649.test
      mysql-test/suite/rpl/r/rpl_auto_increment_bug45679.result
      mysql-test/suite/rpl/r/rpl_parallel_show_binlog_events_purge_logs.result
      mysql-test/suite/rpl/r/rpl_report_port.result
      mysql-test/suite/rpl/r/rpl_row_merge_engine.result
      mysql-test/suite/rpl/r/rpl_stm_relay_ign_space.result
      mysql-test/suite/rpl/t/rpl_auto_increment_bug45679.test
      mysql-test/suite/rpl/t/rpl_parallel_show_binlog_events_purge_logs.test
      mysql-test/suite/rpl/t/rpl_report_port-master.opt
      mysql-test/suite/rpl/t/rpl_report_port.test
      mysql-test/suite/rpl/t/rpl_row_merge_engine.test
      mysql-test/suite/rpl/t/rpl_stm_relay_ign_space-slave.opt
      mysql-test/suite/rpl/t/rpl_stm_relay_ign_space.test
      mysql-test/t/blackhole.test
      mysql-test/t/bug12427262.test
      mysql-test/t/partition_cache.test
      sql/mem_root_array.h
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/QueryOrderingTest.java
      storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/QueryOrderingTest.java
      storage/ndb/tools/ndb_blob_tool.cpp
    renamed:
      mysql-test/include/rpl_multi_engine2.inc => mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_engine2.inc
    modified:
      VERSION
      client/mysqldump.c
      client/mysqlslap.c
      config.h.cmake
      configure.cmake
      include/my_base.h
      include/violite.h
      libmysqld/lib_sql.cc
      mysql-test/extra/rpl_tests/rpl_row_basic.test
      mysql-test/include/query_cache.inc
      mysql-test/mysql-test-run.pl
      mysql-test/r/alter_table.result
      mysql-test/r/cache_innodb.result
      mysql-test/r/cast.result
      mysql-test/r/ctype_ucs.result
      mysql-test/r/errors.result
      mysql-test/r/filesort_debug.result
      mysql-test/r/func_in.result
      mysql-test/r/gis.result
      mysql-test/r/help.result
      mysql-test/r/mysqld--help-notwin.result
      mysql-test/r/mysqld--help-win.result
      mysql-test/r/mysqldump.result
      mysql-test/r/mysqlslap.result
      mysql-test/r/partition_innodb.result
      mysql-test/r/partition_pruning.result
      mysql-test/r/ps.result
      mysql-test/r/select.result
      mysql-test/r/sp-bugs.result
      mysql-test/r/sp-code.result
      mysql-test/r/sp.result
      mysql-test/r/sp_notembedded.result
      mysql-test/r/sql_mode.result
      mysql-test/r/subselect.result
      mysql-test/r/subselect_innodb.result
      mysql-test/r/user_var.result
      mysql-test/r/view.result
      mysql-test/r/xa.result
      mysql-test/suite/binlog/r/binlog_grant.result
      mysql-test/suite/binlog/r/binlog_sql_mode.result
      mysql-test/suite/binlog/r/binlog_unsafe.result
      mysql-test/suite/binlog/t/binlog_grant.test
      mysql-test/suite/binlog/t/binlog_sql_mode.test
      mysql-test/suite/binlog/t/binlog_unsafe.test
      mysql-test/suite/innodb/r/innodb-autoinc.result
      mysql-test/suite/innodb/t/innodb-autoinc.test
      mysql-test/suite/innodb/t/innodb_bug34300.test
      mysql-test/suite/ndb/r/ndb_alter_table_online.result
      mysql-test/suite/ndb/r/ndb_basic.result
      mysql-test/suite/ndb/r/ndb_condition_pushdown.result
      mysql-test/suite/ndb/r/ndb_gis.result
      mysql-test/suite/ndb/r/ndb_join_pushdown_default.result
      mysql-test/suite/ndb/t/ndb_alter_table_online.test
      mysql-test/suite/ndb/t/ndb_join_pushdown.inc
      mysql-test/suite/ndb_rpl/r/ndb_rpl_dd_advance.result
      mysql-test/suite/ndb_rpl/t/ndb_rpl_2multi_basic.inc
      mysql-test/suite/ndb_rpl/t/ndb_rpl_dd_advance.test
      mysql-test/suite/rpl/r/rpl_filter_tables_not_exist.result
      mysql-test/suite/rpl/r/rpl_known_bugs_detection.result
      mysql-test/suite/rpl/r/rpl_row_basic_2myisam.result
      mysql-test/suite/rpl/r/rpl_row_basic_3innodb.result
      mysql-test/suite/rpl/r/rpl_row_basic_allow_batching.result
      mysql-test/suite/rpl/r/rpl_show_slave_hosts.result
      mysql-test/suite/rpl/t/rpl_filter_tables_not_exist.test
      mysql-test/suite/rpl/t/rpl_show_slave_hosts.test
      mysql-test/suite/sys_vars/t/report_port_basic.test
      mysql-test/t/alter_table.test
      mysql-test/t/cast.test
      mysql-test/t/ctype_ucs.test
      mysql-test/t/errors.test
      mysql-test/t/filesort_debug.test
      mysql-test/t/func_in.test
      mysql-test/t/gis.test
      mysql-test/t/help.test
      mysql-test/t/mysqldump.test
      mysql-test/t/mysqlslap.test
      mysql-test/t/partition_innodb.test
      mysql-test/t/ps.test
      mysql-test/t/select.test
      mysql-test/t/sp-bugs.test
      mysql-test/t/sp-code.test
      mysql-test/t/sp.test
      mysql-test/t/sp_notembedded.test
      mysql-test/t/sql_mode.test
      mysql-test/t/subselect.test
      mysql-test/t/subselect_innodb.test
      mysql-test/t/user_var.test
      mysql-test/t/view.test
      mysql-test/t/xa.test
      mysys/lf_alloc-pin.c
      mysys/my_handler_errors.h
      mysys/stacktrace.c
      sql/field.cc
      sql/field.h
      sql/field_conv.cc
      sql/filesort.cc
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster_push.cc
      sql/ha_partition.cc
      sql/ha_partition.h
      sql/handler.cc
      sql/handler.h
      sql/item.cc
      sql/item.h
      sql/item_cmpfunc.h
      sql/item_subselect.cc
      sql/item_timefunc.cc
      sql/log.cc
      sql/log.h
      sql/log_event.cc
      sql/log_event.h
      sql/log_event_old.cc
      sql/mysqld.cc
      sql/password.c
      sql/rpl_rli.cc
      sql/rpl_rli.h
      sql/rpl_utility.cc
      sql/rpl_utility.h
      sql/share/errmsg-utf8.txt
      sql/signal_handler.cc
      sql/slave.cc
      sql/sp_head.cc
      sql/spatial.cc
      sql/sql_base.cc
      sql/sql_class.cc
      sql/sql_class.h
      sql/sql_lex.cc
      sql/sql_lex.h
      sql/sql_parse.cc
      sql/sql_prepare.cc
      sql/sql_repl.cc
      sql/sql_select.cc
      sql/sql_show.cc
      sql/sql_table.cc
      sql/sql_view.cc
      sql/sys_vars.cc
      sql/table.cc
      sql/transaction.cc
      storage/blackhole/ha_blackhole.cc
      storage/example/ha_example.cc
      storage/innobase/btr/btr0btr.c
      storage/innobase/btr/btr0cur.c
      storage/innobase/btr/btr0pcur.c
      storage/innobase/buf/buf0buf.c
      storage/innobase/buf/buf0flu.c
      storage/innobase/buf/buf0lru.c
      storage/innobase/dict/dict0dict.c
      storage/innobase/dict/dict0load.c
      storage/innobase/fil/fil0fil.c
      storage/innobase/fsp/fsp0fsp.c
      storage/innobase/handler/ha_innodb.cc
      storage/innobase/handler/ha_innodb.h
      storage/innobase/handler/handler0alter.cc
      storage/innobase/ibuf/ibuf0ibuf.c
      storage/innobase/include/btr0btr.h
      storage/innobase/include/buf0buf.h
      storage/innobase/include/buf0lru.h
      storage/innobase/include/buf0types.h
      storage/innobase/include/db0err.h
      storage/innobase/include/dict0dict.h
      storage/innobase/include/dict0dict.ic
      storage/innobase/include/dict0mem.h
      storage/innobase/include/fil0fil.h
      storage/innobase/include/mtr0mtr.ic
      storage/innobase/include/srv0srv.h
      storage/innobase/include/sync0rw.h
      storage/innobase/include/sync0rw.ic
      storage/innobase/include/trx0purge.h
      storage/innobase/include/univ.i
      storage/innobase/lock/lock0lock.c
      storage/innobase/log/log0log.c
      storage/innobase/row/row0ins.c
      storage/innobase/row/row0merge.c
      storage/innobase/row/row0mysql.c
      storage/innobase/row/row0row.c
      storage/innobase/srv/srv0srv.c
      storage/innobase/srv/srv0start.c
      storage/innobase/sync/sync0arr.c
      storage/innobase/trx/trx0purge.c
      storage/innobase/ut/ut0ut.c
      storage/myisam/ha_myisam.cc
      storage/myisam/mi_dynrec.c
      storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/Query.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/CandidateIndexImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/InPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryExecutionContextImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/ScanOperation.java
      storage/ndb/clusterj/clusterj-core/src/main/resources/com/mysql/clusterj/core/Bundle.properties
      storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/SQLExecutor.java
      storage/ndb/clusterj/clusterj-openjpa/CMakeLists.txt
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractQueryTest.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordIndexScanOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordScanOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ScanOperationImpl.java
      storage/ndb/clusterj/clusterj-unit/src/main/java/junit/framework/TestCase.java
      storage/ndb/include/ndb_config.h.in
      storage/ndb/include/ndb_version.h.in
      storage/ndb/include/ndbapi/Ndb.hpp
      storage/ndb/include/ndbapi/NdbScanOperation.hpp
      storage/ndb/include/portlib/NdbSleep.h
      storage/ndb/include/transporter/TransporterRegistry.hpp
      storage/ndb/ndb_configure.cmake
      storage/ndb/src/common/transporter/TCP_Transporter.cpp
      storage/ndb/src/common/transporter/Transporter.cpp
      storage/ndb/src/common/transporter/Transporter.hpp
      storage/ndb/src/common/util/HashMap2.cpp
      storage/ndb/src/common/util/version.cpp
      storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/ndbapi/Ndb.cpp
      storage/ndb/src/ndbapi/Ndbinit.cpp
      storage/ndb/src/ndbapi/Ndblist.cpp
      storage/ndb/test/ndbapi/flexAsynch.cpp
      storage/ndb/tools/CMakeLists.txt
      storage/perfschema/pfs_instr.cc
      vio/viosocket.c
=== modified file 'mysql-test/suite/ndb/r/ndb_alter_table_online.result'
--- a/mysql-test/suite/ndb/r/ndb_alter_table_online.result	2012-06-13 19:32:58 +0000
+++ b/mysql-test/suite/ndb/r/ndb_alter_table_online.result	2012-06-21 10:12:34 +0000
@@ -649,3 +649,39 @@ alter online table t1 add e varchar(20) 
 ERROR 42000: This version of MySQL doesn't yet support 'alter online table t1 add e varchar(20) default 'x' column_format dynamic'
 alter online table t1 add e varchar(20) default null column_format dynamic;
 drop table t1;
+CREATE TABLE categorylinks (
+cl_from int(10) unsigned NOT NULL DEFAULT '0',
+cl_to varbinary(255) NOT NULL DEFAULT '',
+cl_sortkey varbinary(70) NOT NULL DEFAULT '',
+cl_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE
+CURRENT_TIMESTAMP,
+UNIQUE KEY cl_from (cl_from,cl_to),
+KEY cl_sortkey (cl_to,cl_sortkey,cl_from),
+KEY cl_timestamp (cl_to,cl_timestamp)
+) ENGINE=ndb DEFAULT CHARSET=binary;
+ALTER TABLE categorylinks
+CHANGE COLUMN cl_sortkey cl_sortkey varbinary(230) NOT NULL default
+'',
+ADD COLUMN cl_sortkey_prefix varchar(255) binary NOT NULL default '',
+ADD COLUMN cl_collation varbinary(32) NOT NULL default '',
+ADD COLUMN cl_type ENUM('page', 'subcat', 'file') NOT NULL default
+'page',
+ADD INDEX (cl_collation),
+DROP INDEX cl_sortkey,
+ADD INDEX cl_sortkey (cl_to, cl_type, cl_sortkey, cl_from);
+SHOW CREATE TABLE categorylinks;
+Table	Create Table
+categorylinks	CREATE TABLE `categorylinks` (
+  `cl_from` int(10) unsigned NOT NULL DEFAULT '0',
+  `cl_to` varbinary(255) NOT NULL DEFAULT '',
+  `cl_sortkey` varbinary(230) NOT NULL DEFAULT '',
+  `cl_timestamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+  `cl_sortkey_prefix` varbinary(255) NOT NULL DEFAULT '',
+  `cl_collation` varbinary(32) NOT NULL DEFAULT '',
+  `cl_type` enum('page','subcat','file') NOT NULL DEFAULT 'page',
+  UNIQUE KEY `cl_from` (`cl_from`,`cl_to`),
+  KEY `cl_timestamp` (`cl_to`,`cl_timestamp`),
+  KEY `cl_collation` (`cl_collation`),
+  KEY `cl_sortkey` (`cl_to`,`cl_type`,`cl_sortkey`,`cl_from`)
+) ENGINE=ndbcluster DEFAULT CHARSET=binary
+DROP TABLE categorylinks;

=== modified file 'mysql-test/suite/ndb/r/ndb_index_stat_restart.result'
--- a/mysql-test/suite/ndb/r/ndb_index_stat_restart.result	2012-03-27 09:47:51 +0000
+++ b/mysql-test/suite/ndb/r/ndb_index_stat_restart.result	2012-06-15 10:58:32 +0000
@@ -44,7 +44,7 @@ Ndb_index_stat_cache_clean	0
 set global ndb_dbg_check_shares=1;
 drop table if exists t1;
 Warnings:
-Warning	155	Table 'test.t1' doesn't exist
+Note	1051	Unknown table 't1'
 create table t1 (
 a int primary key,
 b int,

=== modified file 'mysql-test/suite/ndb/r/ndb_reconnect.result'
--- a/mysql-test/suite/ndb/r/ndb_reconnect.result	2009-02-03 13:35:56 +0000
+++ b/mysql-test/suite/ndb/r/ndb_reconnect.result	2012-06-14 11:48:36 +0000
@@ -26,3 +26,33 @@ a	b	c
 1	row 1	2
 insert into t1 values (2, "row 1", 37);
 drop table t1;
+create table t1(a int, b varchar(10), c date) engine=ndb;
+CREATE TRIGGER trg1 BEFORE UPDATE ON t1 FOR EACH ROW BEGIN
+SET new.c = '1901-01-01 01:01:01';
+End //
+insert into t1 values (1, "row 1", NULL),(2, "row 2", NULL);
+select * from t1 order by a;
+a	b	c
+1	row 1	NULL
+2	row 2	NULL
+create table t2(a int, b varchar(10), c date) engine=myisam;
+CREATE TRIGGER trg2 BEFORE UPDATE ON t2 FOR EACH ROW BEGIN
+SET new.c = '1901-01-01 01:01:01';
+End //
+create table t1(a int, b varchar(10), c date) engine=ndb;
+CREATE TRIGGER trg1 BEFORE UPDATE ON t1 FOR EACH ROW BEGIN
+SET new.c = '1902-02-02 02:02:02';
+End //
+insert into t1 values (1, "row 1", NULL),(2, "row 2", NULL);
+select * from t1 order by a;
+a	b	c
+1	row 1	NULL
+2	row 2	NULL
+create table t2(a int, b varchar(10), c date) engine=myisam;
+ERROR 42S01: Table 't2' already exists
+drop table t2;
+create table t2(a int, b varchar(10), c date) engine=myisam;
+CREATE TRIGGER trg2 BEFORE UPDATE ON t2 FOR EACH ROW BEGIN
+SET new.c = '1901-01-01 01:01:01';
+End //
+drop table t1, t2;

=== modified file 'mysql-test/suite/ndb/t/ndb_alter_table_online.test'
--- a/mysql-test/suite/ndb/t/ndb_alter_table_online.test	2012-06-13 19:32:58 +0000
+++ b/mysql-test/suite/ndb/t/ndb_alter_table_online.test	2012-06-21 10:12:34 +0000
@@ -703,3 +703,33 @@ create table t1(a int(10) unsigned not n
 alter online table t1 add e varchar(20) default 'x' column_format dynamic;
 alter online table t1 add e varchar(20) default null column_format dynamic;
 drop table t1;
+
+#
+# Bug #12755722 61528: INNODB BACKEND CRASHES ON ALTER TABLE STATEMENT (MYSQL SERVER HAS GONE AWAY
+#
+
+CREATE TABLE categorylinks (
+  cl_from int(10) unsigned NOT NULL DEFAULT '0',
+  cl_to varbinary(255) NOT NULL DEFAULT '',
+  cl_sortkey varbinary(70) NOT NULL DEFAULT '',
+  cl_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE
+CURRENT_TIMESTAMP,
+  UNIQUE KEY cl_from (cl_from,cl_to),
+  KEY cl_sortkey (cl_to,cl_sortkey,cl_from),
+  KEY cl_timestamp (cl_to,cl_timestamp)
+) ENGINE=ndb DEFAULT CHARSET=binary;
+
+ALTER TABLE categorylinks
+        CHANGE COLUMN cl_sortkey cl_sortkey varbinary(230) NOT NULL default
+'',
+        ADD COLUMN cl_sortkey_prefix varchar(255) binary NOT NULL default '',
+        ADD COLUMN cl_collation varbinary(32) NOT NULL default '',
+        ADD COLUMN cl_type ENUM('page', 'subcat', 'file') NOT NULL default
+'page',
+        ADD INDEX (cl_collation),
+        DROP INDEX cl_sortkey,
+        ADD INDEX cl_sortkey (cl_to, cl_type, cl_sortkey, cl_from);
+
+SHOW CREATE TABLE categorylinks;
+
+DROP TABLE categorylinks;
\ No newline at end of file

=== modified file 'mysql-test/suite/ndb/t/ndb_reconnect.test'
--- a/mysql-test/suite/ndb/t/ndb_reconnect.test	2009-06-06 13:04:45 +0000
+++ b/mysql-test/suite/ndb/t/ndb_reconnect.test	2012-06-15 09:14:25 +0000
@@ -76,3 +76,69 @@ insert into t1 values (2, "row 1", 37);
 
 # cleanup
 drop table t1;
+
+#
+#Bug #13824846 FRM FILES ARE CREATED FOR MYSQLD, BUT TABLE DOES NOT EXIST IN CLUSTER
+#
+
+connection default;
+create table t1(a int, b varchar(10), c date) engine=ndb;
+delimiter //;
+CREATE TRIGGER trg1 BEFORE UPDATE ON t1 FOR EACH ROW BEGIN
+   SET new.c = '1901-01-01 01:01:01';
+End //
+delimiter ;//
+
+insert into t1 values (1, "row 1", NULL),(2, "row 2", NULL);
+select * from t1 order by a;
+
+create table t2(a int, b varchar(10), c date) engine=myisam;
+delimiter //;
+CREATE TRIGGER trg2 BEFORE UPDATE ON t2 FOR EACH ROW BEGIN
+   SET new.c = '1901-01-01 01:01:01';
+End //
+delimiter ;//
+
+# drop the ndb table inside ndb
+system exec $NDB_DROP_TABLE --no-defaults -d test t1 >> $NDB_TOOLS_OUTPUT ; 
+
+# Restart cluster nodes and clear all meta-data
+--exec $NDB_MGM --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" -e "all restart" >> $NDB_TOOLS_OUTPUT
+# Wait for all nodes to enter "started"
+--exec $NDB_WAITER --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" >> $NDB_TOOLS_OUTPUT
+
+#
+# Wait until the connection to the
+# cluster has been restored or timeout occurs
+#
+connection default;
+--disable_result_log
+--disable_query_log
+--source include/ndb_not_readonly.inc
+--enable_result_log
+--enable_query_log
+
+# Create the table again to check there are no conflicts
+create table t1(a int, b varchar(10), c date) engine=ndb;
+delimiter //;
+CREATE TRIGGER trg1 BEFORE UPDATE ON t1 FOR EACH ROW BEGIN
+   SET new.c = '1902-02-02 02:02:02';
+End //
+delimiter ;//
+
+insert into t1 values (1, "row 1", NULL),(2, "row 2", NULL);
+select * from t1 order by a;
+
+# Check that only ndb tables have been cleaned away
+--error ER_TABLE_EXISTS_ERROR
+create table t2(a int, b varchar(10), c date) engine=myisam;
+drop table t2;
+create table t2(a int, b varchar(10), c date) engine=myisam;
+delimiter //;
+CREATE TRIGGER trg2 BEFORE UPDATE ON t2 FOR EACH ROW BEGIN
+   SET new.c = '1901-01-01 01:01:01';
+End //
+delimiter ;//
+
+# cleanup
+drop table t1, t2;

=== modified file 'mysql-test/suite/ndb_big/my.cnf'
--- a/mysql-test/suite/ndb_big/my.cnf	2011-11-10 13:33:56 +0000
+++ b/mysql-test/suite/ndb_big/my.cnf	2012-06-20 07:36:18 +0000
@@ -16,7 +16,7 @@ mysqld=
 
 DataMemory=700M
 IndexMemory=220M
-MaxNoOfConcurrentOperations=32768
+MaxNoOfConcurrentOperations=262144
 StringMemory=25
 MaxNoOfTables=12800
 MaxNoOfOrderedIndexes=12800

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2012-06-13 19:43:03 +0000
+++ b/sql/ha_ndbcluster.cc	2012-06-25 13:08:47 +0000
@@ -11605,8 +11605,19 @@ ndbcluster_find_files(handlerton *hton, 
     {
       DBUG_PRINT("info", ("NDB says %s does not exists", file_name->str));
       it.remove();
-      // Put in list of tables to remove from disk
-      delete_list.push_back(thd->strdup(file_name->str));
+      if (thd == injector_thd &&
+	  thd_ndb->options & TNTO_NO_REMOVE_STRAY_FILES)
+      {
+	/*
+	  Don't delete anything when called from
+	  the binlog thread. This is a kludge to avoid
+	  that something is deleted when "Ndb schema dist"
+	  uses find_files() to check for "local tables in db"
+	*/
+      }
+      else
+	// Put in list of tables to remove from disk
+	delete_list.push_back(thd->strdup(file_name->str));
     }
   }
 

=== modified file 'sql/ha_ndbcluster_binlog.cc'
--- a/sql/ha_ndbcluster_binlog.cc	2012-03-28 15:55:23 +0000
+++ b/sql/ha_ndbcluster_binlog.cc	2012-06-15 10:08:39 +0000
@@ -1090,6 +1090,51 @@ static void ndb_notify_tables_writable()
 }
 
 /*
+
+ */
+
+static void clean_away_stray_files(THD *thd)
+{
+  /*
+    Clean-up any stray files for non-existing NDB tables
+  */
+  LOOKUP_FIELD_VALUES lookup_field_values;
+  bool with_i_schema;
+  List<LEX_STRING> db_names;
+  List_iterator_fast<LEX_STRING> it(db_names);
+  LEX_STRING *db_name;
+  List<LEX_STRING> tab_names;
+  char path[FN_REFLEN + 1];
+ 
+  DBUG_ENTER("clean_away_stray_files");
+  bzero((char*) &lookup_field_values, sizeof(LOOKUP_FIELD_VALUES));
+  if (make_db_list(thd, &db_names, &lookup_field_values, &with_i_schema))
+  {
+    thd->clear_error();
+    DBUG_PRINT("info", ("Failed to find databases"));
+    DBUG_VOID_RETURN;
+  }
+  it.rewind();
+  while ((db_name= it++))
+  {
+    DBUG_PRINT("info", ("Found database %s", db_name->str));
+    if (strcmp(NDB_REP_DB, db_name->str)) /* Skip system database */
+    {
+      sql_print_information("NDB: Cleaning stray tables from database '%s'",
+                            db_name->str);
+      build_table_filename(path, sizeof(path) - 1, db_name->str, "", "", 0);
+      if (find_files(thd, &tab_names, db_name->str, path, NullS, 0)
+          != FIND_FILES_OK)
+      {
+        thd->clear_error();
+        DBUG_PRINT("info", ("Failed to find tables"));
+      }
+    }
+  }
+  DBUG_VOID_RETURN;
+}
+
+/*
   Ndb has no representation of the database schema objects.
   The mysql.ndb_schema table contains the latest schema operations
   done via a mysqld, and thus reflects databases created/dropped/altered
@@ -1227,7 +1272,7 @@ static int ndbcluster_find_all_databases
           if (database_exists)
           {
             /* drop missing database */
-            sql_print_information("NDB: Discovered reamining database '%s'", db);
+            sql_print_information("NDB: Discovered remaining database '%s'", db);
           }
         }
       }
@@ -1442,6 +1487,8 @@ ndb_binlog_setup(THD *thd)
     }
   }
 
+  clean_away_stray_files(thd);
+
   if (ndbcluster_find_all_databases(thd))
   {
     return false;

=== modified file 'sql/ndb_thd_ndb.h'
--- a/sql/ndb_thd_ndb.h	2011-12-20 13:26:37 +0000
+++ b/sql/ndb_thd_ndb.h	2012-06-15 10:08:39 +0000
@@ -46,6 +46,7 @@ enum THD_NDB_TRANS_OPTIONS
   TNTO_INJECTED_APPLY_STATUS= 1 << 0
   ,TNTO_NO_LOGGING=           1 << 1
   ,TNTO_TRANSACTIONS_OFF=     1 << 2
+  ,TNTO_NO_REMOVE_STRAY_FILES=  1 << 3
 };
 
 class Thd_ndb 
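
The new TNTO_NO_REMOVE_STRAY_FILES bit is consumed by the ndbcluster_find_files() change above. A minimal sketch of how such a Thd_ndb option bit would be set, tested and cleared — the set/clear call sites here are assumed for illustration; only the test against thd_ndb->options appears in this patch:

  // Sketch only -- hypothetical call sites around find_files():
  thd_ndb->options |= TNTO_NO_REMOVE_STRAY_FILES;    // binlog thread opts out

  if (thd_ndb->options & TNTO_NO_REMOVE_STRAY_FILES)
  {
    // don't schedule stray .frm files for removal
  }

  thd_ndb->options &= ~TNTO_NO_REMOVE_STRAY_FILES;   // restore default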

=== modified file 'sql/sql_show.cc'
--- a/sql/sql_show.cc	2012-06-08 11:56:49 +0000
+++ b/sql/sql_show.cc	2012-06-15 10:08:39 +0000
@@ -2400,11 +2400,13 @@ void calc_sum_of_all_status(STATUS_VAR *
 /* This is only used internally, but we need it here as a forward reference */
 extern ST_SCHEMA_TABLE schema_tables[];
 
+#ifdef MCP_WL1735
 typedef struct st_lookup_field_values
 {
   LEX_STRING db_value, table_value;
   bool wild_db_value, wild_table_value;
 } LOOKUP_FIELD_VALUES;
+#endif
 
 
 /*

=== modified file 'sql/sql_show.h'
--- a/sql/sql_show.h	2011-08-31 10:39:08 +0000
+++ b/sql/sql_show.h	2012-06-15 10:08:39 +0000
@@ -35,6 +35,14 @@ typedef struct st_schema_table ST_SCHEMA
 struct TABLE;
 typedef struct system_status_var STATUS_VAR;
 
+#ifndef MCP_WL1735
+typedef struct st_lookup_field_values
+{
+  LEX_STRING db_value, table_value;
+  bool wild_db_value, wild_table_value;
+} LOOKUP_FIELD_VALUES;
+#endif
+
 enum find_files_result {
   FIND_FILES_OK,
   FIND_FILES_OOM,
@@ -81,6 +89,12 @@ enum find_files_result {
 #define IS_FILES_STATUS              36
 #define IS_FILES_EXTRA               37
 
+#ifndef MCP_WL1735
+int make_db_list(THD *thd, List<LEX_STRING> *files,
+                 LOOKUP_FIELD_VALUES *lookup_field_vals,
+                 bool *with_i_schema);
+#endif
+
 find_files_result find_files(THD *thd, List<LEX_STRING> *files, const char *db,
                              const char *path, const char *wild, bool dir);
 

=== modified file 'sql/sql_table.cc'
--- a/sql/sql_table.cc	2012-06-08 11:56:49 +0000
+++ b/sql/sql_table.cc	2012-06-21 10:12:34 +0000
@@ -5394,8 +5394,11 @@ compare_tables(THD *thd,
     for(; key_part != end; key_part++)
     {
       /* Mark field to be part of new key */
-      if ((field= table->field[key_part->fieldnr]))
+      if (key_part->fieldnr < table->s->fields)
+      {
+        field= table->field[key_part->fieldnr];
         field->flags|= FIELD_IN_ADD_INDEX;
+      }
     }
     *table_changes= IS_EQUAL_NO;
     DBUG_PRINT("info", ("index changed: '%s'", table_key->name));
@@ -5429,16 +5432,18 @@ compare_tables(THD *thd,
       end= key_part + new_key->key_parts;
       for(; key_part != end; key_part++)
       {
-        /*
-          Check if all fields in key are declared
-          NOT NULL
-         */
         if (key_part->fieldnr < table->s->fields)
         {
-          /* Mark field to be part of new key */
           field= table->field[key_part->fieldnr];
+          /* Mark field to be part of new key */
           field->flags|= FIELD_IN_ADD_INDEX;
-          is_not_null= (is_not_null && (!field->maybe_null()));
+          /*
+            Check if all fields in key are declared
+            NOT NULL
+          */
+          is_not_null=
+            (is_not_null && 
+             (!field->maybe_null()));
         }
         else
         {

=== modified file 'storage/ndb/include/kernel/ndb_limits.h'
--- a/storage/ndb/include/kernel/ndb_limits.h	2012-02-23 15:41:31 +0000
+++ b/storage/ndb/include/kernel/ndb_limits.h	2012-06-21 15:24:52 +0000
@@ -96,8 +96,26 @@
 #define MAX_KEY_SIZE_IN_WORDS 1023
 #define MAX_FRM_DATA_SIZE 6000
 #define MAX_NULL_BITS 4096
-#define MAX_FRAGMENT_DATA_BYTES (4+(2 * 8 * MAX_REPLICAS * MAX_NDB_NODES))
+/*
+ * Fragmentation data are Uint16, first two are #replicas,
+ * and #fragments, then for each fragment, first log-part-id
+ * then nodeid for each replica.
+ * See creation in Dbdih::execCREATE_FRAGMENTATION_REQ()
+ * and read in Dbdih::execDIADDTABREQ()
+ */
+#define MAX_FRAGMENT_DATA_ENTRIES (2 + (1 + MAX_REPLICAS) * MAX_NDB_PARTITIONS)
+#define MAX_FRAGMENT_DATA_BYTES (2 * MAX_FRAGMENT_DATA_ENTRIES)
+#define MAX_FRAGMENT_DATA_WORDS ((MAX_FRAGMENT_DATA_BYTES + 3) / 4)
+
+#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
+#define MAX_NDB_PARTITIONS 240
+#else
 #define MAX_NDB_PARTITIONS 1024
+#endif
+
+#define NDB_PARTITION_BITS 16
+#define NDB_PARTITION_MASK ((Uint32)((1 << NDB_PARTITION_BITS) - 1))
+
 #define MAX_RANGE_DATA (131072+MAX_NDB_PARTITIONS) //0.5 MByte of list data
 
 #define MAX_WORDS_META_FILE 24576
@@ -191,7 +209,22 @@
  */
 #define LCP_RESTORE_BUFFER (4*32)
 
-#define NDB_DEFAULT_HASHMAP_BUCKTETS 240
+
+/**
+ * Support at least one partition per LDM. And
+ * also try to make size a multiple of all possible
+ * data node counts, so that all partitions are
+ * related to the same number of hashmap buckets
+ * as possible, otherwise some partitions will be
+ * bigger than others.
+ */
+
+#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
+#define NDB_DEFAULT_HASHMAP_BUCKETS 240
+#else
+#define NDB_DEFAULT_HASHMAP_BUCKETS (48 * 16 * 5) /* 3840 */
+#endif
+#define NDB_DEFAULT_HASHMAP_BUCKETS_BYTES (2 * NDB_DEFAULT_HASHMAP_BUCKETS)
 
 /**
  * Bits/mask used for coding/decoding blockno/blockinstance
@@ -220,6 +253,16 @@
 
 #define NDB_FILE_BUFFER_SIZE (256*1024)
 
+/*
+ * NDB_FS_RW_PAGES must be big enough for biggest request,
+ * probably PACK_TABLE_PAGES (see Dbdih.hpp)
+ */
+#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
+#define NDB_FS_RW_PAGES 32
+#else
+#define NDB_FS_RW_PAGES 134
+#endif
+
 /**
  * MAX_ATTRIBUTES_IN_TABLE old handling
  */
@@ -257,4 +300,22 @@
 #define MAX_INDEX_STAT_VALUE_CSIZE  512 /* Longvarbinary(2048) */
 #define MAX_INDEX_STAT_VALUE_FORMAT 1
 
+#ifdef NDB_STATIC_ASSERT
+
+static inline void ndb_limits_constraints()
+{
+  NDB_STATIC_ASSERT(MAX_NDB_PARTITIONS <= NDB_DEFAULT_HASHMAP_BUCKETS);
+
+  NDB_STATIC_ASSERT(MAX_NDB_PARTITIONS - 1 <= NDB_PARTITION_MASK);
+
+  // MAX_NDB_NODES should be 48, but code assumes it is 49
+  STATIC_CONST(MAX_NDB_DATA_NODES = MAX_DATA_NODE_ID);
+  NDB_STATIC_ASSERT(MAX_NDB_NODES == MAX_NDB_DATA_NODES + 1);
+
+  // Default partitioning is 1 partition per LDM
+  NDB_STATIC_ASSERT(MAX_NDB_DATA_NODES * MAX_NDBMT_LQH_THREADS <= MAX_NDB_PARTITIONS);
+}
+
+#endif
+
 #endif
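
The layout comment above makes the new sizing macros easy to check by hand. Assuming MAX_REPLICAS is 4 (its value elsewhere in ndb_limits.h), the expansion works out as follows; this is an illustration, not part of the patch:

  /* 2 header entries (#replicas, #fragments), then per fragment one
     log-part-id entry plus one node-id entry per replica, all Uint16: */
  MAX_FRAGMENT_DATA_ENTRIES = 2 + (1 + 4) * 1024 = 5122    /* Uint16 entries */
  MAX_FRAGMENT_DATA_BYTES   = 2 * 5122           = 10244   /* bytes          */
  MAX_FRAGMENT_DATA_WORDS   = (10244 + 3) / 4    = 2561    /* 32-bit words   */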

=== modified file 'storage/ndb/include/kernel/signaldata/CreateTable.hpp'
--- a/storage/ndb/include/kernel/signaldata/CreateTable.hpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/include/kernel/signaldata/CreateTable.hpp	2012-06-21 15:24:52 +0000
@@ -76,7 +76,8 @@ struct CreateTableRef {
     NoLoggingTemporaryTable = 778,
     InvalidHashMap = 790,
     TableDefinitionTooBig = 793,
-    FeatureRequiresUpgrade = 794
+    FeatureRequiresUpgrade = 794,
+    TooManyFragments = 1224
   };
 
   Uint32 senderRef;

=== modified file 'storage/ndb/include/kernel/signaldata/DictTabInfo.hpp'
--- a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp	2012-06-21 15:24:52 +0000
@@ -360,7 +360,7 @@ public:
     char   FrmData[MAX_FRM_DATA_SIZE];
     Uint32 FragmentCount;
     Uint32 ReplicaDataLen;
-    Uint16 ReplicaData[MAX_FRAGMENT_DATA_BYTES];
+    Uint16 ReplicaData[MAX_FRAGMENT_DATA_ENTRIES];
     Uint32 FragmentDataLen;
     Uint16 FragmentData[3*MAX_NDB_PARTITIONS];
 
@@ -802,7 +802,7 @@ struct DictHashMapInfo {
   struct HashMap {
     char   HashMapName[MAX_TAB_NAME_SIZE];
     Uint32 HashMapBuckets;
-    Uint16 HashMapValues[512];
+    Uint16 HashMapValues[NDB_DEFAULT_HASHMAP_BUCKETS];
     Uint32 HashMapObjectId;
     Uint32 HashMapVersion;
     HashMap() {}

=== modified file 'storage/ndb/include/kernel/signaldata/ScanTab.hpp'
--- a/storage/ndb/include/kernel/signaldata/ScanTab.hpp	2011-09-02 09:16:56 +0000
+++ b/storage/ndb/include/kernel/signaldata/ScanTab.hpp	2012-06-21 15:24:52 +0000
@@ -132,7 +132,14 @@ private:
 /**
  * Request Info
  *
- p = Parallelism           - 8  Bits -> Max 256 (Bit 0-7)
+ p = Parallelism           - 8  Bits -> Max 255 (Bit 0-7).
+                                        Note: these bits are ignored since
+                                        7.0.34, 7.1.23, 7.2.7 and should be
+                                        zero-filled until future reuse.
+                                        For signal sent to old nodes they
+                                        should be filled in.
+                                        Check version with
+                                        ndbd_scan_tabreq_implicit_parallelism().
  l = Lock mode             - 1  Bit 8
  h = Hold lock mode        - 1  Bit 10
  c = Read Committed        - 1  Bit 11

=== modified file 'storage/ndb/include/ndb_version.h.in'
--- a/storage/ndb/include/ndb_version.h.in	2012-06-13 19:32:58 +0000
+++ b/storage/ndb/include/ndb_version.h.in	2012-06-21 15:24:52 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2004, 2010, 2011, 2012, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -750,6 +750,28 @@ ndbd_128_instances_address(Uint32 x)
   return x >= NDBD_128_INSTANCES_ADDRESS_72;
 }
 
+#define NDBD_SCAN_TABREQ_IMPLICIT_PARALLELISM_70 NDB_MAKE_VERSION(7,0,34)
+#define NDBD_SCAN_TABREQ_IMPLICIT_PARALLELISM_71 NDB_MAKE_VERSION(7,1,23)
+#define NDBD_SCAN_TABREQ_IMPLICIT_PARALLELISM_72 NDB_MAKE_VERSION(7,2,7)
+
+static
+inline
+int
+ndbd_scan_tabreq_implicit_parallelism(Uint32 x)
+{
+  const Uint32 major = (x >> 16) & 0xFF;
+  const Uint32 minor = (x >>  8) & 0xFF;
+
+  if (major == 7 && minor < 2)
+  {
+    if (minor == 0)
+      return x >= NDBD_SCAN_TABREQ_IMPLICIT_PARALLELISM_70;
+    else if (minor == 1)
+      return x >= NDBD_SCAN_TABREQ_IMPLICIT_PARALLELISM_71;
+  }
+  return x >= NDBD_SCAN_TABREQ_IMPLICIT_PARALLELISM_72;
+}
+
 #define NDBD_FIXED_LOOKUP_QUERY_ABORT_72 NDB_MAKE_VERSION(7,2,5)
 
 static
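
Because the implicit-parallelism change was back-ported to three branches, a single >= comparison cannot express the cut-off; the helper dispatches on major.minor first. A few illustrative inputs, hand-evaluated from the function above:

  ndbd_scan_tabreq_implicit_parallelism(NDB_MAKE_VERSION(7,0,33)); /* 0: below 7.0.34 */
  ndbd_scan_tabreq_implicit_parallelism(NDB_MAKE_VERSION(7,1,23)); /* 1: at 7.1.23    */
  ndbd_scan_tabreq_implicit_parallelism(NDB_MAKE_VERSION(7,2,6));  /* 0: below 7.2.7  */
  ndbd_scan_tabreq_implicit_parallelism(NDB_MAKE_VERSION(7,3,0));  /* 1: 7.2.7 and up */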

=== modified file 'storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp'
--- a/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp	2012-06-21 15:24:52 +0000
@@ -48,8 +48,8 @@ DictTabInfo::TableMapping[] = {
   DTIMAP2(Table, FrmLen, FrmLen, 0, MAX_FRM_DATA_SIZE),
   DTIMAPB(Table, FrmData, FrmData, 0, MAX_FRM_DATA_SIZE, FrmLen),
   DTIMAP2(Table, FragmentCount, FragmentCount, 0, MAX_NDB_PARTITIONS),
-  DTIMAP2(Table, ReplicaDataLen, ReplicaDataLen, 0, 2*MAX_FRAGMENT_DATA_BYTES),
-  DTIMAPB(Table, ReplicaData, ReplicaData, 0, 2*MAX_FRAGMENT_DATA_BYTES, ReplicaDataLen),
+  DTIMAP2(Table, ReplicaDataLen, ReplicaDataLen, 0, MAX_FRAGMENT_DATA_BYTES),
+  DTIMAPB(Table, ReplicaData, ReplicaData, 0, MAX_FRAGMENT_DATA_BYTES, ReplicaDataLen),
   DTIMAP2(Table, FragmentDataLen, FragmentDataLen, 0, 6*MAX_NDB_PARTITIONS),
   DTIMAPB(Table, FragmentData, FragmentData, 0, 6*MAX_NDB_PARTITIONS, FragmentDataLen),
   DTIMAP2(Table, TablespaceDataLen, TablespaceDataLen, 0, 8*MAX_NDB_PARTITIONS),
@@ -337,14 +337,15 @@ const
 SimpleProperties::SP2StructMapping
 DictHashMapInfo::Mapping[] = {
   DHMIMAPS(HashMap, HashMapName, HashMapName, 0, MAX_TAB_NAME_SIZE),
-  DHMIMAP2(HashMap, HashMapBuckets, HashMapBuckets, 0, 256),
+  DHMIMAP2(HashMap, HashMapBuckets, HashMapBuckets, 0, NDB_DEFAULT_HASHMAP_BUCKETS),
   DTIMAP(HashMap, HashMapObjectId, HashMapObjectId),
   DTIMAP(HashMap, HashMapVersion, HashMapVersion),
 
   /**
    * This *should* change to Uint16 or similar once endian is pushed
    */
-  DHMIMAPB(HashMap, HashMapValues, HashMapValues, 0, 256*2, HashMapBuckets)
+  DHMIMAPB(HashMap, HashMapValues, HashMapValues, 0,
+           NDB_DEFAULT_HASHMAP_BUCKETS_BYTES, HashMapBuckets)
 };
 
 //static
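
NDB_DEFAULT_HASHMAP_BUCKETS is now 48 * 16 * 5 = 3840 = 2^8 * 3 * 5, chosen (per the ndb_limits.h comment) so that as many data node counts as possible divide it evenly and all partitions map to the same number of buckets. A quick check, for illustration only:

  #include <stdio.h>
  int main(void)
  {
    /* node counts up to 48 that divide 3840 evenly:
       1 2 3 4 5 6 8 10 12 15 16 20 24 30 32 40 48 */
    for (unsigned nodes = 1; nodes <= 48; nodes++)
      if (3840 % nodes == 0)
        printf("%2u nodes -> %4u buckets each\n", nodes, 3840 / nodes);
    return 0;
  }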

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2012-02-23 15:41:31 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2012-06-21 15:24:52 +0000
@@ -4931,9 +4931,12 @@ void Dbdict::handleTabInfoInit(Signal * 
       tablePtr.p->fragmentCount = fragments = get_default_fragments(signal);
     }
 
+    tabRequire(fragments <= MAX_NDB_PARTITIONS,
+               CreateTableRef::TooManyFragments);
+
     char buf[MAX_TAB_NAME_SIZE+1];
     BaseString::snprintf(buf, sizeof(buf), "DEFAULT-HASHMAP-%u-%u",
-                         NDB_DEFAULT_HASHMAP_BUCKTETS,
+                         NDB_DEFAULT_HASHMAP_BUCKETS,
                          fragments);
     DictObject* dictObj = get_object(buf);
     if (dictObj && dictObj->m_type == DictTabInfo::HashMap)
@@ -6452,17 +6455,18 @@ Dbdict::createTab_dih(Signal* signal, Sc
 
   // fragmentation in long signal section
   {
-    Uint32 page[1024];
+    Uint32 page[MAX_FRAGMENT_DATA_WORDS];
     LinearSectionPtr ptr[3];
     Uint32 noOfSections = 0;
 
     const Uint32 size = fragSec.getSize();
+    ndbrequire(size <= NDB_ARRAY_SIZE(page));
 
     // wl3600_todo add ndbrequire on SR, NR
     if (size != 0) {
       jam();
       LocalArenaPoolImpl op_sec_pool(op_ptr.p->m_trans_ptr.p->m_arena,c_opSectionBufferPool);
-      bool ok = copyOut(op_sec_pool, fragSec, page, 1024);
+      bool ok = copyOut(op_sec_pool, fragSec, page, size);
       ndbrequire(ok);
       ptr[noOfSections].sz = size;
       ptr[noOfSections].p = page;
@@ -23053,7 +23057,7 @@ Dbdict::createNodegroup_subOps(Signal* s
      *   and still continue transaction
      *   but that i dont know how
      */
-    Uint32 buckets = 240;
+    Uint32 buckets = NDB_DEFAULT_HASHMAP_BUCKETS;
     Uint32 fragments = get_default_fragments(signal, 1);
     char buf[MAX_TAB_NAME_SIZE+1];
     BaseString::snprintf(buf, sizeof(buf), "DEFAULT-HASHMAP-%u-%u",
@@ -28615,7 +28619,7 @@ Dbdict::createHashMap_parse(Signal* sign
     if (impl_req->requestType & CreateHashMapReq::CreateDefault)
     {
       jam();
-      impl_req->buckets = NDB_DEFAULT_HASHMAP_BUCKTETS;
+      impl_req->buckets = NDB_DEFAULT_HASHMAP_BUCKETS;
       impl_req->fragments = 0;
     }
 
@@ -28628,6 +28632,13 @@ Dbdict::createHashMap_parse(Signal* sign
       fragments = get_default_fragments(signal);
     }
 
+    if (fragments > MAX_NDB_PARTITIONS)
+    {
+      jam();
+      setError(error, CreateTableRef::TooManyFragments, __LINE__);
+      return;
+    }
+
     BaseString::snprintf(hm.HashMapName, sizeof(hm.HashMapName),
                          "DEFAULT-HASHMAP-%u-%u",
                          buckets,
@@ -28858,13 +28869,18 @@ Dbdict::createHashMap_parse(Signal* sign
     Uint32 tmp = 0;
     for (Uint32 i = 0; i<hm.HashMapBuckets; i++)
     {
-      ndbrequire(hm.HashMapValues[i] < 256);
-      map_ptr.p->m_map[i] = (Uint8)hm.HashMapValues[i];
+      map_ptr.p->m_map[i] = hm.HashMapValues[i];
       if (hm.HashMapValues[i] > tmp)
         tmp = hm.HashMapValues[i];
     }
     map_ptr.p->m_fragments = tmp + 1;
   }
+  if (map_ptr.p->m_fragments > MAX_NDB_PARTITIONS)
+  {
+    jam();
+    setError(error, CreateTableRef::TooManyFragments, __LINE__);
+    goto error;
+  }
 
   if (ERROR_INSERTED(6211))
   {

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp	2012-02-03 13:37:34 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp	2012-06-21 15:24:52 +0000
@@ -100,12 +100,30 @@
 /*#########*/
 /* SIZES   */
 /*#########*/
-#define ZPAGEREC 100
+/*
+ * Only pages enough for one table needed, since only
+ * one metadata change at the time is allowed.
+ */
+#define ZPAGEREC PACK_TABLE_PAGES
 #define ZCREATE_REPLICA_FILE_SIZE 4
 #define ZPROXY_MASTER_FILE_SIZE 10
 #define ZPROXY_FILE_SIZE 10
 #endif
 
+/*
+ * Pack table into pages.
+ * See use of writePageWord() in
+ * packTableIntoPagesLab() and helper
+ * functions to determine the constants
+ * below.
+ */
+#define MAX_CRASHED_REPLICAS 8
+#define PACK_REPLICAS_WORDS (4 + 4 * MAX_LCP_STORED + 2 * MAX_CRASHED_REPLICAS)
+#define PACK_FRAGMENT_WORDS (6 + 2 * MAX_REPLICAS * PACK_REPLICAS_WORDS)
+#define PACK_TABLE_WORDS (10 + MAX_NDB_PARTITIONS * PACK_FRAGMENT_WORDS)
+#define PACK_TABLE_PAGE_WORDS (2048 - 32)
+#define PACK_TABLE_PAGES ((PACK_TABLE_WORDS + PACK_TABLE_PAGE_WORDS - 1) / PACK_TABLE_PAGE_WORDS)
+
 class Dbdih: public SimulatedBlock {
 #ifdef ERROR_INSERT
   typedef void (Dbdih::* SendFunction)(Signal*, Uint32, Uint32);
@@ -515,12 +533,12 @@ public:
     Method method;
     Storage tabStorage;
 
-    Uint32 pageRef[32];
+    Uint32 pageRef[PACK_TABLE_PAGES]; // TODO: makedynamic
 //-----------------------------------------------------------------------------
 // Each entry in this array contains a reference to 16 fragment records in a
 // row. Thus finding the correct record is very quick provided the fragment id.
 //-----------------------------------------------------------------------------
-    Uint32 startFid[MAX_NDB_NODES * MAX_FRAG_PER_LQH / NO_OF_FRAGS_PER_CHUNK];
+    Uint32 startFid[(MAX_NDB_PARTITIONS - 1) / NO_OF_FRAGS_PER_CHUNK + 1];
 
     Uint32 tabFile[2];
     Uint32 connectrec;                                    
@@ -547,7 +565,7 @@ public:
 
     Uint8 kvalue;
     Uint8 noOfBackups;
-    Uint8 noPages;
+    Uint16 noPages;
     Uint16 tableType;
     Uint16 primaryTableId;
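
With the macros above, ZPAGEREC becomes derivable instead of a magic 100. Assuming MAX_LCP_STORED is 3 and MAX_REPLICAS is 4 (their values in contemporary trees; an assumption here), the arithmetic lands on 134 pages, matching the new NDB_FS_RW_PAGES in ndb_limits.h:

  PACK_REPLICAS_WORDS   = 4 + 4*3 + 2*8        = 32
  PACK_FRAGMENT_WORDS   = 6 + 2*4*32           = 262
  PACK_TABLE_WORDS      = 10 + 1024*262        = 268298
  PACK_TABLE_PAGE_WORDS = 2048 - 32            = 2016
  PACK_TABLE_PAGES      = ceil(268298 / 2016)  = 134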
 

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2012-04-24 14:41:37 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2012-06-21 15:24:52 +0000
@@ -85,7 +85,6 @@
 extern EventLogger * g_eventLogger;
 
 #define SYSFILE ((Sysfile *)&sysfileData[0])
-#define MAX_CRASHED_REPLICAS 8
 #define ZINIT_CREATE_GCI Uint32(0)
 #define ZINIT_REPLICA_LAST_GCI Uint32(-1)
 
@@ -7981,7 +7980,7 @@ void Dbdih::execDIADDTABREQ(Signal* sign
   }
 
   union {
-    Uint16 fragments[2 + MAX_FRAG_PER_LQH*MAX_REPLICAS*MAX_NDB_NODES];
+    Uint16 fragments[MAX_FRAGMENT_DATA_ENTRIES];
     Uint32 align;
   };
   (void)align; // kill warning
@@ -17457,6 +17456,7 @@ void Dbdih::writeTabfile(Signal* signal,
   signal->theData[4] = ZVAR_NO_WORD;
   signal->theData[5] = tab->noPages;
 
+  NDB_STATIC_ASSERT(NDB_ARRAY_SIZE(tab->pageRef) <= NDB_FS_RW_PAGES);
   Uint32 section[2 * NDB_ARRAY_SIZE(tab->pageRef)];
   for (Uint32 i = 0; i < tab->noPages; i++)
   {

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2012-05-07 07:51:09 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2012-06-25 11:35:54 +0000
@@ -5025,6 +5025,7 @@ void Dbtc::seizeApiConnectCopy(Signal* s
   ptrCheckGuard(locApiConnectptr, TapiConnectFilesize, localApiConnectRecord);
   cfirstfreeApiConnectCopy = locApiConnectptr.p->nextApiConnect;
   locApiConnectptr.p->nextApiConnect = RNIL;
+  ndbassert(regApiPtr->apiCopyRecord == RNIL);
   regApiPtr->apiCopyRecord = locApiConnectptr.i;
   tc_clearbit(regApiPtr->m_flags,
               ApiConnectRecord::TF_TRIGGER_PENDING);
@@ -5067,7 +5068,9 @@ void Dbtc::execDIVERIFYCONF(Signal* sign
    * WE WILL INSERT THE TRANSACTION INTO ITS PROPER QUEUE OF 
    * TRANSACTIONS FOR ITS GLOBAL CHECKPOINT.              
    *-------------------------------------------------------------------------*/
-  if (TApifailureNr != Tfailure_nr) {
+  if (TApifailureNr != Tfailure_nr ||
+      ERROR_INSERTED(8094)) {
+    jam();
     DIVER_node_fail_handling(signal, Tgci);
     return;
   }//if
@@ -5563,7 +5566,12 @@ Dbtc::sendApiCommit(Signal* signal)
 err8055:
   Ptr<ApiConnectRecord> copyPtr;
   UintR TapiConnectFilesize = capiConnectFilesize;
+  /**
+   * Unlink copy connect record from main connect record to allow main record 
+   * re-use.
+   */
   copyPtr.i = regApiPtr.p->apiCopyRecord;
+  regApiPtr.p->apiCopyRecord = RNIL;
   UintR TapiFailState = regApiPtr.p->apiFailState;
   ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
 
@@ -6209,6 +6217,7 @@ void Dbtc::handleGcp(Signal* signal, Ptr
 void Dbtc::releaseApiConCopy(Signal* signal) 
 {
   ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+  ndbassert(regApiPtr->nextApiConnect == RNIL);
   UintR TfirstfreeApiConnectCopyOld = cfirstfreeApiConnectCopy;
   cfirstfreeApiConnectCopy = apiConnectptr.i;
   regApiPtr->nextApiConnect = TfirstfreeApiConnectCopyOld;
@@ -10443,9 +10452,7 @@ void Dbtc::execSCAN_TABREQ(Signal* signa
   const Uint32 buddyPtr = (tmpXX == 0xFFFFFFFF ? RNIL : tmpXX);
   Uint32 currSavePointId = 0;
   
-  Uint32 scanConcurrency = scanTabReq->getParallelism(ri);
   Uint32 noOprecPerFrag = ScanTabReq::getScanBatch(ri);
-  Uint32 scanParallel = scanConcurrency;
   Uint32 errCode;
   ScanRecordPtr scanptr;
 
@@ -10461,6 +10468,9 @@ void Dbtc::execSCAN_TABREQ(Signal* signa
   SectionHandle handle(this, signal);
   SegmentedSectionPtr api_op_ptr;
   handle.getSection(api_op_ptr, 0);
+  // Scan parallelism is determined by the number of receiver ids sent
+  Uint32 scanParallel = api_op_ptr.sz;
+  Uint32 scanConcurrency = scanParallel;
   Uint32 * apiPtr = signal->theData+25; // temp storage
   copy(apiPtr, api_op_ptr);
 
@@ -12255,6 +12265,7 @@ void Dbtc::initApiConnect(Signal* signal
     apiConnectptr.p->currSavePointId = 0;
     apiConnectptr.p->m_transaction_nodes.clear();
     apiConnectptr.p->singleUserMode = 0;
+    apiConnectptr.p->apiCopyRecord = RNIL;
   }//for
   apiConnectptr.i = tiacTmp - 1;
   ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
@@ -12284,6 +12295,7 @@ void Dbtc::initApiConnect(Signal* signal
       apiConnectptr.p->currSavePointId = 0;
       apiConnectptr.p->m_transaction_nodes.clear();
       apiConnectptr.p->singleUserMode = 0;
+      apiConnectptr.p->apiCopyRecord = RNIL;
     }//for
   apiConnectptr.i = (2 * tiacTmp) - 1;
   ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
@@ -12313,6 +12325,7 @@ void Dbtc::initApiConnect(Signal* signal
     apiConnectptr.p->currSavePointId = 0;
     apiConnectptr.p->m_transaction_nodes.clear();
     apiConnectptr.p->singleUserMode = 0;
+    apiConnectptr.p->apiCopyRecord = RNIL;
   }//for
   apiConnectptr.i = (3 * tiacTmp) - 1;
   ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
@@ -12573,6 +12586,19 @@ void Dbtc::releaseAbortResources(Signal*
   TcConnectRecordPtr rarTcConnectptr;
 
   c_counters.cabortCount++;
+  if (apiConnectptr.p->apiCopyRecord != RNIL)
+  {
+    // Put apiCopyRecord back in free list.
+    jam();
+    ApiConnectRecordPtr copyPtr;
+    copyPtr.i = apiConnectptr.p->apiCopyRecord;
+    ptrCheckGuard(copyPtr, capiConnectFilesize, apiConnectRecord);
+    ndbassert(copyPtr.p->apiCopyRecord == RNIL);
+    ndbassert(copyPtr.p->nextApiConnect == RNIL);
+    copyPtr.p->nextApiConnect = cfirstfreeApiConnectCopy;
+    cfirstfreeApiConnectCopy = copyPtr.i;
+    apiConnectptr.p->apiCopyRecord = RNIL;
+  }
   if (apiConnectptr.p->cachePtr != RNIL) {
     cachePtr.i = apiConnectptr.p->cachePtr;
     ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
@@ -12675,6 +12701,8 @@ void Dbtc::releaseApiCon(Signal* signal,
 
   TlocalApiConnectptr.i = TapiConnectPtr;
   ptrCheckGuard(TlocalApiConnectptr, capiConnectFilesize, apiConnectRecord);
+  ndbassert(TlocalApiConnectptr.p->apiCopyRecord == RNIL);
+  ndbassert(TlocalApiConnectptr.p->nextApiConnect == RNIL);
   TlocalApiConnectptr.p->nextApiConnect = cfirstfreeApiConnect;
   cfirstfreeApiConnect = TlocalApiConnectptr.i;
   setApiConTimer(TlocalApiConnectptr.i, 0, __LINE__);
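
The apiCopyRecord leak fix is easiest to follow against the intrusive free list it repairs: copy records are chained through nextApiConnect with RNIL as terminator and cfirstfreeApiConnectCopy as head, so a record must be unlinked from its owning connect record before being pushed back. The idiom, condensed from the hunks above (a sketch, not a real Dbtc member function):

  /* push: return a copy record to the free list (releaseAbortResources) */
  copyPtr.p->nextApiConnect = cfirstfreeApiConnectCopy;
  cfirstfreeApiConnectCopy  = copyPtr.i;
  apiConnectptr.p->apiCopyRecord = RNIL;          /* unlink from owner */

  /* pop: seize a copy record from the free list (seizeApiConnectCopy) */
  locApiConnectptr.i = cfirstfreeApiConnectCopy;
  cfirstfreeApiConnectCopy = locApiConnectptr.p->nextApiConnect;
  locApiConnectptr.p->nextApiConnect = RNIL;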

=== modified file 'storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp	2012-02-23 15:41:31 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp	2012-06-21 15:24:52 +0000
@@ -2182,7 +2182,7 @@ Ndbcntr::createHashMap(Signal* signal, U
   req->requestInfo = 0;
   req->transId = c_schemaTransId;
   req->transKey = c_schemaTransKey;
-  req->buckets = 240;
+  req->buckets = NDB_DEFAULT_HASHMAP_BUCKETS;
   req->fragments = 0;
   sendSignal(DBDICT_REF, GSN_CREATE_HASH_MAP_REQ, signal,
 	     CreateHashMapReq::SignalLength, JBB);

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.hpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.hpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.hpp	2012-06-21 15:24:52 +0000
@@ -82,7 +82,7 @@ public:
 	char *buf;
 	size_t size;
 	off_t offset;
-      } pages[32];
+      } pages[NDB_FS_RW_PAGES];
     } readWrite;
     struct {
       const char * buf;

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp	2011-11-18 06:47:23 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp	2012-06-21 15:24:52 +0000
@@ -585,7 +585,7 @@ Ndbfs::execFSCLOSEREQ(Signal * signal)
 void 
 Ndbfs::readWriteRequest(int action, Signal * signal)
 {
-  Uint32 theData[25 + 2 * 32];
+  Uint32 theData[25 + 2 * NDB_FS_RW_PAGES];
   memcpy(theData, signal->theData, 4 * signal->getLength());
   SectionHandle handle(this, signal);
   if (handle.m_cnt > 0)

=== modified file 'storage/ndb/src/kernel/blocks/suma/Suma.hpp'
--- a/storage/ndb/src/kernel/blocks/suma/Suma.hpp	2011-12-02 13:16:43 +0000
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.hpp	2012-06-21 15:24:52 +0000
@@ -119,11 +119,13 @@ public:
   void suma_ndbrequire(bool v);
 
   // wl4391_todo big enough for now
+  // Keep m_fragDesc within 32 bit,
+  // m_dummy is used to pass value.
   union FragmentDescriptor { 
     struct  {
-      Uint8 m_fragmentNo;
+      Uint16 m_fragmentNo;
       Uint8 m_lqhInstanceKey;
-      Uint16 m_nodeId;
+      Uint8 m_nodeId;
     } m_fragDesc;
     Uint32 m_dummy;
   };
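
The comment notes that m_fragDesc must stay within the 32 bits of m_dummy; widening m_fragmentNo to Uint16 (to allow more than 256 fragments) therefore forces m_nodeId down to Uint8, keeping the struct at 16 + 8 + 8 = 32 bits. A compile-time guard in the same spirit might look like this (a sketch, not part of the patch):

  NDB_STATIC_ASSERT(sizeof(Suma::FragmentDescriptor) == sizeof(Uint32));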

=== modified file 'storage/ndb/src/kernel/vm/SimulatedBlock.hpp'
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp	2012-02-23 15:41:31 +0000
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp	2012-06-21 15:24:52 +0000
@@ -1362,10 +1362,10 @@ SectionHandle::~SectionHandle()
 
 struct Hash2FragmentMap
 {
-  STATIC_CONST( MAX_MAP = 240 );
+  STATIC_CONST( MAX_MAP = NDB_DEFAULT_HASHMAP_BUCKETS );
   Uint32 m_cnt;
   Uint32 m_fragments;
-  Uint8 m_map[MAX_MAP];
+  Uint16 m_map[MAX_MAP];
   Uint32 nextPool;
   Uint32 m_object_id;
 };

=== modified file 'storage/ndb/src/kernel/vm/mt.cpp'
--- a/storage/ndb/src/kernel/vm/mt.cpp	2012-05-23 06:40:37 +0000
+++ b/storage/ndb/src/kernel/vm/mt.cpp	2012-06-18 13:54:07 +0000
@@ -4106,10 +4106,10 @@ may_communicate(unsigned from, unsigned 
   }
   else if (is_tc_thread(from))
   {
-    // TC threads can communicate with LQH-, main- and itself
+    // TC threads can communicate with SPJ-, LQH-, main- and itself
     return is_main_thread(to) ||
            is_ldm_thread(to)  ||
-           (to == from);
+           is_tc_thread(to);      // Cover both SPJs and itself 
   }
   else
   {
@@ -4426,12 +4426,13 @@ compute_jb_pages(struct EmulatorData * e
          job_queue_pages_per_thread;
 
   /**
-   * TC threads can communicate with LQH threads and main threads.
+   * TC threads can communicate with SPJ-, LQH- and main threads.
    * Cannot communicate with receive threads and other TC threads,
-   * but it can communicate with itself.
+   * but as SPJ is located together with TC, it is counted as it
+   * communicate with all TC threads.
    */
   tot += num_tc_threads *
-         (num_lqh_threads + num_main_threads + 1) *
+         (num_lqh_threads + num_main_threads + num_tc_threads) *
          job_queue_pages_per_thread;
 
   /**
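
The page budget follows directly from may_communicate(): with SPJ co-located in the TC threads, each TC thread now needs a job-buffer queue towards every TC thread rather than only itself. For an illustrative configuration with 4 TC, 8 LQH and 2 main threads (numbers assumed, not from the patch), the TC term grows accordingly:

  old: tot += 4 * (8 + 2 + 1) * job_queue_pages_per_thread;   /* 44 queues */
  new: tot += 4 * (8 + 2 + 4) * job_queue_pages_per_thread;   /* 56 queues */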

=== modified file 'storage/ndb/src/ndbapi/NdbDictionary.cpp'
--- a/storage/ndb/src/ndbapi/NdbDictionary.cpp	2012-02-23 15:41:31 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp	2012-06-21 15:24:52 +0000
@@ -1896,7 +1896,7 @@ NdbDictionary::Dictionary::getDefaultHas
 {
   BaseString tmp;
   tmp.assfmt("DEFAULT-HASHMAP-%u-%u",
-             NDB_DEFAULT_HASHMAP_BUCKTETS, fragments);
+             NDB_DEFAULT_HASHMAP_BUCKETS, fragments);
 
   return getHashMap(dst, tmp.c_str());
 }
@@ -1928,12 +1928,12 @@ NdbDictionary::Dictionary::initDefaultHa
 {
   BaseString tmp;
   tmp.assfmt("DEFAULT-HASHMAP-%u-%u",
-             NDB_DEFAULT_HASHMAP_BUCKTETS, fragments);
+             NDB_DEFAULT_HASHMAP_BUCKETS, fragments);
 
   dst.setName(tmp.c_str());
 
   Vector<Uint32> map;
-  for (Uint32 i = 0; i<NDB_DEFAULT_HASHMAP_BUCKTETS; i++)
+  for (Uint32 i = 0; i < NDB_DEFAULT_HASHMAP_BUCKETS; i++)
   {
     map.push_back(i % fragments);
   }
@@ -2057,14 +2057,14 @@ retry:
     if (cnt == 0)
     {
       newmap.m_name.assfmt("HASHMAP-%u-%u-%u",
-                           NDB_DEFAULT_HASHMAP_BUCKTETS,
+                           NDB_DEFAULT_HASHMAP_BUCKETS,
                            oldcnt,
                            newcnt);
     }
     else
     {
       newmap.m_name.assfmt("HASHMAP-%u-%u-%u-#%u",
-                           NDB_DEFAULT_HASHMAP_BUCKTETS,
+                           NDB_DEFAULT_HASHMAP_BUCKETS,
                            oldcnt,
                            newcnt,
                            cnt);

=== modified file 'storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2012-03-19 12:09:39 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2012-06-21 15:24:52 +0000
@@ -2539,7 +2539,7 @@ NdbDictInterface::getTable(class NdbApiS
       }
       for (Uint32 i = 0; i<tmp.m_map.size(); i++)
       {
-        assert(tmp.m_map[i] <= 255);
+        assert(tmp.m_map[i] <= NDB_PARTITION_MASK);
         rt->m_hash_map.push_back(tmp.m_map[i]);
       }
     }
@@ -8216,6 +8216,7 @@ NdbDictInterface::create_hashmap(const N
   hm.HashMapBuckets = src.getMapLen();
   for (Uint32 i = 0; i<hm.HashMapBuckets; i++)
   {
+    assert(NdbHashMapImpl::getImpl(src).m_map[i] <= NDB_PARTITION_MASK);
     hm.HashMapValues[i] = NdbHashMapImpl::getImpl(src).m_map[i];
   }
 

=== modified file 'storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp	2012-06-21 15:24:52 +0000
@@ -219,7 +219,7 @@ public:
   Uint32 m_hashValueMask;
   Uint32 m_hashpointerValue;
   Vector<Uint16> m_fragments;
-  Vector<Uint8> m_hash_map;
+  Vector<Uint16> m_hash_map;
 
   Uint64 m_max_rows;
   Uint64 m_min_rows;

=== modified file 'storage/ndb/src/ndbapi/NdbQueryOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2012-05-08 09:23:25 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2012-06-25 13:08:47 +0000
@@ -3105,7 +3105,21 @@ NdbQueryImpl::doSend(int nodeId, bool la
 
     ScanTabReq::setViaSPJFlag(reqInfo, 1);
     ScanTabReq::setPassAllConfsFlag(reqInfo, 1);
-    ScanTabReq::setParallelism(reqInfo, getRootFragCount());
+
+    Uint32 nodeVersion = impl->getNodeNdbVersion(nodeId);
+    if (!ndbd_scan_tabreq_implicit_parallelism(nodeVersion))
+    {
+      // Implicit parallelism implies support for greater
+      // parallelism than storable explicitly in old reqInfo.
+      Uint32 fragments = getRootFragCount();
+      if (fragments > PARALLEL_MASK)
+      {
+        setErrorCode(Err_SendFailed /* TODO: TooManyFragments, to too old cluster version */);
+        return -1;
+      }
+      ScanTabReq::setParallelism(reqInfo, fragments);
+    }
+
     ScanTabReq::setRangeScanFlag(reqInfo, rangeScan);
     ScanTabReq::setDescendingFlag(reqInfo, descending);
     ScanTabReq::setTupScanFlag(reqInfo, tupScan);
@@ -5205,7 +5219,7 @@ int NdbQueryOperationImpl::setParallelis
     getQuery().setErrorCode(Err_FunctionNotImplemented);
     return -1;
   }
-  else if (parallelism < 1 || parallelism > MAX_NDB_PARTITIONS)
+  else if (parallelism < 1 || parallelism > NDB_PARTITION_MASK)
   {
     getQuery().setErrorCode(Err_ParameterError);
     return -1;

=== modified file 'storage/ndb/src/ndbapi/NdbScanOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp	2011-11-16 08:17:17 +0000
+++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp	2012-06-21 15:24:52 +0000
@@ -1413,6 +1413,9 @@ NdbScanOperation::processTableScanDefs(N
     return -1;
   }//if
   
+  NdbImpl* impl = theNdb->theImpl;
+  Uint32 nodeId = theNdbCon->theDBnode;
+  Uint32 nodeVersion = impl->getNodeNdbVersion(nodeId);
   theSCAN_TABREQ->setSignal(GSN_SCAN_TABREQ, refToBlock(theNdbCon->m_tcRef));
   ScanTabReq * req = CAST_PTR(ScanTabReq, theSCAN_TABREQ->getDataPtrSend());
   req->apiConnectPtr = theNdbCon->theTCConPtr;
@@ -1424,7 +1427,17 @@ NdbScanOperation::processTableScanDefs(N
   req->first_batch_size = batch; // Save user specified batch size
   
   Uint32 reqInfo = 0;
-  ScanTabReq::setParallelism(reqInfo, parallel);
+  if (!ndbd_scan_tabreq_implicit_parallelism(nodeVersion))
+  {
+    // Implicit parallelism implies support for greater parallelism
+    // than can be stored explicitly in the old reqInfo.
+    if (parallel > PARALLEL_MASK)
+    {
+      setErrorCodeAbort(4000 /* TODO: TooManyFragments, when sent to a too-old cluster version */);
+      return -1;
+    }
+    ScanTabReq::setParallelism(reqInfo, parallel);
+  }
   ScanTabReq::setScanBatch(reqInfo, 0);
   ScanTabReq::setRangeScanFlag(reqInfo, rangeScan);
   ScanTabReq::setTupScanFlag(reqInfo, tupScan);

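The same guard is needed here because reqInfo packs several fields into a single 32-bit word; a parallelism value wider than its field would bleed into the neighbouring flag bits. A sketch of the mask-and-set idiom with invented bit positions (ScanTabReq's real layout differs):

    #include <cassert>
    #include <cstdint>

    static const uint32_t kParallelismMask = 0xFF;     // assumed 8-bit field
    static const uint32_t kRangeScanBit    = 1u << 8;  // invented position
    static const uint32_t kTupScanBit      = 1u << 9;  // invented position

    static void setParallelism(uint32_t& w, uint32_t p)
    {
      assert(p <= kParallelismMask);     // overflow would corrupt the flag bits
      w = (w & ~kParallelismMask) | p;
    }

    static void setRangeScanFlag(uint32_t& w, bool on)
    {
      w = on ? (w | kRangeScanBit) : (w & ~kRangeScanBit);
    }

    int main()
    {
      uint32_t reqInfo = 0;
      setParallelism(reqInfo, 16);
      setRangeScanFlag(reqInfo, true);
      return (reqInfo == (16u | kRangeScanBit)) ? 0 : 1;
    }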
=== modified file 'storage/ndb/test/include/NdbBackup.hpp'
--- a/storage/ndb/test/include/NdbBackup.hpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/test/include/NdbBackup.hpp	2012-06-25 13:08:04 +0000
@@ -26,8 +26,8 @@
 
 class NdbBackup : public NdbConfig {
 public:
-  NdbBackup(int _own_id, const char* _addr = 0) 
-    : NdbConfig(_own_id, _addr) {};
+  NdbBackup(const char* _addr = 0)
+    : NdbConfig(_addr) {};
 
   int start(unsigned & _backup_id,
 	    int flags = 2,

=== modified file 'storage/ndb/test/include/NdbConfig.hpp'
--- a/storage/ndb/test/include/NdbConfig.hpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/test/include/NdbConfig.hpp	2012-06-25 12:55:27 +0000
@@ -27,15 +27,12 @@
 
 class NdbConfig : public NdbRestarter {
 public:
-  NdbConfig(int own_id, const char* addr = 0) 
-    : NdbRestarter(addr), 
-      ownNodeId(own_id) {};
+  NdbConfig(const char* addr = 0)
+    : NdbRestarter(addr)
+  {};
 
   bool getProperty(unsigned nodeid, unsigned type, unsigned key, Uint32 * val);
-
   bool getHostName(unsigned int node_id, const char ** hostname);
-  //protected:  
-  int ownNodeId;
 };
 
 #endif

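With ownNodeId gone, callers construct NdbConfig without arguments and resolve the target node at the point of use, as the testTimeout.cpp hunks further down show. A sketch of the call-site shape after the change (NODE_TYPE_DB and the CFG_* ids are the real constants; the surrounding scaffolding is illustrative):

    // Call-site shape after this change (mirrors the testTimeout.cpp hunks below).
    Uint32 timeout = 0;
    NdbConfig conf;                               // no node id argument any more
    if (!conf.getProperty(conf.getMasterNodeId(), // resolve the master at call time
                          NODE_TYPE_DB,
                          CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT,
                          &timeout))
    {
      return NDBT_FAILED;
    }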
=== modified file 'storage/ndb/test/ndbapi/testBackup.cpp'
--- a/storage/ndb/test/ndbapi/testBackup.cpp	2011-12-08 14:37:07 +0000
+++ b/storage/ndb/test/ndbapi/testBackup.cpp	2012-06-25 12:55:27 +0000
@@ -35,7 +35,7 @@ int
 clearOldBackups(NDBT_Context* ctx, NDBT_Step* step)
 {
   strcpy(tabname, ctx->getTab()->getName());
-  NdbBackup backup(GETNDB(step)->getNodeId());
+  NdbBackup backup;
   backup.clearOldBackups();
   return NDBT_OK;
 }
@@ -70,7 +70,7 @@ int setSlave(NDBT_Context* ctx, NDBT_Ste
 }
 
 int runAbort(NDBT_Context* ctx, NDBT_Step* step){
-  NdbBackup backup(GETNDB(step)->getNodeId()+1);
+  NdbBackup backup;
 
   NdbRestarter restarter;
 
@@ -104,7 +104,7 @@ int runAbort(NDBT_Context* ctx, NDBT_Ste
 }
 
 int runFail(NDBT_Context* ctx, NDBT_Step* step){
-  NdbBackup backup(GETNDB(step)->getNodeId()+1);
+  NdbBackup backup;
 
   NdbRestarter restarter;
 
@@ -138,7 +138,7 @@ int runFail(NDBT_Context* ctx, NDBT_Step
 }
 
 int runBackupOne(NDBT_Context* ctx, NDBT_Step* step){
-  NdbBackup backup(GETNDB(step)->getNodeId()+1);
+  NdbBackup backup;
   unsigned backupId = 0;
 
   if (backup.start(backupId) == -1){
@@ -151,7 +151,7 @@ int runBackupOne(NDBT_Context* ctx, NDBT
 }
 
 int runBackupRandom(NDBT_Context* ctx, NDBT_Step* step){
-  NdbBackup backup(GETNDB(step)->getNodeId()+1);
+  NdbBackup backup;
   unsigned backupId = rand() % (MAX_BACKUPS);
 
   if (backup.start(backupId) == -1){
@@ -165,7 +165,7 @@ int runBackupRandom(NDBT_Context* ctx, N
 
 int
 runBackupLoop(NDBT_Context* ctx, NDBT_Step* step){
-  NdbBackup backup(GETNDB(step)->getNodeId()+1);
+  NdbBackup backup;
   
   int loops = ctx->getNumLoops();
   while(!ctx->isTestStopped() && loops--)
@@ -233,7 +233,7 @@ int runDropTablesRestart(NDBT_Context* c
 }
 
 int runRestoreOne(NDBT_Context* ctx, NDBT_Step* step){
-  NdbBackup backup(GETNDB(step)->getNodeId()+1);
+  NdbBackup backup;
   unsigned backupId = ctx->getProperty("BackupId"); 
 
   ndbout << "Restoring backup " << backupId << endl;
@@ -379,7 +379,7 @@ int runBackupBank(NDBT_Context* ctx, NDB
   int l = 0;
   int maxSleep = 30; // Max seconds between each backup
   Ndb* pNdb = GETNDB(step);
-  NdbBackup backup(GETNDB(step)->getNodeId()+1);
+  NdbBackup backup;
   unsigned minBackupId = ~0;
   unsigned maxBackupId = 0;
   unsigned backupId = 0;
@@ -425,7 +425,7 @@ int runBackupBank(NDBT_Context* ctx, NDB
 
 int runRestoreBankAndVerify(NDBT_Context* ctx, NDBT_Step* step){
   NdbRestarter restarter;
-  NdbBackup backup(GETNDB(step)->getNodeId()+1);
+  NdbBackup backup;
   unsigned minBackupId = ctx->getProperty("MinBackupId");
   unsigned maxBackupId = ctx->getProperty("MaxBackupId");
   unsigned backupId = minBackupId;
@@ -499,7 +499,7 @@ int runRestoreBankAndVerify(NDBT_Context
   return result;
 }
 int runBackupUndoWaitStarted(NDBT_Context* ctx, NDBT_Step* step){
-  NdbBackup backup(GETNDB(step)->getNodeId()+1);
+  NdbBackup backup;
   unsigned backupId = 0;
   int undoError = 10041;
   NdbRestarter restarter;
@@ -567,7 +567,7 @@ int runChangeUndoDataDuringBackup(NDBT_C
   hugoTrans.closeTransaction(pNdb);
 
  // make sure the backup has finished
-  NdbBackup backup(GETNDB(step)->getNodeId()+1);
+  NdbBackup backup;
 
   // start log event
   if(backup.startLogEvent() != 0) {
@@ -651,7 +651,7 @@ int runVerifyUndoData(NDBT_Context* ctx,
 int
 runBug57650(NDBT_Context* ctx, NDBT_Step* step)
 {
-  NdbBackup backup(GETNDB(step)->getNodeId()+1);
+  NdbBackup backup;
   NdbRestarter res;
 
   int node0 = res.getNode(NdbRestarter::NS_RANDOM);

=== modified file 'storage/ndb/test/ndbapi/testBasic.cpp'
--- a/storage/ndb/test/ndbapi/testBasic.cpp	2011-06-20 07:17:57 +0000
+++ b/storage/ndb/test/ndbapi/testBasic.cpp	2012-06-25 12:55:27 +0000
@@ -24,6 +24,7 @@
 #include <Bitmask.hpp>
 #include <random.h>
 #include <signaldata/DumpStateOrd.hpp>
+#include <NdbConfig.hpp>
 
 /**
  * TODO 
@@ -3002,6 +3003,63 @@ int runRefreshTuple(NDBT_Context* ctx, N
   return rc;
 };
 
+// An 'assert' that is always executed, so that 'cond' may have side effects.
+#ifdef NDEBUG
+#define ASSERT_ALWAYS(cond) if(!(cond)){abort();}
+#else
+#define ASSERT_ALWAYS assert
+#endif
+
+// Regression test for bug #14208924
+static int
+runLeakApiConnectObjects(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NdbRestarter restarter;
+  /**
+   * This error insert in combination with bug #14208924 will
+   * cause TC to leak ApiConnectRecord objects.
+   */
+  restarter.insertErrorInAllNodes(8094);
+
+  Ndb* const ndb = GETNDB(step);
+  Uint32 maxTrans = 0;
+  NdbConfig conf;
+  ASSERT_ALWAYS(conf.getProperty(conf.getMasterNodeId(),
+                                 NODE_TYPE_DB,
+                                 CFG_DB_NO_TRANSACTIONS,
+                                 &maxTrans));
+  ASSERT_ALWAYS(maxTrans > 0);
+
+  HugoOperations hugoOps(*ctx->getTab());
+  // One ApiConnectRecord object is leaked for each iteration.
+  for (uint i = 0; i < maxTrans+1; i++)
+  {
+    ASSERT_ALWAYS(hugoOps.startTransaction(ndb) == 0);
+    ASSERT_ALWAYS(hugoOps.pkInsertRecord(ndb, i) == 0);
+    NdbTransaction* const trans = hugoOps.getTransaction();
+    /**
+     * The error insert causes trans->execute(Commit) to fail with error code
+     * 286 even if the bug is fixed. Therefore, we ignore this error code.
+     */
+    if (trans->execute(Commit) != 0 && 
+        trans->getNdbError().code != 286)
+    {
+      g_err << "trans->execute() gave unexpected error : " 
+            << trans->getNdbError() << endl;
+      restarter.insertErrorInAllNodes(0);
+      return NDBT_FAILED;
+    }
+    ASSERT_ALWAYS(hugoOps.closeTransaction(ndb) == 0);
+  }
+  restarter.insertErrorInAllNodes(0);
+
+  UtilTransactions utilTrans(*ctx->getTab());
+  if (utilTrans.clearTable(ndb) != 0){
+    return NDBT_FAILED;
+  }
+  return NDBT_OK;
+}
+
 enum PreRefreshOps
 {
   PR_NONE,
@@ -3584,6 +3642,10 @@ TESTCASE("899", "")
   STEP(runTest899);
   FINALIZER(runEnd899);
 }
+TESTCASE("LeakApiConnectObjects", "")
+{
+  INITIALIZER(runLeakApiConnectObjects);
+}
 TESTCASE("RefreshLocking",
          "Test Refresh locking properties")
 {

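The ASSERT_ALWAYS macro above exists because plain assert() compiles to nothing under NDEBUG, so a condition with side effects, such as hugoOps.startTransaction(ndb) == 0, would simply never run in release builds. A self-contained sketch of the hazard:

    // Why ASSERT_ALWAYS is needed: under NDEBUG the whole assert expression
    // is discarded, so side-effecting conditions silently stop executing.
    #include <cassert>
    #include <cstdlib>

    static int counter = 0;
    static bool doWork() { ++counter; return true; }   // has a side effect

    int main()
    {
      assert(doWork());          // with NDEBUG defined, doWork() is never called
      if (!doWork()) abort();    // ASSERT_ALWAYS style: doWork() always runs
      return counter;            // 2 in debug builds, 1 with NDEBUG
    }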
=== modified file 'storage/ndb/test/ndbapi/testSystemRestart.cpp'
--- a/storage/ndb/test/ndbapi/testSystemRestart.cpp	2012-01-20 10:15:22 +0000
+++ b/storage/ndb/test/ndbapi/testSystemRestart.cpp	2012-06-25 13:08:04 +0000
@@ -38,7 +38,7 @@ int runLoadTable(NDBT_Context* ctx, NDBT
 int
 clearOldBackups(NDBT_Context* ctx, NDBT_Step* step)
 {
-  NdbBackup backup(GETNDB(step)->getNodeId());
+  NdbBackup backup;
   backup.clearOldBackups();
   return NDBT_OK;
 }
@@ -1391,7 +1391,7 @@ int runSR_DD_1(NDBT_Context* ctx, NDBT_S
   int result = NDBT_OK;
   Uint32 loops = ctx->getNumLoops();
   NdbRestarter restarter;
-  NdbBackup backup(GETNDB(step)->getNodeId()+1);
+  NdbBackup backup;
   bool lcploop = ctx->getProperty("LCP", (unsigned)0);
   bool all = ctx->getProperty("ALL", (unsigned)0);
 
@@ -1491,7 +1491,7 @@ int runSR_DD_2(NDBT_Context* ctx, NDBT_S
   Uint32 loops = ctx->getNumLoops();
   Uint32 rows = ctx->getNumRecords();
   NdbRestarter restarter;
-  NdbBackup backup(GETNDB(step)->getNodeId()+1);
+  NdbBackup backup;
   bool lcploop = ctx->getProperty("LCP", (unsigned)0);
   bool all = ctx->getProperty("ALL", (unsigned)0);
   int error = (int)ctx->getProperty("ERROR", (unsigned)0);
@@ -1597,7 +1597,7 @@ int runSR_DD_3(NDBT_Context* ctx, NDBT_S
   Uint32 loops = ctx->getNumLoops();
   Uint32 rows = ctx->getNumRecords();
   NdbRestarter restarter;
-  NdbBackup backup(GETNDB(step)->getNodeId()+1);
+  NdbBackup backup;
   bool lcploop = ctx->getProperty("LCP", (unsigned)0);
   bool all = ctx->getProperty("ALL", (unsigned)0);
   int error = (int)ctx->getProperty("ERROR", (unsigned)0);

=== modified file 'storage/ndb/test/ndbapi/testTimeout.cpp'
--- a/storage/ndb/test/ndbapi/testTimeout.cpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/test/ndbapi/testTimeout.cpp	2012-06-25 13:08:04 +0000
@@ -32,9 +32,8 @@ setTransactionTimeout(NDBT_Context* ctx,
   NdbRestarter restarter;
   int timeout = ctx->getProperty("TransactionInactiveTimeout",TIMEOUT);
 
-  NdbConfig conf(GETNDB(step)->getNodeId()+1);
-  unsigned int nodeId = conf.getMasterNodeId();
-  if (!conf.getProperty(nodeId,
+  NdbConfig conf;
+  if (!conf.getProperty(conf.getMasterNodeId(),
 			NODE_TYPE_DB, 
 			CFG_DB_TRANSACTION_INACTIVE_TIMEOUT,
 			&g_org_timeout)){
@@ -66,9 +65,8 @@ setDeadlockTimeout(NDBT_Context* ctx, ND
   NdbRestarter restarter;
   int timeout = ctx->getProperty("TransactionDeadlockTimeout", TIMEOUT);
   
-  NdbConfig conf(GETNDB(step)->getNodeId()+1);
-  unsigned int nodeId = conf.getMasterNodeId();
-  if (!conf.getProperty(nodeId,
+  NdbConfig conf;
+  if (!conf.getProperty(conf.getMasterNodeId(),
 			NODE_TYPE_DB, 
 			CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT,
 			&g_org_deadlock))
@@ -88,9 +86,8 @@ getDeadlockTimeout(NDBT_Context* ctx, ND
   NdbRestarter restarter;
   
   Uint32 val = 0;
-  NdbConfig conf(GETNDB(step)->getNodeId()+1);
-  unsigned int nodeId = conf.getMasterNodeId();
-  if (!conf.getProperty(nodeId,
+  NdbConfig conf;
+  if (!conf.getProperty(conf.getMasterNodeId(),
 			NODE_TYPE_DB, 
 			CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT,
 			&val))
@@ -308,9 +305,8 @@ int runDeadlockTimeoutTrans(NDBT_Context
   int stepNo = step->getStepNo();
 
   Uint32 deadlock_timeout;
-  NdbConfig conf(GETNDB(step)->getNodeId()+1);
-  unsigned int nodeId = conf.getMasterNodeId();
-  if (!conf.getProperty(nodeId,
+  NdbConfig conf;
+  if (!conf.getProperty(conf.getMasterNodeId(),
                         NODE_TYPE_DB,
                         CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT,
                         &deadlock_timeout)){

=== modified file 'storage/ndb/test/ndbapi/testUpgrade.cpp'
--- a/storage/ndb/test/ndbapi/testUpgrade.cpp	2012-05-03 09:54:05 +0000
+++ b/storage/ndb/test/ndbapi/testUpgrade.cpp	2012-06-25 12:55:27 +0000
@@ -896,7 +896,7 @@ runPostUpgradeChecks(NDBT_Context* ctx, 
    *   automatically by NDBT...
    *   so when we enter here, this is already tested
    */
-  NdbBackup backup(GETNDB(step)->getNodeId()+1);
+  NdbBackup backup;
 
   ndbout << "Starting backup..." << flush;
   if (backup.start() != 0)

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2012-04-24 14:41:37 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2012-06-25 11:35:54 +0000
@@ -1867,3 +1867,7 @@ max-time: 300
 cmd: testDict
 args: -n IndexStatCreate T1
 
+max-time: 300
+cmd: testBasic
+args: -n LeakApiConnectObjects T1
+

=== modified file 'support-files/mysql.spec.sh'
--- a/support-files/mysql.spec.sh	2012-05-23 09:04:42 +0000
+++ b/support-files/mysql.spec.sh	2012-06-25 10:23:35 +0000
@@ -1032,6 +1032,8 @@ echo "====="                            
 %doc %attr(644, root, man) %{_mandir}/man1/resolve_stack_dump.1*
 %doc %attr(644, root, man) %{_mandir}/man1/resolveip.1*
 
+%doc %attr(644, root, man) %{_mandir}/man1/ndb-common-options.1*
+
 %ghost %config(noreplace,missingok) %{_sysconfdir}/my.cnf
 
 %attr(755, root, root) %{_bindir}/innochecksum
@@ -1063,6 +1065,7 @@ echo "====="                            
 %attr(755, root, root) %{_bindir}/resolve_stack_dump
 %attr(755, root, root) %{_bindir}/resolveip
 
+%attr(755, root, root) %{_bindir}/ndb_blob_tool
 %attr(755, root, root) %{_bindir}/ndb_config
 %attr(755, root, root) %{_bindir}/ndb_delete_all
 %attr(755, root, root) %{_bindir}/ndb_desc

No bundle (reason: useless for push emails).