List: Commits
From: Martin Skold
Date: October 15 2009 3:46pm
Subject: bzr commit into mysql-5.1-telco-6.2 branch (Martin.Skold:3018) Bug#38502 Bug#44607 Bug#45964 Bug#46113 Bug#46662 Bug#47674 Bug#47816 Bug#47935
#At file:///home/marty/MySQL/mysql-5.1-telco-6.2/

 3018 Martin Skold	2009-10-15 [merge]
      Merge
      modified:
        config/ac-macros/ha_ndbcluster.m4
        mysql-test/suite/ndb/my.cnf
        mysql-test/suite/ndb/r/ndb_config.result
        mysql-test/suite/ndb_binlog/r/ndb_binlog_variants.result
        mysql-test/suite/ndb_binlog/t/ndb_binlog_variants.test
        mysql-test/suite/ndb_team/r/ndb_dd_backuprestore.result
        sql/ha_ndbcluster.cc
        sql/ha_ndbcluster.h
        storage/ndb/include/kernel/signaldata/ContinueFragmented.hpp
        storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp
        storage/ndb/include/kernel/signaldata/NodeFailRep.hpp
        storage/ndb/include/ndb_global.h.in
        storage/ndb/include/ndbapi/NdbDictionary.hpp
        storage/ndb/include/ndbapi/NdbOperation.hpp
        storage/ndb/ndbapi-examples/ndbapi_scan/ndbapi_scan.cpp
        storage/ndb/src/kernel/blocks/backup/Backup.cpp
        storage/ndb/src/kernel/blocks/backup/Backup.hpp
        storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
        storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp
        storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
        storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
        storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp
        storage/ndb/src/kernel/blocks/dbdih/printSysfile.cpp
        storage/ndb/src/kernel/blocks/dbdih/printSysfile/printSysfile.cpp
        storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
        storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp
        storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
        storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
        storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
        storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
        storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp
        storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
        storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
        storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp
        storage/ndb/src/kernel/blocks/lgman.cpp
        storage/ndb/src/kernel/blocks/lgman.hpp
        storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
        storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
        storage/ndb/src/kernel/blocks/suma/Suma.cpp
        storage/ndb/src/kernel/blocks/suma/Suma.hpp
        storage/ndb/src/kernel/blocks/trix/Trix.hpp
        storage/ndb/src/kernel/blocks/tsman.cpp
        storage/ndb/src/kernel/blocks/tsman.hpp
        storage/ndb/src/kernel/vm/DLFifoList.hpp
        storage/ndb/src/kernel/vm/DLHashTable.hpp
        storage/ndb/src/kernel/vm/DLList.hpp
        storage/ndb/src/kernel/vm/DataBuffer.hpp
        storage/ndb/src/kernel/vm/SimulatedBlock.cpp
        storage/ndb/src/kernel/vm/SimulatedBlock.hpp
        storage/ndb/src/mgmapi/LocalConfig.cpp
        storage/ndb/src/mgmapi/Makefile.am
        storage/ndb/src/mgmsrv/ConfigInfo.cpp
        storage/ndb/src/ndbapi/NdbDictionary.cpp
        storage/ndb/src/ndbapi/NdbOperation.cpp
        storage/ndb/test/ndbapi/testNdbApi.cpp
        storage/ndb/test/run-test/daily-basic-tests.txt

=== modified file 'config/ac-macros/ha_ndbcluster.m4'
--- a/config/ac-macros/ha_ndbcluster.m4	2009-05-08 09:57:18 +0000
+++ b/config/ac-macros/ha_ndbcluster.m4	2009-09-24 16:17:14 +0000
@@ -63,10 +63,21 @@ AC_DEFUN([MYSQL_CHECK_NDB_OPTIONS], [
               [ndb_docs="$withval"],
               [ndb_docs=no])
   AC_ARG_WITH([ndb-port],
-              [AC_HELP_STRING([--with-ndb-port],
-                              [Port for NDB Cluster management server])],
-              [ndb_port="$withval"],
-              [ndb_port="default"])
+              [AC_HELP_STRING([--with-ndb-port=port-number],
+              [Default port used by NDB Cluster management server])],
+              [ndb_port="$withval"],[ndb_port="no"])
+  case "$ndb_port" in
+    "yes" )
+      AC_MSG_ERROR([--with-ndb-port=<port-number> needs an argument])
+      ;;
+    "no" )
+      ;;
+    * )
+      AC_DEFINE_UNQUOTED([NDB_PORT], [$ndb_port],
+                         [Default port used by NDB Cluster management server])
+      ;;
+  esac
+
   AC_ARG_WITH([ndb-port-base],
               [AC_HELP_STRING([--with-ndb-port-base],
                               [Base port for NDB Cluster transporters])],
@@ -215,11 +226,6 @@ AC_DEFUN([MYSQL_SETUP_NDBCLUSTER], [
     fi
   fi
 
-  if test X"$ndb_port" = Xdefault
-  then
-    ndb_port="1186"
-  fi
-  
   have_ndb_binlog="no"
   if test X"$ndb_binlog" = Xdefault ||
      test X"$ndb_binlog" = Xyes
@@ -322,7 +328,6 @@ AC_DEFUN([MYSQL_SETUP_NDBCLUSTER], [
   AC_SUBST(NDB_SCI_LIBS)
 
   AC_SUBST(ndb_transporter_opt_objs)
-  AC_SUBST(ndb_port)
   AC_SUBST(ndb_bin_am_ldflags)
   AC_SUBST(ndb_opt_subdirs)
 

=== modified file 'mysql-test/suite/ndb/my.cnf'
--- a/mysql-test/suite/ndb/my.cnf	2009-08-25 19:44:04 +0000
+++ b/mysql-test/suite/ndb/my.cnf	2009-10-07 07:32:39 +0000
@@ -8,16 +8,28 @@ ndb_mgmd=
 mysqld=,
 ndbapi=,,,,,,,,,,,
 
-[cluster_config.ndbapi.8.1]
+[cluster_config.mysqld.1.1]
+NodeId=49
+
+[cluster_config.mysqld.2.1]
+NodeId=16
+
+[cluster_config.ndbapi.1.1]
+NodeId=32
+
+[cluster_config.ndbapi.2.1]
+NodeId=48
+
+[cluster_config.ndbapi.3.1]
 NodeId=63
 
-[cluster_config.ndbapi.9.1]
+[cluster_config.ndbapi.4.1]
 NodeId=127
 
-[cluster_config.ndbapi.10.1]
+[cluster_config.ndbapi.5.1]
 NodeId=192
 
-[cluster_config.ndbapi.11.1]
+[cluster_config.ndbapi.6.1]
 NodeId=228
 
 [cluster_config.ndbapi.12.1]

=== modified file 'mysql-test/suite/ndb/r/ndb_config.result'
--- a/mysql-test/suite/ndb/r/ndb_config.result	2009-02-02 15:29:43 +0000
+++ b/mysql-test/suite/ndb/r/ndb_config.result	2009-10-07 07:32:39 +0000
@@ -1,5 +1,5 @@
 == 1 ==
-ndbd,1,localhost ndbd,2,localhost ndb_mgmd,3,localhost mysqld,4,localhost mysqld,5,localhost mysqld,6,localhost mysqld,7,localhost mysqld,8,localhost mysqld,9,localhost mysqld,10,localhost mysqld,11,localhost mysqld,12,localhost mysqld,63,localhost mysqld,127,localhost mysqld,192,localhost mysqld,228,localhost mysqld,255,localhost
+ndbd,1,localhost ndbd,2,localhost ndb_mgmd,3,localhost mysqld,49,localhost mysqld,16,localhost mysqld,32,localhost mysqld,48,localhost mysqld,63,localhost mysqld,127,localhost mysqld,192,localhost mysqld,228,localhost mysqld,229,localhost mysqld,230,localhost mysqld,231,localhost mysqld,232,localhost mysqld,233,localhost mysqld,255,localhost
 == 2 ==
 1,localhost,20971520,1048576 2,localhost,20971520,1048576
 == 3 ==

=== modified file 'mysql-test/suite/ndb_binlog/r/ndb_binlog_variants.result'
--- a/mysql-test/suite/ndb_binlog/r/ndb_binlog_variants.result	2009-09-29 14:25:03 +0000
+++ b/mysql-test/suite/ndb_binlog/r/ndb_binlog_variants.result	2009-10-07 16:27:03 +0000
@@ -3,6 +3,10 @@ insert into ba values (1, 1, 1), (2,2,2)
 update ba set lp=40 where ks=4;
 delete from ba where ks=2;
 flush logs;
+flush logs;
+flush logs;
+flush logs;
+delete from ba;
 show variables like '%log_update%';
 Variable_name	Value
 sql_log_update	ON
@@ -14,7 +18,12 @@ INSERT INTO test.ba SET   @1=2   @2=2   
 INSERT INTO test.ba SET   @1=3   @2=3   @3=3
 INSERT INTO test.ba SET   @1=4   @2=4   @3=4
 INSERT INTO test.ba SET   @1=4   @3=40
-flush logs;
+select * from ba order by ks;
+ks	st	lp
+1	1	1
+3	3	3
+4	4	40
+delete from ba;
 show variables like '%log_update%';
 Variable_name	Value
 sql_log_update	ON
@@ -26,7 +35,12 @@ INSERT INTO test.ba SET   @1=2   @2=2   
 INSERT INTO test.ba SET   @1=3   @2=3   @3=3
 INSERT INTO test.ba SET   @1=4   @2=4   @3=4
 INSERT INTO test.ba SET   @1=4   @2=4   @3=40
-flush logs;
+select * from ba order by ks;
+ks	st	lp
+1	1	1
+3	3	3
+4	4	40
+delete from ba;
 show variables like '%log_update%';
 Variable_name	Value
 sql_log_update	ON
@@ -38,7 +52,12 @@ INSERT INTO test.ba SET   @1=2   @2=2   
 INSERT INTO test.ba SET   @1=3   @2=3   @3=3
 INSERT INTO test.ba SET   @1=4   @2=4   @3=4
 UPDATE test.ba WHERE   @1=4   @3=4 SET   @1=4   @3=40
-flush logs;
+select * from ba order by ks;
+ks	st	lp
+1	1	1
+3	3	3
+4	4	40
+delete from ba;
 show variables like '%log_update%';
 Variable_name	Value
 sql_log_update	ON
@@ -50,4 +69,25 @@ INSERT INTO test.ba SET   @1=2   @2=2   
 INSERT INTO test.ba SET   @1=3   @2=3   @3=3
 INSERT INTO test.ba SET   @1=4   @2=4   @3=4
 UPDATE test.ba WHERE   @1=4   @2=4   @3=4 SET   @1=4   @2=4   @3=40
+select * from ba order by ks;
+ks	st	lp
+1	1	1
+3	3	3
+4	4	40
 drop table ba;
+reset master;
+show variables like '%log_update%';
+Variable_name	Value
+sql_log_update	ON
+create table bah (tst int primary key, cvy int, sqs int, unique(sqs)) engine=ndb;
+insert into bah values (1,1,1);
+update bah set cvy= 2 where tst=1;
+select * from bah order by tst;
+tst	cvy	sqs
+1	2	1
+drop table bah;
+Manually applying captured binlog
+select * from bah order by tst;
+tst	cvy	sqs
+1	2	1
+drop table bah;

=== modified file 'mysql-test/suite/ndb_binlog/t/ndb_binlog_variants.test'
--- a/mysql-test/suite/ndb_binlog/t/ndb_binlog_variants.test	2009-09-29 14:25:03 +0000
+++ b/mysql-test/suite/ndb_binlog/t/ndb_binlog_variants.test	2009-10-07 16:27:03 +0000
@@ -70,34 +70,146 @@ drop table stream_marker;
 --let $wait_binlog_event=stream_marker
 --enable_query_log
 
-# Now let's have a look at what's in the Binlog on each server
+# Now let's trim the Binlogs on each server
 
+connection mysqld1;
 --source include/wait_for_binlog_event.inc
 flush logs;
+connection mysqld2;
+--source include/wait_for_binlog_event.inc
+flush logs;
+connection mysqld3;
+--source include/wait_for_binlog_event.inc
+flush logs;
+connection mysqld4;
+--source include/wait_for_binlog_event.inc
+flush logs;
+
+# Empty the table
+delete from ba;
+
+# Now let's examine the contents of the first binlog
+# on each server
+# We'll also apply the Binlog and check that the
+# table contents are as expected in each case.
+# As each server is recording in a new binlog, the
+# new updates will go there.
+
+connection mysqld1;
+
 show variables like '%log_update%';
 --source suite/ndb_binlog/t/ndb_binlog_get_binlog_stmts.inc
 
+--disable_query_log
+let $MYSQLD_DATADIR= `select @@datadir;`;
+--exec $MYSQL_BINLOG $MYSQLD_DATADIR/mysqld-bin.000001 > $MYSQLTEST_VARDIR/tmp/ndb_binlog_mysqlbinlog.sql
+--exec $MYSQL -uroot < $MYSQLTEST_VARDIR/tmp/ndb_binlog_mysqlbinlog.sql
+
+--enable_query_log
+select * from ba order by ks;
+delete from ba;
+
 connection mysqld2;
 
---source include/wait_for_binlog_event.inc 
-flush logs;
 show variables like '%log_update%';
 --source suite/ndb_binlog/t/ndb_binlog_get_binlog_stmts.inc
 
+--disable_query_log
+let $MYSQLD_DATADIR= `select @@datadir;`;
+--exec $MYSQL_BINLOG $MYSQLD_DATADIR/mysqld-bin.000001 > $MYSQLTEST_VARDIR/tmp/ndb_binlog_mysqlbinlog.sql
+--exec $MYSQL -uroot < $MYSQLTEST_VARDIR/tmp/ndb_binlog_mysqlbinlog.sql
+
+--enable_query_log
+
+select * from ba order by ks;
+delete from ba;
+
 connection mysqld3;
 
---source include/wait_for_binlog_event.inc 
-flush logs;
 show variables like '%log_update%';
 --source suite/ndb_binlog/t/ndb_binlog_get_binlog_stmts.inc
 
+--disable_query_log
+let $MYSQLD_DATADIR= `select @@datadir;`;
+--exec $MYSQL_BINLOG $MYSQLD_DATADIR/mysqld-bin.000001 > $MYSQLTEST_VARDIR/tmp/ndb_binlog_mysqlbinlog.sql
+--exec $MYSQL -uroot < $MYSQLTEST_VARDIR/tmp/ndb_binlog_mysqlbinlog.sql
+
+--enable_query_log
+select * from ba order by ks;
+delete from ba;
+
 connection mysqld4;
 
---source include/wait_for_binlog_event.inc 
-flush logs;
 show variables like '%log_update%';
 --source suite/ndb_binlog/t/ndb_binlog_get_binlog_stmts.inc
 
+--disable_query_log
+let $MYSQLD_DATADIR= `select @@datadir;`;
+--exec $MYSQL_BINLOG $MYSQLD_DATADIR/mysqld-bin.000001 > $MYSQLTEST_VARDIR/tmp/ndb_binlog_mysqlbinlog.sql
+--exec $MYSQL -uroot < $MYSQLTEST_VARDIR/tmp/ndb_binlog_mysqlbinlog.sql
+
+--enable_query_log
+select * from ba order by ks;
 
 drop table ba;
 
+
+# Bug#46662
+# Replicating changes to tables with unique indexes
+# The fix to bug#27378 results in the slave using NdbApi's write()
+# mechanism when applying WRITE_ROW events to tables with unique
+# indices.
+# 
+# If this is not done then the slave attempts to partially use SQL 
+# REPLACE semantics when applying WRITE_ROW events to tables with 
+# unique indexes, which is not good and the slave fails with a 
+# duplicate key error on the primary key.
+#
+# The fix to Bug#46662 aims to correct this, so that replicated
+# updates to tables with unique indices can work.
+# Note that other issues with replicating into tables with unique
+# indexes remain.
+# 
+
+connection mysqld1;
+reset master;
+show variables like '%log_update%';
+
+create table bah (tst int primary key, cvy int, sqs int, unique(sqs)) engine=ndb;
+
+insert into bah values (1,1,1);
+
+# Wait for epoch to complete in Binlog
+--disable_query_log
+create table dummy (a int primary key) engine=ndb;
+--enable_query_log
+
+# Now perform update
+# This will be logged as WRITE
+# Without ability to use NdbApi write() for replace, mysqlbinlog
+# application will fail with duplicate key error on insert.
+update bah set cvy= 2 where tst=1;
+
+select * from bah order by tst;
+
+# Wait for epoch to complete in Binlog
+--disable_query_log
+drop table dummy;
+flush logs;
+--enable_query_log
+
+drop table bah;
+
+# Now let's re-apply the binlog
+# Without fix, this fails with duplicate PK error
+--echo Manually applying captured binlog
+--disable_query_log
+let $MYSQLD_DATADIR= `select @@datadir;`;
+let $BINLOG_FILE= '$MYSQLTEST_VARDIR/tmp/ndb_binlog_mysqlbinlog.sql';
+--exec $MYSQL_BINLOG $MYSQLD_DATADIR/mysqld-bin.000001 > $BINLOG_FILE
+--exec $MYSQL -uroot < $BINLOG_FILE
+
+--enable_query_log
+select * from bah order by tst;
+
+drop table bah;

=== modified file 'mysql-test/suite/ndb_team/r/ndb_dd_backuprestore.result'
--- a/mysql-test/suite/ndb_team/r/ndb_dd_backuprestore.result	2009-05-08 14:43:21 +0000
+++ b/mysql-test/suite/ndb_team/r/ndb_dd_backuprestore.result	2009-10-15 15:45:56 +0000
@@ -337,7 +337,7 @@ t1	CREATE TABLE `t1` (
   `c3` int(11) NOT NULL,
   `c4` bit(1) NOT NULL,
   PRIMARY KEY (`pk1`,`c3`)
-) /*!50100 TABLESPACE table_space1 STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1
+) /*!50100 TABLESPACE table_space1 STORAGE DISK */ ENGINE=ndbcluster AUTO_INCREMENT=251 DEFAULT CHARSET=latin1
 /*!50100 PARTITION BY HASH (c3)
 PARTITIONS 4 */
 SHOW CREATE TABLE test.t2;
@@ -348,7 +348,7 @@ t2	CREATE TABLE `t2` (
   `c3` int(11) NOT NULL,
   `c4` bit(1) NOT NULL,
   PRIMARY KEY (`pk1`,`c3`)
-) /*!50100 TABLESPACE table_space2 STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1
+) /*!50100 TABLESPACE table_space2 STORAGE DISK */ ENGINE=ndbcluster AUTO_INCREMENT=251 DEFAULT CHARSET=latin1
 /*!50100 PARTITION BY KEY (c3)
 (PARTITION p0 ENGINE = ndbcluster,
  PARTITION p1 ENGINE = ndbcluster) */
@@ -360,7 +360,7 @@ t3	CREATE TABLE `t3` (
   `c3` int(11) NOT NULL,
   `c4` bit(1) NOT NULL,
   PRIMARY KEY (`pk1`,`c3`)
-) /*!50100 TABLESPACE table_space2 STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1
+) /*!50100 TABLESPACE table_space2 STORAGE DISK */ ENGINE=ndbcluster AUTO_INCREMENT=251 DEFAULT CHARSET=latin1
 /*!50100 PARTITION BY RANGE (c3)
 (PARTITION x1 VALUES LESS THAN (105) ENGINE = ndbcluster,
  PARTITION x2 VALUES LESS THAN (333) ENGINE = ndbcluster,
@@ -373,7 +373,7 @@ t4	CREATE TABLE `t4` (
   `c3` int(11) NOT NULL,
   `c4` bit(1) NOT NULL,
   PRIMARY KEY (`pk1`,`c3`)
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+) ENGINE=ndbcluster AUTO_INCREMENT=251 DEFAULT CHARSET=latin1
 /*!50100 PARTITION BY HASH (c3)
 PARTITIONS 2 */
 SHOW CREATE TABLE test.t5;
@@ -384,7 +384,7 @@ t5	CREATE TABLE `t5` (
   `c3` int(11) NOT NULL,
   `c4` bit(1) NOT NULL,
   PRIMARY KEY (`pk1`,`c3`)
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+) ENGINE=ndbcluster AUTO_INCREMENT=251 DEFAULT CHARSET=latin1
 /*!50100 PARTITION BY KEY (pk1)
 (PARTITION p0 ENGINE = ndbcluster,
  PARTITION p1 ENGINE = ndbcluster) */
@@ -396,7 +396,7 @@ t6	CREATE TABLE `t6` (
   `c3` int(11) NOT NULL,
   `c4` bit(1) NOT NULL,
   PRIMARY KEY (`pk1`,`c3`)
-) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+) ENGINE=ndbcluster AUTO_INCREMENT=251 DEFAULT CHARSET=latin1
 /*!50100 PARTITION BY RANGE (pk1)
 (PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster,
  PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) */

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2009-10-08 09:48:07 +0000
+++ b/sql/ha_ndbcluster.cc	2009-10-15 15:45:56 +0000
@@ -2893,6 +2893,27 @@ ha_ndbcluster::eventSetAnyValue(THD *thd
   }
 }
 
+bool ha_ndbcluster::isManualBinlogExec(THD *thd)
+{
+  /* Are we executing handler methods as part of 
+   * a mysql client BINLOG statement?
+   */
+#ifndef EMBEDDED_LIBRARY
+  return thd ? 
+    ( thd->rli_fake? 
+      thd->rli_fake->get_flag(Relay_log_info::IN_STMT) : false)
+    : false;
+#else
+  /* For Embedded library, we can't determine if we're
+   * executing Binlog manually
+   * TODO : Find better way to determine whether to use
+   *        SQL REPLACE or Write_row semantics
+   */
+  return false;
+#endif
+
+}
+
 int ha_ndbcluster::write_row(uchar *record)
 {
   DBUG_ENTER("ha_ndbcluster::write_row");
@@ -3049,27 +3070,28 @@ int ha_ndbcluster::ndb_write_row(uchar *
   if (m_use_write)
   {
     const uchar *mask;
-#ifdef HAVE_NDB_BINLOG
-    /*
-      The use of table->write_set is tricky here. This is done as a temporary
-      workaround for BUG#22045.
-
-      There is some confusion on the precise meaning of write_set in write_row,
-      with REPLACE INTO and replication SQL thread having different opinions.
-      There is work on the way to sort that out, but until then we need to
-      implement different semantics depending on whether we are in the slave
-      SQL thread or not.
+    /* Should we use the supplied table writeset or not?
+     * For a REPLACE command, we should ignore it, and write
+     * all columns to get correct REPLACE behaviour.
+     * For applying Binlog events, we need to use the writeset
+     * to avoid trampling unchanged columns when an update is
+     * logged as a WRITE
+     */
+    bool useWriteSet= isManualBinlogExec(thd);
 
-      SQL thread -> use the write_set for writeTuple().
-      otherwise (REPLACE INTO) -> do not use write_set.
-    */
-    if (thd->slave_thread)
+#ifdef HAVE_NDB_BINLOG
+    /* Slave always uses writeset
+     * TODO : What about SBR replicating a
+     * REPLACE command?
+     */
+    useWriteSet |= thd->slave_thread;
+#endif
+    if (useWriteSet)
     {
       user_cols_written_bitmap= table->write_set;
       mask= (uchar *)(user_cols_written_bitmap->bitmap);
     }
     else
-#endif
     {
       user_cols_written_bitmap= NULL;
       mask= NULL;
@@ -4219,7 +4241,8 @@ int ha_ndbcluster::extra(enum ha_extra_f
   case HA_EXTRA_WRITE_CAN_REPLACE:
     DBUG_PRINT("info", ("HA_EXTRA_WRITE_CAN_REPLACE"));
     if (!m_has_unique_index ||
-        current_thd->slave_thread) /* always set if slave, quick fix for bug 27378 */
+        current_thd->slave_thread || /* always set if slave, quick fix for bug 27378 */
+        isManualBinlogExec(current_thd)) /* or if manual binlog application, for bug 46662 */
     {
       DBUG_PRINT("info", ("Turning ON use of write instead of insert"));
       m_use_write= TRUE;

=== modified file 'sql/ha_ndbcluster.h'
--- a/sql/ha_ndbcluster.h	2009-09-24 14:22:10 +0000
+++ b/sql/ha_ndbcluster.h	2009-10-07 16:26:17 +0000
@@ -616,6 +616,8 @@ private:
                                   ulonglong *nb_reserved_values);
   bool uses_blob_value(const MY_BITMAP *bitmap);
 
+  static inline bool isManualBinlogExec(THD *thd);
+
   char *update_table_comment(const char * comment);
 
   int write_ndb_file(const char *name);

=== modified file 'storage/ndb/include/kernel/signaldata/ContinueFragmented.hpp'
--- a/storage/ndb/include/kernel/signaldata/ContinueFragmented.hpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/include/kernel/signaldata/ContinueFragmented.hpp	2009-10-09 09:13:43 +0000
@@ -32,7 +32,33 @@ class ContinueFragmented {
 public:
   
 private:
-  Uint32 line;
+  enum {
+    CONTINUE_SENDING = 0,
+    CONTINUE_CLEANUP = 1
+  };
+  
+  STATIC_CONST(CONTINUE_CLEANUP_FIXED_WORDS = 5);
+
+  enum {
+    RES_FRAGSEND = 0, /* Fragmented send lists */
+    RES_FRAGINFO = 1, /* Fragmented signal assembly hash */
+    RES_LAST = 2      /* Must be last */
+  };
+
+  Uint32 type;
+  
+  union
+  {
+    Uint32 line;  /* For CONTINUE_SENDING */
+    struct        /* For CONTINUE_CLEANUP */
+    {
+      Uint32 failedNodeId;
+      Uint32 resource;
+      Uint32 cursor;
+      Uint32 elementsCleaned;
+      Uint32 callbackStart; /* Callback structure placed here */
+    } cleanup;
+  };
 };
 
 #endif

=== modified file 'storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp'
--- a/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp	2009-10-08 10:19:19 +0000
@@ -124,6 +124,7 @@ public:
                                       to be able to debug if events
                                       for some reason does not end up
                                       in clusterlog */
+    CmvmiTestLongSig = 2605,  /* Long signal testing trigger */
     LCPContinue = 5900,
     // 7000 DIH
     // 7001 DIH

=== modified file 'storage/ndb/include/kernel/signaldata/NodeFailRep.hpp'
--- a/storage/ndb/include/kernel/signaldata/NodeFailRep.hpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/include/kernel/signaldata/NodeFailRep.hpp	2009-10-08 10:19:19 +0000
@@ -24,7 +24,8 @@
 
 /**
  * This signals is sent by Qmgr to NdbCntr
- *   and then from NdbCntr sent to: dih, dict, lqh, tc & API
+ *   and then from NdbCntr sent to: dih, dict, lqh, tc, API
+ *   and others
  */
 struct NodeFailRep {
   STATIC_CONST( SignalLength = 3 + NdbNodeBitmask::Size );

=== modified file 'storage/ndb/include/ndb_global.h.in'
--- a/storage/ndb/include/ndb_global.h.in	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/include/ndb_global.h.in	2009-10-09 10:13:10 +0000
@@ -22,7 +22,11 @@
 #include <my_config.h>
 #include <ndb_types.h>
 
-#define NDB_PORT "@ndb_port@"
+#ifndef NDB_PORT
+/* Default port used by ndb_mgmd */
+#define NDB_PORT 1186
+#endif
+
 #define NDB_TCP_BASE_PORT "@ndb_port_base@"
 
 #if defined(_WIN32) || defined(_WIN64) || defined(__WIN32__) || defined(WIN32)
@@ -151,4 +155,49 @@ extern "C" {
 
 #define NDB_ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
 
+
+/*
+  NDB_STATIC_ASSERT(expr)
+   - Check coding assumptions during compile time
+     by laying out code that will generate a compiler error
+     if the expression is false.
+*/
+
+#if (_MSC_VER > 1500) || (defined __GXX_EXPERIMENTAL_CXX0X__)
+
+/*
+  Prefer to use the 'static_assert' function from C++0x
+  to get best error message
+*/
+#define NDB_STATIC_ASSERT(expr) static_assert(expr, #expr)
+
+#else
+
+/*
+  Fallback to use home grown solution
+*/
+
+#define STR_CONCAT_(x, y) x##y
+#define STR_CONCAT(x, y) STR_CONCAT_(x, y)
+
+#define NDB_STATIC_ASSERT(expr) \
+  enum {STR_CONCAT(static_assert_, __LINE__) = 1 / (!!(expr)) }
+
+#undef STR_CONCAT_
+#undef STR_CONCAT
+
+#endif
+
+
+#if (_MSC_VER > 1500) || (defined __GXX_EXPERIMENTAL_CXX0X__)
+#define HAVE_COMPILER_TYPE_TRAITS
+#endif
+
+#ifdef HAVE_COMPILER_TYPE_TRAITS
+#define ASSERT_TYPE_HAS_CONSTRUCTOR(x)     \
+  NDB_STATIC_ASSERT(!__has_trivial_constructor(x))
+#else
+#define ASSERT_TYPE_HAS_CONSTRUCTOR(x)
+#endif
+
 #endif

=== modified file 'storage/ndb/include/ndbapi/NdbDictionary.hpp'
--- a/storage/ndb/include/ndbapi/NdbDictionary.hpp	2009-06-13 18:56:08 +0000
+++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp	2009-10-07 02:21:54 +0000
@@ -1920,7 +1920,9 @@ public:
      * @return       -1 if error.
      *
      */
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
     int listObjects(List & list, Object::Type type = Object::TypeUndefined);
+#endif
     int listObjects(List & list,
 		    Object::Type type = Object::TypeUndefined) const;
 
@@ -1974,7 +1976,9 @@ public:
      * @param tableName  Name of table that index belongs to.
      * @return  0 if successful, otherwise -1
      */
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
     int listIndexes(List & list, const char * tableName);
+#endif
     int listIndexes(List & list, const char * tableName) const;
 
 #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
@@ -2019,7 +2023,9 @@ public:
      * @param list   List of events returned in the dictionary
      * @return 0 if successful otherwise -1.
      */
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
     int listEvents(List & list);
+#endif
     int listEvents(List & list) const;
 
     /** @} *******************************************************************/

=== modified file 'storage/ndb/include/ndbapi/NdbOperation.hpp'
--- a/storage/ndb/include/ndbapi/NdbOperation.hpp	2009-06-13 18:56:08 +0000
+++ b/storage/ndb/include/ndbapi/NdbOperation.hpp	2009-10-07 02:21:54 +0000
@@ -839,7 +839,9 @@ public:
    * 
    * @return method number where the error occured.
    */
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
   int getNdbErrorLine();
+#endif
   int getNdbErrorLine() const;
 
   /**
@@ -1482,7 +1484,9 @@ inline
 int
 NdbOperation::getNdbErrorLine()
 {
-  return theErrorLine;
+  // delegate to overloaded const function for same semantics
+  const NdbOperation * const cthis = this;
+  return cthis->NdbOperation::getNdbErrorLine();
 }
 
 inline

=== modified file 'storage/ndb/ndbapi-examples/ndbapi_scan/ndbapi_scan.cpp'
--- a/storage/ndb/ndbapi-examples/ndbapi_scan/ndbapi_scan.cpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/ndbapi-examples/ndbapi_scan/ndbapi_scan.cpp	2009-10-12 10:13:09 +0000
@@ -344,19 +344,11 @@ int scan_delete(Ndb* myNdb, 
       } while((check = myScanOp->nextResult(false)) == 0);
       
       /**
-       * Commit when all cached tuple have been marked for deletion
+       * NoCommit when all cached tuple have been marked for deletion
        */    
       if(check != -1)
       {
-	check = myTrans->execute(NdbTransaction::Commit);   
-      }
-
-      if(check == -1)
-      {
-	/**
-	 * Create a new transaction, while keeping scan open
-	 */
-	check = myTrans->restart();
+	check = myTrans->execute(NdbTransaction::NoCommit);
       }
 
       /**
@@ -377,6 +369,19 @@ int scan_delete(Ndb* myNdb, 
        * End of loop 
        */
     }
+    /**
+     * Commit all prepared operations
+     */
+    if(myTrans->execute(NdbTransaction::Commit) == -1)
+    {
+      if(err.status == NdbError::TemporaryError){
+	std::cout << myTrans->getNdbError().message << std::endl;
+	myNdb->closeTransaction(myTrans);
+	milliSleep(50);
+	continue;
+      }	
+    }
+    
     std::cout << myTrans->getNdbError().message << std::endl;
     myNdb->closeTransaction(myTrans);
     return 0;

=== modified file 'storage/ndb/src/kernel/blocks/backup/Backup.cpp'
--- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp	2009-10-08 10:19:19 +0000
@@ -850,6 +850,18 @@ Backup::execNODE_FAILREP(Signal* signal)
     jam();
     checkNodeFail(signal, ptr, newCoordinator, theFailedNodes);
   }
+
+  /* Block level cleanup */
+  for(unsigned i = 1; i < MAX_NDB_NODES; i++) {
+    jam();
+    if(NdbNodeBitmask::get(theFailedNodes, i))
+    {
+      jam();
+      Uint32 elementsCleaned = simBlockNodeFailure(signal, i); // No callback
+      ndbassert(elementsCleaned == 0); // Backup should have no distributed frag signals
+      (void) elementsCleaned; // Remove compiler warning
+    }//if
+  }//for
 }
 
 bool

=== modified file 'storage/ndb/src/kernel/blocks/backup/Backup.hpp'
--- a/storage/ndb/src/kernel/blocks/backup/Backup.hpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.hpp	2009-10-08 09:55:36 +0000
@@ -175,6 +175,7 @@ public:
   typedef Ptr<Page32> Page32Ptr;
 
   struct Attribute {
+    Attribute() {}
     enum Flags {
       COL_NULLABLE = 0x1,
       COL_FIXED    = 0x2,

=== modified file 'storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp'
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp	2009-09-01 10:50:11 +0000
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp	2009-10-08 10:19:19 +0000
@@ -1152,6 +1152,16 @@ Cmvmi::execDUMP_STATE_ORD(Signal* signal
     sendSignal(reference(), GSN_TESTSIG, signal, 8, JBB, ptr, 2);
   }
 
+  if (arg == DumpStateOrd::CmvmiTestLongSig)
+  {
+    /* Forward as GSN_TESTSIG to self */
+    Uint32 numArgs= signal->length() - 1;
+    memmove(signal->getDataPtrSend(), 
+            signal->getDataPtrSend() + 1, 
+            numArgs << 2);
+    sendSignal(reference(), GSN_TESTSIG, signal, numArgs, JBB);
+  }
+
 #ifdef ERROR_INSERT
   if (arg == 9000 || arg == 9002)
   {
@@ -1265,6 +1275,379 @@ Cmvmi::execNODE_START_REP(Signal* signal
 
 BLOCK_FUNCTIONS(Cmvmi)
 
+void
+Cmvmi::startFragmentedSend(Signal* signal,
+                           Uint32 variant,
+                           Uint32 numSigs,
+                           NodeReceiverGroup rg)
+{
+  Uint32* sigData = signal->getDataPtrSend();
+  const Uint32 sigLength = 6;
+  const Uint32 sectionWords = 240;
+  Uint32 sectionData[ sectionWords ];
+  
+  for (Uint32 i = 0; i < sectionWords; i++)
+    sectionData[ i ] = i;
+  
+  const Uint32 secCount = 1; 
+  LinearSectionPtr ptr[3];
+  ptr[0].sz = sectionWords;
+  ptr[0].p = &sectionData[0];
+
+  for (Uint32 i = 0; i < numSigs; i++)
+  {
+    sigData[0] = variant;
+    sigData[1] = 31;
+    sigData[2] = 0;
+    sigData[3] = 1; // print
+    sigData[4] = 0;
+    sigData[5] = sectionWords;
+    
+    if ((i & 1) == 0)
+    {
+      DEBUG("Starting linear fragmented send (" << i + 1
+            << "/" << numSigs << ")");
+
+      /* Linear send */
+      /* Todo : Avoid reading from invalid stackptr in CONTINUEB */
+      sendFragmentedSignal(rg,
+                           GSN_TESTSIG,
+                           signal,
+                           sigLength,
+                           JBB,
+                           ptr,
+                           secCount,
+                           TheEmptyCallback,
+                           90); // messageSize
+    }
+    else
+    {
+      /* Segmented send */
+      DEBUG("Starting segmented fragmented send (" << i + 1
+            << "/" << numSigs << ")");
+      Ptr<SectionSegment> segPtr;
+      ndbrequire(import(segPtr, sectionData, sectionWords));
+      SegmentedSectionPtr ssPtr;
+      getSection(ssPtr, segPtr.i);
+      
+      signal->header.m_noOfSections = 1;
+      signal->m_sectionPtr[0] = ssPtr;
+      
+      sendFragmentedSignal(rg,
+                           GSN_TESTSIG,
+                           signal,
+                           sigLength,
+                           JBB,
+                           TheEmptyCallback,
+                           90); // messageSize
+    }
+  }
+}
+
+void
+Cmvmi::testNodeFailureCleanupCallback(Signal* signal, Uint32 data, Uint32 elementsCleaned)
+{
+  DEBUG("testNodeFailureCleanupCallback");
+  DEBUG("Data : " << data 
+        << " elementsCleaned : " << elementsCleaned);
+
+  debugPrintFragmentCounts();
+
+  Uint32 variant = data & 0xffff;
+  Uint32 testType = (data >> 16) & 0xffff;
+
+  DEBUG("Sending trigger(" << testType 
+        << ") variant " << variant 
+        << " to self to cleanup any fragments that arrived "
+        << "before send was cancelled");
+
+  Uint32* sigData = signal->getDataPtrSend();
+  sigData[0] = variant;
+  sigData[1] = testType;
+  sendSignal(reference(), GSN_TESTSIG, signal, 2, JBB);
+  
+  return; 
+}
+
+void 
+Cmvmi::testFragmentedCleanup(Signal* signal, Uint32 testType, Uint32 variant)
+{
+  DEBUG("TestType " << testType << " variant " << variant);
+  debugPrintFragmentCounts();
+
+  /* Variants : 
+   *     Local fragmented send   Multicast fragmented send
+   * 0 : Immediate cleanup       Immediate cleanup
+   * 1 : Continued cleanup       Immediate cleanup
+   * 2 : Immediate cleanup       Continued cleanup
+   * 3 : Continued cleanup       Continued cleanup
+   */
+  const Uint32 NUM_VARIANTS = 4;
+  if (variant >= NUM_VARIANTS)
+  {
+    DEBUG("Unsupported variant");
+    releaseSections(signal);
+    return;
+  }
+
+  /* Test from ndb_mgm with
+   * <node(s)> DUMP 2605 0 30 
+   * 
+   * Use
+   * <node(s)> DUMP 2605 0 39 to get fragment resource usage counts
+   * Use
+   * <node(s)> DUMP 2601 to get segment usage counts in clusterlog
+   */
+  if (testType == 30)
+  {
+    /* Send the first fragment of a fragmented signal to self
+     * Receiver will allocate assembly hash entries
+     * which must be freed when node failure cleanup
+     * executes later
+     */
+    const Uint32 sectionWords = 240;
+    Uint32 sectionData[ sectionWords ];
+
+    for (Uint32 i = 0; i < sectionWords; i++)
+      sectionData[ i ] = i;
+
+    const Uint32 secCount = 1; 
+    LinearSectionPtr ptr[3];
+    ptr[0].sz = sectionWords;
+    ptr[0].p = &sectionData[0];
+
+    /* Send signal with testType == 31 */
+    NodeReceiverGroup me(reference());
+    Uint32* sigData = signal->getDataPtrSend();
+    const Uint32 sigLength = 6;
+    const Uint32 numPartialSigs = 4; 
+    /* Not too many as CMVMI's fragInfo hash is limited size */
+    // TODO : Consider making it debug-larger to get 
+    // more coverage on CONTINUEB path
+
+    for (Uint32 i = 0; i < numPartialSigs; i++)
+    {
+      /* Fill in messy TESTSIG format */
+      sigData[0] = variant;
+      sigData[1] = 31;
+      sigData[2] = 0;
+      sigData[3] = 0; // print
+      sigData[4] = 0;
+      sigData[5] = sectionWords;
+      
+      FragmentSendInfo fsi;
+      
+      DEBUG("Sending first fragment to self");
+      sendFirstFragment(fsi,
+                        me,
+                        GSN_TESTSIG,
+                        signal,
+                        sigLength,
+                        JBB,
+                        ptr,
+                        secCount,
+                        90); // FragmentLength
+
+      DEBUG("Cancelling remainder to free internal section");
+      fsi.m_status = FragmentSendInfo::SendCancelled;
+      sendNextLinearFragment(signal, fsi);
+    };
+
+    /* Ok, now send short signal with testType == 32
+     * to trigger 'remote-side' actions in middle of
+     * multiple fragment assembly
+     */
+    sigData[0] = variant;
+    sigData[1] = 32;
+
+    DEBUG("Sending node fail trigger to self");
+    sendSignal(me, GSN_TESTSIG, signal, 2, JBB);
+    return;
+  }
+
+  if (testType == 31)
+  {
+    /* Just release sections - execTESTSIG() has shown sections received */
+    releaseSections(signal);
+    return;
+  }
+
+  if (testType == 32)
+  {
+    /* 'Remote side' trigger to clean up fragmented signal resources */
+    BlockReference senderRef = signal->getSendersBlockRef();
+    Uint32 sendingNode = refToNode(senderRef);
+    
+    /* Start sending some linear and fragmented responses to the
+     * sender, to exercise frag-send cleanup code when we execute
+     * node-failure later
+     */
+    DEBUG("Starting fragmented send using continueB back to self");
+
+    NodeReceiverGroup sender(senderRef);
+    startFragmentedSend(signal, variant, 6, sender);
+
+    debugPrintFragmentCounts();
+
+    Uint32 cbData= (((Uint32) 33) << 16) | variant;
+    Callback cb = { safe_cast(&Cmvmi::testNodeFailureCleanupCallback),
+                    cbData };
+
+    Callback* cbPtr = NULL;
+
+    bool passCallback = variant & 1;
+
+    if (passCallback)
+    {
+      DEBUG("Running simBlock failure code WITH CALLBACK for node " 
+            << sendingNode);
+      cbPtr = &cb;
+    }
+    else
+    {
+      DEBUG("Running simBlock failure code IMMEDIATELY (no callback) for node "
+            << sendingNode);
+      cbPtr = &TheEmptyCallback;
+    }
+
+    Uint32 elementsCleaned = simBlockNodeFailure(signal, sendingNode, *cbPtr);
+    
+    DEBUG("Elements cleaned by call : " << elementsCleaned);
+
+    debugPrintFragmentCounts();
+
+    if (! passCallback)
+    {
+      DEBUG("Variant " << variant << " manually executing callback");
+      /* We call the callback inline here to continue processing */
+      testNodeFailureCleanupCallback(signal, 
+                                     cbData,
+                                     elementsCleaned);
+    }
+
+    return;
+  }
+
+  if (testType == 33)
+  {
+    /* Original side - receive cleanup trigger from 'remote' side
+     * after node failure cleanup performed there.  We may have
+     * fragments it managed to send before the cleanup completed
+     * so we'll get rid of them.
+     * This would not be necessary in reality as this node would
+     * be failed
+     */
+    Uint32 sendingNode = refToNode(signal->getSendersBlockRef());
+    DEBUG("Running simBlock failure code for node " << sendingNode);
+
+    Uint32 elementsCleaned = simBlockNodeFailure(signal, sendingNode);
+
+    DEBUG("Elements cleaned : " << elementsCleaned);
+
+    /* Should have no fragment resources in use now */
+    ndbrequire(debugPrintFragmentCounts() == 0);
+
+    /* Now use ReceiverGroup to multicast a fragmented signal to
+     * all database nodes
+     */
+    DEBUG("Starting to send fragmented continueB to all nodes inc. self : ");
+    NodeReceiverGroup allNodes(CMVMI, c_dbNodes);
+    
+    unsigned nodeId = 0;
+    while((nodeId = c_dbNodes.find(nodeId+1)) != BitmaskImpl::NotFound)
+    {
+      DEBUG("Node " << nodeId);
+    }
+
+    startFragmentedSend(signal, variant, 8, allNodes);
+
+    debugPrintFragmentCounts();
+
+    Uint32 cbData= (((Uint32) 34) << 16) | variant;
+    Callback cb = { safe_cast(&Cmvmi::testNodeFailureCleanupCallback),
+                    cbData };
+    
+    Callback* cbPtr = NULL;
+    
+    bool passCallback = variant & 2;
+
+    if (passCallback)
+    {
+      DEBUG("Running simBlock failure code for self WITH CALLBACK (" 
+            << getOwnNodeId() << ")");
+      cbPtr= &cb;
+    }
+    else
+    {
+      DEBUG("Running simBlock failure code for self IMMEDIATELY (no callback) ("
+            << getOwnNodeId() << ")");
+      cbPtr= &TheEmptyCallback;
+    }
+    
+
+    /* Fragmented signals being sent will have this node removed
+     * from their receiver group, but will keep sending to the 
+     * other node(s).
+     * Other node(s) should therefore receive the complete signals.
+     * We will then receive only the first fragment of each of 
+     * the signals which must be removed later.
+     */
+    elementsCleaned = simBlockNodeFailure(signal, getOwnNodeId(), *cbPtr);
+
+    DEBUG("Elements cleaned : " << elementsCleaned);
+    
+    debugPrintFragmentCounts();
+
+    /* Callback will send a signal to self to clean up fragments that 
+     * were sent to self before the send was cancelled.  
+     * (Again, unnecessary in a 'real' situation
+     */
+    if (!passCallback)
+    {
+      DEBUG("Variant " << variant << " manually executing callback");
+
+      testNodeFailureCleanupCallback(signal,
+                                     cbData,
+                                     elementsCleaned);
+    }
+
+    return;
+  }
+  
+  if (testType == 34)
+  {
+    /* Cleanup fragments which were sent before send was cancelled. */
+    Uint32 elementsCleaned = simBlockNodeFailure(signal, getOwnNodeId());
+    
+    DEBUG("Elements cleaned " << elementsCleaned);
+    
+    /* All FragInfo should be clear, may still be sending some
+     * to other node(s)
+     */
+    debugPrintFragmentCounts();
+
+    DEBUG("Variant " << variant << " completed.");
+    
+    if (++variant < NUM_VARIANTS)
+    {
+      DEBUG("Re-executing with variant " << variant);
+      Uint32* sigData = signal->getDataPtrSend();
+      sigData[0] = variant;
+      sigData[1] = 30;
+      sendSignal(reference(), GSN_TESTSIG, signal, 2, JBB);
+    }
+//    else
+//    {
+//      // Infinite loop to test for leaks
+//       DEBUG("Back to zero");
+//       Uint32* sigData = signal->getDataPtrSend();
+//       sigData[0] = 0;
+//       sigData[1] = 30;
+//       sendSignal(reference(), GSN_TESTSIG, signal, 2, JBB);
+//    }
+  }
+}
+
 static Uint32 g_print;
 static LinearSectionPtr g_test[3];
 
@@ -1337,6 +1720,16 @@ Cmvmi::execTESTSIG(Signal* signal){
   
   NodeReceiverGroup rg(CMVMI, c_dbNodes);
 
+  /**
+   * Testing SimulatedBlock fragment assembly cleanup
+   */
+  if ((testType >= 30) &&
+      (testType < 40))
+  {
+    testFragmentedCleanup(signal, testType, ref);
+    return;
+  }
+
   if(signal->getSendersBlockRef() == ref){
     /**
      * Signal from API (not via NodeReceiverGroup)

=== modified file 'storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp'
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp	2009-10-08 10:19:19 +0000
@@ -114,6 +114,9 @@ private:
   Cmvmi(const Cmvmi &obj);
   void operator = (const Cmvmi &);
 
+  void startFragmentedSend(Signal* signal, Uint32 variant, Uint32 numSigs, NodeReceiverGroup rg);
+  void testNodeFailureCleanupCallback(Signal* signal, Uint32 variant, Uint32 elementsCleaned);
+  void testFragmentedCleanup(Signal* signal, Uint32 testType, Uint32 variant);
   void sendFragmentedComplete(Signal* signal, Uint32 data, Uint32 returnCode);
 };
 

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2009-09-30 18:51:17 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2009-10-08 10:19:19 +0000
@@ -3896,6 +3896,17 @@ Dbdict::restartDropObj_commit_complete_d
 /* ---------------------------------------------------------------- */
 /* **************************************************************** */
 
+void Dbdict::handleApiFailureCallback(Signal* signal, 
+                                      Uint32 failedNodeId,
+                                      Uint32 ignoredRc)
+{
+  jamEntry();
+  
+  signal->theData[0] = failedNodeId;
+  signal->theData[1] = reference();
+  sendSignal(QMGR_REF, GSN_API_FAILCONF, signal, 2, JBB);
+}
+
 /* ---------------------------------------------------------------- */
 // We receive a report of an API that failed.
 /* ---------------------------------------------------------------- */
@@ -3913,11 +3924,27 @@ void Dbdict::execAPI_FAILREQ(Signal* sig
   }//if
 #endif
 
-  signal->theData[0] = failedApiNode;
-  signal->theData[1] = reference();
-  sendSignal(retRef, GSN_API_FAILCONF, signal, 2, JBB);
+  ndbrequire(retRef == QMGR_REF); // As callback hard-codes QMGR_REF
+  Callback cb = { safe_cast(&Dbdict::handleApiFailureCallback),
+                  failedApiNode };
+  simBlockNodeFailure(signal, failedApiNode, cb);
 }//execAPI_FAILREQ()
 
+void Dbdict::handleNdbdFailureCallback(Signal* signal, 
+                                       Uint32 failedNodeId, 
+                                       Uint32 ignoredRc)
+{
+  jamEntry();
+
+  /* Node failure handling is complete */
+  NFCompleteRep * const nfCompRep = (NFCompleteRep *)&signal->theData[0];
+  nfCompRep->blockNo      = DBDICT;
+  nfCompRep->nodeId       = getOwnNodeId();
+  nfCompRep->failedNodeId = failedNodeId;
+  sendSignal(DBDIH_REF, GSN_NF_COMPLETEREP, signal, 
+             NFCompleteRep::SignalLength, JBB);
+}
+
 /* ---------------------------------------------------------------- */
 // We receive a report of one or more node failures of kernel nodes.
 /* ---------------------------------------------------------------- */
@@ -3981,14 +4008,12 @@ void Dbdict::execNODE_FAILREP(Signal* si
       c_nodes.getPtr(nodePtr, i);
 
       nodePtr.p->nodeState = NodeRecord::NDB_NODE_DEAD;
-      NFCompleteRep * const nfCompRep = (NFCompleteRep *)&signal->theData[0];
-      nfCompRep->blockNo      = DBDICT;
-      nfCompRep->nodeId       = getOwnNodeId();
-      nfCompRep->failedNodeId = nodePtr.i;
-      sendSignal(DBDIH_REF, GSN_NF_COMPLETEREP, signal, 
-		 NFCompleteRep::SignalLength, JBB);
-      
       c_aliveNodes.clear(i);
+
+      Callback cb = {safe_cast(&Dbdict::handleNdbdFailureCallback),
+                     i};
+
+      simBlockNodeFailure(signal, nodePtr.i, cb);
     }//if
   }//for
 

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp	2009-08-21 09:35:23 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp	2009-10-09 09:09:48 +0000
@@ -2122,6 +2122,7 @@ private:
   friend struct DictLockRecord;
 
   struct DictLockRecord {
+    DictLockRecord() {}
     DictLockReq req;
     const DictLockType* lt;
     bool locked;
@@ -2153,7 +2154,12 @@ private:
 
   // NF handling
   void removeStaleDictLocks(Signal* signal, const Uint32* theFailedNodes);
-
+  void handleNdbdFailureCallback(Signal* signal, 
+                                 Uint32 failedNodeId,
+                                 Uint32 ignoredRc);
+  void handleApiFailureCallback(Signal* signal,
+                                Uint32 failedNodeId,
+                                Uint32 ignoredRc);
 
   // Statement blocks
 

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp	2009-10-15 12:27:43 +0000
@@ -192,6 +192,7 @@ print(const char * filename, const Schem
 NDB_COMMAND(printSchemafile, 
 	    "printSchemafile", "printSchemafile", "Prints a schemafile", 16384)
 { 
+  ndb_init();
   progname = argv[0];
   int exitcode = 0;
 

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/printSysfile.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/printSysfile.cpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/printSysfile.cpp	2009-10-15 12:27:43 +0000
@@ -122,6 +122,7 @@ print(const char * filename, const Sysfi
 
 NDB_COMMAND(printSysfile, 
 	    "printSysfile", "printSysfile", "Prints a sysfile", 16384){ 
+  ndb_init();
   if(argc < 2){
     usage(argv[0]);
     return 0;

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/printSysfile/printSysfile.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/printSysfile/printSysfile.cpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/printSysfile/printSysfile.cpp	2009-10-15 12:27:43 +0000
@@ -120,6 +120,7 @@ print(const char * filename, const Sysfi
 
 NDB_COMMAND(printSysfile, 
 	    "printSysfile", "printSysfile", "Prints a sysfile", 16384){ 
+  ndb_init();
   if(argc < 2){
     usage(argv[0]);
     return 0;

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2009-09-22 07:32:29 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2009-10-12 05:43:10 +0000
@@ -7086,31 +7086,22 @@ void Dblqh::execACCKEYREF(Signal* signal
   ndbrequire(!LqhKeyReq::getNrCopyFlag(tcPtr->reqinfo));
   
   /**
-   * Only primary replica can get ZTUPLE_ALREADY_EXIST || ZNO_TUPLE_FOUND
+   * Not only primary replica can get ZTUPLE_ALREADY_EXIST || ZNO_TUPLE_FOUND
    *
-   * Unless it's a simple or dirty read
-   *
-   * NOT TRUE!
    * 1) op1 - primary insert ok
    * 2) op1 - backup insert fail (log full or what ever)
    * 3) op1 - delete ok @ primary
    * 4) op1 - delete fail @ backup
    *
    * -> ZNO_TUPLE_FOUND is possible
+   *
+   * 1) op1 primary delete ok
+   * 2) op1 backup delete fail (log full or what ever)
+   * 3) op2 insert ok @ primary
+   * 4) op2 insert fail @ backup
+   *
+   * -> ZTUPLE_ALREADY_EXIST
    */
-  if (unlikely(! (tcPtr->seqNoReplica == 0 ||
-                  errCode != ZTUPLE_ALREADY_EXIST ||
-                  (tcPtr->operation == ZREAD && 
-                   (tcPtr->dirtyOp || tcPtr->opSimple)))))
-  {
-    jamLine(Uint32(tcPtr->operation));
-    jamLine(Uint32(tcPtr->seqNoReplica));
-    jamLine(Uint32(errCode));
-    jamLine(Uint32(tcPtr->dirtyOp));
-    jamLine(Uint32(tcPtr->opSimple));
-    ndbrequire(false);
-  }
-  
   tcPtr->abortState = TcConnectionrec::ABORT_FROM_LQH;
   abortCommonLab(signal);
   return;

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp	2009-10-15 12:27:43 +0000
@@ -56,6 +56,7 @@ Uint32 startAtPageIndex = 0;
 Uint32 *redoLogPage;
 
 NDB_COMMAND(redoLogFileReader,  "redoLogFileReader", "redoLogFileReader", "Read a redo log file", 16384) { 
+  ndb_init();
   Uint32 wordIndex = 0;
   Uint32 oldWordIndex = 0;
   Uint32 recordType = 1234567890;

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2009-09-01 12:27:40 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2009-10-09 09:09:48 +0000
@@ -488,6 +488,8 @@ public:
   /* WHEN THE INDEX IS DROPPED.               */
   /* **************************************** */
   struct TcIndexData {
+    TcIndexData() {}
+
     /**
      *  IndexState
      */
@@ -942,11 +944,12 @@ public:
 
     enum NodeFailBits
     {
-      NF_TAKEOVER          = 0x1,
-      NF_CHECK_SCAN        = 0x2,
-      NF_CHECK_TRANSACTION = 0x4,
-      NF_CHECK_DROP_TAB    = 0x8,
-      NF_NODE_FAIL_BITS    = 0xF // All bits...
+      NF_TAKEOVER          = 0x01,
+      NF_CHECK_SCAN        = 0x02,
+      NF_CHECK_TRANSACTION = 0x04,
+      NF_CHECK_DROP_TAB    = 0x08,
+      NF_BLOCK_HANDLE      = 0x10,
+      NF_NODE_FAIL_BITS    = 0x1F // All bits...
     };
     Uint32 m_nf_bits;
     NdbNodeBitmask m_lqh_trans_conf;
@@ -1634,7 +1637,10 @@ private:
 			 LocalDLList<ScanFragRec>::Head&);
 
   void nodeFailCheckTransactions(Signal*,Uint32 transPtrI,Uint32 failedNodeId);
+  void ndbdFailBlockCleanupCallback(Signal* signal, Uint32 failedNodeId, Uint32 ignoredRc);
   void checkNodeFailComplete(Signal* signal, Uint32 failedNodeId, Uint32 bit);
+
+  void apiFailBlockCleanupCallback(Signal* signal, Uint32 failedNodeId, Uint32 ignoredRc);
   
   // Initialisation
   void initData();

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2009-10-05 10:38:50 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2009-10-08 10:19:19 +0000
@@ -1157,13 +1157,15 @@ Dbtc::removeMarkerForFailedAPI(Signal* s
       capiConnectClosing[nodeId]--;
       if (capiConnectClosing[nodeId] == 0) {
         jam();
+
         /********************************************************************/
         // No outstanding ABORT or COMMIT's of this failed API node. 
-        // We can respond with API_FAILCONF
+        // Perform SimulatedBlock level cleanup before sending
+        // API_FAILCONF
         /********************************************************************/
-        signal->theData[0] = nodeId;
-        signal->theData[1] = cownref;
-        sendSignal(capiFailRef, GSN_API_FAILCONF, signal, 2, JBB);
+        Callback cb = {safe_cast(&Dbtc::apiFailBlockCleanupCallback),
+                       nodeId};
+        simBlockNodeFailure(signal, nodeId, cb);
       }
       return;
     }
@@ -7335,22 +7337,24 @@ void Dbtc::execNODE_FAILREP(Signal* sign
 
   cmasterNodeId = tnewMasterId;
   
+  HostRecordPtr myHostPtr;
+
   tcNodeFailptr.i = 0;
   ptrAss(tcNodeFailptr, tcFailRecord);
   for (i = 0; i < tnoOfNodes; i++) 
   {
     jam();
-    hostptr.i = cdata[i];
-    ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+    myHostPtr.i = cdata[i];
+    ptrCheckGuard(myHostPtr, chostFilesize, hostRecord);
     
     /*------------------------------------------------------------*/
     /*       SET STATUS OF THE FAILED NODE TO DEAD SINCE IT HAS   */
     /*       FAILED.                                              */
     /*------------------------------------------------------------*/
-    hostptr.p->hostStatus = HS_DEAD;
-    hostptr.p->m_nf_bits = HostRecord::NF_NODE_FAIL_BITS;
+    myHostPtr.p->hostStatus = HS_DEAD;
+    myHostPtr.p->m_nf_bits = HostRecord::NF_NODE_FAIL_BITS;
     c_ongoing_take_over_cnt++;
-    c_alive_nodes.clear(hostptr.i);
+    c_alive_nodes.clear(myHostPtr.i);
 
     if (tcNodeFailptr.p->failStatus == FS_LISTENING) 
     {
@@ -7359,7 +7363,7 @@ void Dbtc::execNODE_FAILREP(Signal* sign
       /*       THE CURRENT TAKE OVER CAN BE AFFECTED BY THIS NODE   */
       /*       FAILURE.                                             */
       /*------------------------------------------------------------*/
-      if (hostptr.p->lqhTransStatus == LTS_ACTIVE) 
+      if (myHostPtr.p->lqhTransStatus == LTS_ACTIVE) 
       {
 	jam();
 	/*------------------------------------------------------------*/
@@ -7367,18 +7371,21 @@ void Dbtc::execNODE_FAILREP(Signal* sign
 	/*       PROTOCOL FOR TC.                                     */
 	/*------------------------------------------------------------*/
 	signal->theData[0] = TcContinueB::ZNODE_TAKE_OVER_COMPLETED;
-	signal->theData[1] = hostptr.i;
+	signal->theData[1] = myHostPtr.i;
 	sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
       }//if
     }//if
     
     jam();
-    signal->theData[0] = hostptr.i;
+    signal->theData[0] = myHostPtr.i;
     sendSignal(cownref, GSN_TAKE_OVERTCREQ, signal, 1, JBB);
     
-    checkScanActiveInFailedLqh(signal, 0, hostptr.i);
-    checkWaitDropTabFailedLqh(signal, hostptr.i, 0); // nodeid, tableid
-    nodeFailCheckTransactions(signal, 0, hostptr.i);
+    checkScanActiveInFailedLqh(signal, 0, myHostPtr.i);
+    checkWaitDropTabFailedLqh(signal, myHostPtr.i, 0); // nodeid, tableid
+    nodeFailCheckTransactions(signal, 0, myHostPtr.i);
+    Callback cb = {safe_cast(&Dbtc::ndbdFailBlockCleanupCallback), 
+                  myHostPtr.i};
+    simBlockNodeFailure(signal, myHostPtr.i, cb);
   }
 }//Dbtc::execNODE_FAILREP()
 
@@ -7518,6 +7525,28 @@ Dbtc::nodeFailCheckTransactions(Signal* 
   }
 }
 
+void
+Dbtc::ndbdFailBlockCleanupCallback(Signal* signal,
+                                   Uint32 failedNodeId,
+                                   Uint32 ignoredRc)
+{
+  jamEntry();
+  
+  checkNodeFailComplete(signal, failedNodeId,
+                        HostRecord::NF_BLOCK_HANDLE);
+}
+
+void
+Dbtc::apiFailBlockCleanupCallback(Signal* signal,
+                                  Uint32 failedNodeId,
+                                  Uint32 ignoredRc)
+{
+  jamEntry();
+  
+  signal->theData[0] = failedNodeId;
+  signal->theData[1] = cownref;
+  sendSignal(capiFailRef, GSN_API_FAILCONF, signal, 2, JBB);
+}
 
 void
 Dbtc::checkScanFragList(Signal* signal,

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2009-09-04 10:15:28 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2009-10-09 09:09:48 +0000
@@ -472,6 +472,7 @@ typedef Ptr<Fragoperrec> FragoperrecPtr;
 
   // Scan Lock
   struct ScanLock {
+    ScanLock() {}
     Uint32 m_accLockOp;
     union {
       Uint32 nextPool;
@@ -568,6 +569,7 @@ typedef Ptr<Fragoperrec> FragoperrecPtr;
 
   struct Page_request 
   {
+    Page_request() {}
     Local_key m_key;
     Uint32 m_frag_ptr_i;
     Uint32 m_extent_info_ptr;
@@ -1307,6 +1309,7 @@ typedef Ptr<HostBuffer> HostBufferPtr;
    * Build index operation record.
    */
   struct BuildIndexRec {
+    BuildIndexRec() {}
     // request cannot use signal class due to extra members
     Uint32 m_request[BuildIndxReq::SignalLength];
     Uint8  m_build_vs;          // varsize pages
@@ -1714,6 +1717,7 @@ private:
   void execALTER_TAB_REQ(Signal* signal);
   void execTUP_DEALLOCREQ(Signal* signal);
   void execTUP_WRITELOG_REQ(Signal* signal);
+  void execNODE_FAILREP(Signal* signal);
 
   // Ordered index related
   void execBUILDINDXREQ(Signal* signal);

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp	2009-10-08 10:19:19 +0000
@@ -32,6 +32,7 @@
 #include <signaldata/FsRemoveReq.hpp>
 #include <signaldata/TupCommit.hpp>
 #include <signaldata/TupKey.hpp>
+#include <signaldata/NodeFailRep.hpp>
 
 #include <signaldata/DropTab.hpp>
 #include <SLList.hpp>
@@ -65,6 +66,7 @@ Dbtup::Dbtup(Block_context& ctx, Pgman* 
   addRecSignal(GSN_DEBUG_SIG, &Dbtup::execDEBUG_SIG);
   addRecSignal(GSN_CONTINUEB, &Dbtup::execCONTINUEB);
   addRecSignal(GSN_LCP_FRAG_ORD, &Dbtup::execLCP_FRAG_ORD);
+  addRecSignal(GSN_NODE_FAILREP, &Dbtup::execNODE_FAILREP);
 
   addRecSignal(GSN_DUMP_STATE_ORD, &Dbtup::execDUMP_STATE_ORD);
   addRecSignal(GSN_SEND_PACKED, &Dbtup::execSEND_PACKED);
@@ -773,4 +775,22 @@ void Dbtup::releaseFragrec(FragrecordPtr
 }//Dbtup::releaseFragrec()
 
 
+void Dbtup::execNODE_FAILREP(Signal* signal)
+{
+  jamEntry();
+  const NodeFailRep * rep = (NodeFailRep*)signal->getDataPtr();
+  NdbNodeBitmask failed; 
+  failed.assign(NdbNodeBitmask::Size, rep->theNodes);
+
+  /* Block level cleanup */
+  for(unsigned i = 1; i < MAX_NDB_NODES; i++) {
+    jam();
+    if(failed.get(i)) {
+      jam();
+      Uint32 elementsCleaned = simBlockNodeFailure(signal, i); // No callback
+      ndbassert(elementsCleaned == 0); // No distributed fragmented signals
+      (void) elementsCleaned; // Remove compiler warning
+    }//if
+  }//for
+}
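
One small idiom recurs in every execNODE_FAILREP handler added here: elementsCleaned
is only consumed by ndbassert(), which (like the standard assert) is compiled out of
non-debug builds, so the trailing (void) cast is what keeps the unused-variable
warning away in those builds. A standalone equivalent:

  #include <cassert>

  static int countCleaned() { return 0; }

  int main()
  {
    int elementsCleaned = countCleaned();
    assert(elementsCleaned == 0);   // compiled out when NDEBUG is defined
    (void) elementsCleaned;         // silences unused-variable warnings in that case
    return 0;
  }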
 

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp	2009-10-08 09:55:36 +0000
@@ -24,6 +24,7 @@
 
 struct Tup_page 
 {
+  Tup_page() {}
   struct File_formats::Page_header m_page_header;
   Uint32 m_restart_seq;
   Uint32 page_state;

=== modified file 'storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp	2009-06-01 08:52:20 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp	2009-10-08 09:55:36 +0000
@@ -356,6 +356,7 @@ private:
 
   // ScanLock
   struct ScanLock {
+    ScanLock() {}
     Uint32 m_accLockOp;
     union {
     Uint32 nextPool;

=== modified file 'storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp'
--- a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp	2009-10-08 10:19:19 +0000
@@ -29,6 +29,7 @@
 #include <signaldata/TcKeyFailConf.hpp>
 #include <signaldata/GetTabInfo.hpp>
 #include <signaldata/DictTabInfo.hpp>
+#include <signaldata/NodeFailRep.hpp>
 
 #include <signaldata/UtilSequence.hpp>
 #include <signaldata/UtilPrepare.hpp>
@@ -66,6 +67,7 @@ DbUtil::DbUtil(Block_context& ctx) :
   addRecSignal(GSN_NDB_STTOR, &DbUtil::execNDB_STTOR);
   addRecSignal(GSN_DUMP_STATE_ORD, &DbUtil::execDUMP_STATE_ORD);
   addRecSignal(GSN_CONTINUEB, &DbUtil::execCONTINUEB);
+  addRecSignal(GSN_NODE_FAILREP, &DbUtil::execNODE_FAILREP);
   
   //addRecSignal(GSN_TCSEIZEREF, &DbUtil::execTCSEIZEREF);
   addRecSignal(GSN_TCSEIZECONF, &DbUtil::execTCSEIZECONF);
@@ -304,6 +306,25 @@ DbUtil::execCONTINUEB(Signal* signal){
 }
 
 void
+DbUtil::execNODE_FAILREP(Signal* signal){
+  jamEntry();
+  const NodeFailRep * rep = (NodeFailRep*)signal->getDataPtr();
+  NdbNodeBitmask failed; 
+  failed.assign(NdbNodeBitmask::Size, rep->theNodes);
+
+  /* Block level cleanup */
+  for(unsigned i = 1; i < MAX_NDB_NODES; i++) {
+    jam();
+    if(failed.get(i)) {
+      jam();
+      Uint32 elementsCleaned = simBlockNodeFailure(signal, i); // No callback
+      ndbassert(elementsCleaned == 0); // No distributed fragmented signals
+      (void) elementsCleaned; // Remove compiler warning
+    }//if
+  }//for
+}
+
+void
 DbUtil::execDUMP_STATE_ORD(Signal* signal){
   jamEntry();
 

=== modified file 'storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp'
--- a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp	2009-10-09 09:09:48 +0000
@@ -76,6 +76,7 @@ protected:
   void execNDB_STTOR(Signal* signal);
   void execDUMP_STATE_ORD(Signal* signal);
   void execCONTINUEB(Signal* signal);
+  void execNODE_FAILREP(Signal* signal);
 
   /**
    * Sequence Service : Public interface
@@ -425,6 +426,7 @@ public:
    * Lock manager
    */
   struct LockQueueElement {
+    LockQueueElement() {}
     Uint32 m_senderData;
     Uint32 m_senderRef;
     union {

=== modified file 'storage/ndb/src/kernel/blocks/lgman.cpp'
--- a/storage/ndb/src/kernel/blocks/lgman.cpp	2009-08-21 09:26:34 +0000
+++ b/storage/ndb/src/kernel/blocks/lgman.cpp	2009-10-08 10:19:19 +0000
@@ -29,6 +29,7 @@
 #include <signaldata/SumaImpl.hpp>
 #include <signaldata/LgmanContinueB.hpp>
 #include <signaldata/GetTabInfo.hpp>
+#include <signaldata/NodeFailRep.hpp>
 #include "ndbfs/Ndbfs.hpp"
 #include "dbtup/Dbtup.hpp"
 
@@ -66,6 +67,7 @@ Lgman::Lgman(Block_context & ctx) :
   addRecSignal(GSN_READ_CONFIG_REQ, &Lgman::execREAD_CONFIG_REQ);
   addRecSignal(GSN_DUMP_STATE_ORD, &Lgman::execDUMP_STATE_ORD);
   addRecSignal(GSN_CONTINUEB, &Lgman::execCONTINUEB);
+  addRecSignal(GSN_NODE_FAILREP, &Lgman::execNODE_FAILREP);
 
   addRecSignal(GSN_CREATE_FILE_REQ, &Lgman::execCREATE_FILE_REQ);
   addRecSignal(GSN_CREATE_FILEGROUP_REQ, &Lgman::execCREATE_FILEGROUP_REQ);
@@ -258,6 +260,26 @@ Lgman::execCONTINUEB(Signal* signal){
 }
 
 void
+Lgman::execNODE_FAILREP(Signal* signal)
+{
+  jamEntry();
+  const NodeFailRep * rep = (NodeFailRep*)signal->getDataPtr();
+  NdbNodeBitmask failed; 
+  failed.assign(NdbNodeBitmask::Size, rep->theNodes);
+
+  /* Block level cleanup */
+  for(unsigned i = 1; i < MAX_NDB_NODES; i++) {
+    jam();
+    if(failed.get(i)) {
+      jam();
+      Uint32 elementsCleaned = simBlockNodeFailure(signal, i); // No callback
+      ndbassert(elementsCleaned == 0); // No distributed fragmented signals
+      (void) elementsCleaned; // Remove compiler warning
+    }//if
+  }//for
+}
+
+void
 Lgman::execDUMP_STATE_ORD(Signal* signal){
   jamEntry();
   if(signal->theData[0] == 12001)

=== modified file 'storage/ndb/src/kernel/blocks/lgman.hpp'
--- a/storage/ndb/src/kernel/blocks/lgman.hpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/blocks/lgman.hpp	2009-10-08 10:19:19 +0000
@@ -47,6 +47,7 @@ protected:
   void execREAD_CONFIG_REQ(Signal* signal);
   void execDUMP_STATE_ORD(Signal* signal);
   void execCONTINUEB(Signal* signal);
+  void execNODE_FAILREP(Signal* signal);
   
   void execCREATE_FILE_REQ(Signal* signal);
   void execCREATE_FILEGROUP_REQ(Signal* signal);

=== modified file 'storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp	2009-08-03 11:28:27 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp	2009-10-08 10:19:19 +0000
@@ -1705,6 +1705,18 @@ void Ndbcntr::execNODE_FAILREP(Signal* s
   sendSignal(QMGR_REF, GSN_NODE_FAILREP, signal,
 	     NodeFailRep::SignalLength, JBB);
 
+  sendSignal(DBUTIL_REF, GSN_NODE_FAILREP, signal,
+             NodeFailRep::SignalLength, JBB);
+
+  sendSignal(DBTUP_REF, GSN_NODE_FAILREP, signal,
+             NodeFailRep::SignalLength, JBB);
+
+  sendSignal(TSMAN_REF, GSN_NODE_FAILREP, signal,
+             NodeFailRep::SignalLength, JBB);
+
+  sendSignal(LGMAN_REF, GSN_NODE_FAILREP, signal,
+             NodeFailRep::SignalLength, JBB);
+
   if (c_stopRec.stopReq.senderRef)
   {
     jam();

=== modified file 'storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2009-09-01 10:50:11 +0000
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2009-10-06 13:29:45 +0000
@@ -3501,9 +3501,9 @@ void Qmgr::handleApiCloseComConf(Signal*
   CloseComReqConf * const closeCom = (CloseComReqConf *)&signal->theData[0];
 
   /* Api failure special case */
-  for(Uint32 nodeId = 0; nodeId < MAX_NDB_NODES; nodeId ++)
+  for(Uint32 nodeId = 0; nodeId < MAX_NODES; nodeId ++)
   {
-    if(NdbNodeBitmask::get(closeCom->theNodes, nodeId))
+    if (NodeBitmask::get(closeCom->theNodes, nodeId))
     {
       jam();
       /* Check that *only* 1 *API* node is included in
@@ -3511,8 +3511,8 @@ void Qmgr::handleApiCloseComConf(Signal*
        */
       ndbrequire(getNodeInfo(nodeId).getType() != NodeInfo::DB);
       ndbrequire(closeCom->noOfNodes == 1);
-      NdbNodeBitmask::clear(closeCom->theNodes, nodeId);
-      ndbrequire(NdbNodeBitmask::isclear(closeCom->theNodes));
+      NodeBitmask::clear(closeCom->theNodes, nodeId);
+      ndbrequire(NodeBitmask::isclear(closeCom->theNodes));
       
       /* Now that we know communication from the failed Api has
        * ceased, we can send the required API_FAILREQ signals
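
The switch from NdbNodeBitmask/MAX_NDB_NODES to NodeBitmask/MAX_NODES matters because
API node ids can lie above the data-node id range, so a mask and loop sized for data
nodes only would never see the failed API node that CloseComReqConf reports. A
standalone illustration of the effect, with invented limits (the real constants live
in the kernel headers):

  #include <bitset>
  #include <cstdio>

  // Invented limits, for illustration only.
  static const unsigned kMaxDataNodes = 49;   // an "NdbNodeBitmask"-sized range
  static const unsigned kMaxAllNodes  = 256;  // a "NodeBitmask"-sized range

  int main()
  {
    const unsigned failedApiNodeId = 150;     // API node id above the data-node range

    std::bitset<kMaxAllNodes> closeComNodes;  // plays the role of closeCom->theNodes
    closeComNodes.set(failedApiNodeId);

    bool foundInDataNodeScan = false;
    for (unsigned i = 0; i < kMaxDataNodes; i++)
      if (closeComNodes.test(i)) foundInDataNodeScan = true;

    bool foundInFullScan = false;
    for (unsigned i = 0; i < kMaxAllNodes; i++)
      if (closeComNodes.test(i)) foundInFullScan = true;

    std::printf("data-node-range scan finds it: %d, full-range scan finds it: %d\n",
                (int)foundInDataNodeScan, (int)foundInFullScan);
    return 0;
  }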

=== modified file 'storage/ndb/src/kernel/blocks/suma/Suma.cpp'
--- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp	2009-10-08 10:19:19 +0000
@@ -717,20 +717,24 @@ void Suma::execAPI_FAILREQ(Signal* signa
   jamEntry();
   DBUG_ENTER("Suma::execAPI_FAILREQ");
   Uint32 failedApiNode = signal->theData[0];
-  BlockReference retRef = signal->theData[1];
+  ndbrequire(signal->theData[1] == QMGR_REF); // As callback hard-codes QMGR
 
   c_connected_nodes.clear(failedApiNode);
 
   if (c_failedApiNodes.get(failedApiNode))
   {
     jam();
+    /* Being handled already, just conf */
     goto CONF;
   }
 
   if (!c_subscriber_nodes.get(failedApiNode))
   {
     jam();
-    goto CONF;
+    /* No Subscribers on that node, no SUMA 
+     * specific work to do
+     */
+    goto BLOCK_CLEANUP;
   }
 
   c_failedApiNodes.set(failedApiNode);
@@ -744,15 +748,52 @@ void Suma::execAPI_FAILREQ(Signal* signa
   sendSignal(SUMA_REF, GSN_CONTINUEB, signal, 2, JBB);
   return;
 
+BLOCK_CLEANUP:
+  jam();
+  api_fail_block_cleanup(signal, failedApiNode);
+  DBUG_VOID_RETURN;
+
 CONF:
+  jam();
   signal->theData[0] = failedApiNode;
   signal->theData[1] = reference();
-  sendSignal(retRef, GSN_API_FAILCONF, signal, 2, JBB);
+  sendSignal(QMGR_REF, GSN_API_FAILCONF, signal, 2, JBB);
 
   DBUG_VOID_RETURN;
 }//execAPI_FAILREQ()
 
 void
+Suma::api_fail_block_cleanup_callback(Signal* signal,
+                                      Uint32 failedNodeId,
+                                      Uint32 elementsCleaned)
+{
+  jamEntry();
+
+  /* Suma should not have any block level elements
+   * to be cleaned (Fragmented send/receive structures etc.)
+   * As it only uses Fragmented send/receive locally
+   */
+  ndbassert(elementsCleaned == 0);
+
+  /* Node failure handling is complete */
+  signal->theData[0] = failedNodeId;
+  signal->theData[1] = reference();
+  sendSignal(QMGR_REF, GSN_API_FAILCONF, signal, 2, JBB);
+  c_failedApiNodes.clear(failedNodeId);
+}
+
+void
+Suma::api_fail_block_cleanup(Signal* signal, Uint32 failedNode)
+{
+  jam();
+
+  Callback cb = {safe_cast(&Suma::api_fail_block_cleanup_callback),
+                 failedNode};
+  
+  simBlockNodeFailure(signal, failedNode, cb);
+}
+
+void
 Suma::api_fail_gci_list(Signal* signal, Uint32 nodeId)
 {
   jam();
@@ -869,10 +910,7 @@ Suma::api_fail_subscriber_list(Signal* s
   if (iter.curr.isNull())
   {
     jam();
-    signal->theData[0] = nodeId;
-    signal->theData[1] = reference();
-    sendSignal(QMGR_REF, GSN_API_FAILCONF, signal, 2, JBB);
-    c_failedApiNodes.clear(nodeId);
+    api_fail_block_cleanup(signal, nodeId);
     return;
   }
 
@@ -981,10 +1019,9 @@ Suma::api_fail_subscription(Signal* sign
   }
 
   c_subOpPool.release(subOpPtr);
-  signal->theData[0] = nodeId;
-  signal->theData[1] = reference();
-  sendSignal(QMGR_REF, GSN_API_FAILCONF, signal, 2, JBB);
-  c_failedApiNodes.clear(nodeId);
+
+  /* Now do block level cleanup */
+  api_fail_block_cleanup(signal, nodeId);
 }
 
 void
@@ -1059,6 +1096,17 @@ Suma::execNODE_FAILREP(Signal* signal){
       }
     }
   }
+
+  /* Block level cleanup */
+  for(unsigned i = 1; i < MAX_NDB_NODES; i++) {
+    jam();
+    if(failed.get(i)) {
+      jam();
+      Uint32 elementsCleaned = simBlockNodeFailure(signal, i); // No callback
+      ndbassert(elementsCleaned == 0); // As Suma has no remote fragmented signals
+      (void) elementsCleaned; // Remove compiler warning

+    }//if
+  }//for
   
   c_alive_nodes.assign(tmp);
   

=== modified file 'storage/ndb/src/kernel/blocks/suma/Suma.hpp'
--- a/storage/ndb/src/kernel/blocks/suma/Suma.hpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.hpp	2009-10-09 09:09:48 +0000
@@ -135,6 +135,7 @@ public:
   };
 
   struct Subscriber {
+    Subscriber() {}
     Uint32 m_senderRef;
     Uint32 m_senderData;
     Uint32 nextList;
@@ -197,6 +198,8 @@ public:
 
   struct SubOpRecord
   {
+    SubOpRecord() {}
+
     enum OpType
     {
       R_SUB_START_REQ,
@@ -434,6 +437,10 @@ public:
   void api_fail_gci_list(Signal*, Uint32 node);
   void api_fail_subscriber_list(Signal*, Uint32 node);
   void api_fail_subscription(Signal*);
+  void api_fail_block_cleanup(Signal* signal, Uint32 failedNode);
+  void api_fail_block_cleanup_callback(Signal* signal,
+                                       Uint32 failedNodeId,
+                                       Uint32 elementsCleaned);
 
   void execSUB_GCP_COMPLETE_ACK(Signal* signal);
 

=== modified file 'storage/ndb/src/kernel/blocks/trix/Trix.hpp'
--- a/storage/ndb/src/kernel/blocks/trix/Trix.hpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/blocks/trix/Trix.hpp	2009-10-08 09:55:36 +0000
@@ -71,6 +71,7 @@ private:
 
   // Node data needed when communicating with remote TRIX:es
   struct NodeRecord {
+    NodeRecord() {}
     bool alive;
     BlockReference trixRef;
     union {

=== modified file 'storage/ndb/src/kernel/blocks/tsman.cpp'
--- a/storage/ndb/src/kernel/blocks/tsman.cpp	2009-08-21 09:26:34 +0000
+++ b/storage/ndb/src/kernel/blocks/tsman.cpp	2009-10-08 10:19:19 +0000
@@ -30,6 +30,7 @@
 #include <signaldata/DumpStateOrd.hpp>
 #include <signaldata/TsmanContinueB.hpp>
 #include <signaldata/GetTabInfo.hpp>
+#include <signaldata/NodeFailRep.hpp>
 #include <dbtup/Dbtup.hpp>
 
 #define JONAS 0
@@ -60,6 +61,7 @@ Tsman::Tsman(Block_context& ctx,
   addRecSignal(GSN_READ_CONFIG_REQ, &Tsman::execREAD_CONFIG_REQ);
   addRecSignal(GSN_DUMP_STATE_ORD, &Tsman::execDUMP_STATE_ORD);
   addRecSignal(GSN_CONTINUEB, &Tsman::execCONTINUEB);
+  addRecSignal(GSN_NODE_FAILREP, &Tsman::execNODE_FAILREP);
 
   addRecSignal(GSN_CREATE_FILE_REQ, &Tsman::execCREATE_FILE_REQ);
   addRecSignal(GSN_CREATE_FILEGROUP_REQ, &Tsman::execCREATE_FILEGROUP_REQ);
@@ -176,6 +178,26 @@ Tsman::execCONTINUEB(Signal* signal){
   ndbrequire(false);
 }
 
+void
+Tsman::execNODE_FAILREP(Signal* signal)
+{
+  jamEntry();
+  const NodeFailRep * rep = (NodeFailRep*)signal->getDataPtr();
+  NdbNodeBitmask failed; 
+  failed.assign(NdbNodeBitmask::Size, rep->theNodes);
+
+  /* Block level cleanup */
+  for(unsigned i = 1; i < MAX_NDB_NODES; i++) {
+    jam();
+    if(failed.get(i)) {
+      jam();
+      Uint32 elementsCleaned = simBlockNodeFailure(signal, i); // No callback
+      ndbassert(elementsCleaned == 0); // No distributed fragmented signals
+      (void) elementsCleaned; // Remove compiler warning
+    }//if
+  }//for
+}
+
 #ifdef VM_TRACE
 struct TsmanChunk
 { 

=== modified file 'storage/ndb/src/kernel/blocks/tsman.hpp'
--- a/storage/ndb/src/kernel/blocks/tsman.hpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/blocks/tsman.hpp	2009-10-08 10:19:19 +0000
@@ -43,6 +43,7 @@ protected:
   void execREAD_CONFIG_REQ(Signal* signal);
   void execDUMP_STATE_ORD(Signal* signal);
   void execCONTINUEB(Signal* signal);
+  void execNODE_FAILREP(Signal* signal);
 
   void execCREATE_FILE_REQ(Signal* signal);
   void execCREATE_FILEGROUP_REQ(Signal* signal);

=== modified file 'storage/ndb/src/kernel/vm/DLFifoList.hpp'
--- a/storage/ndb/src/kernel/vm/DLFifoList.hpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/vm/DLFifoList.hpp	2009-10-08 09:55:36 +0000
@@ -177,11 +177,16 @@ template <typename P, typename T, typena
 inline
 DLFifoListImpl<P,T,U>::Head::Head()
 {
+  // Require user defined constructor on T since we fiddle
+  // with T's members
+  ASSERT_TYPE_HAS_CONSTRUCTOR(T);
+
   firstItem = RNIL;
   lastItem = RNIL;
 #ifdef VM_TRACE
   in_use = false;
 #endif
+
 }
 
 template <typename P, typename T, typename U>

=== modified file 'storage/ndb/src/kernel/vm/DLHashTable.hpp'
--- a/storage/ndb/src/kernel/vm/DLHashTable.hpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/vm/DLHashTable.hpp	2009-10-08 09:55:36 +0000
@@ -158,6 +158,10 @@ inline
 DLHashTableImpl<P, T, U>::DLHashTableImpl(P & _pool)
   : thePool(_pool)
 {
+  // Require user defined constructor on T since we fiddle
+  // with T's members
+  ASSERT_TYPE_HAS_CONSTRUCTOR(T);
+
   mask = 0;
   hashValues = 0;
 }

=== modified file 'storage/ndb/src/kernel/vm/DLList.hpp'
--- a/storage/ndb/src/kernel/vm/DLList.hpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/vm/DLList.hpp	2009-10-08 09:55:36 +0000
@@ -183,6 +183,9 @@ inline
 DLListImpl<P,T,U>::DLListImpl(P & _pool)
   : thePool(_pool)
 {
+  // Require user defined constructor on T since we fiddle
+  // with T's members
+  ASSERT_TYPE_HAS_CONSTRUCTOR(T);
 }
 
 template <typename P, typename T, typename U>
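
The empty constructors added to ScanLock, Page_request, BuildIndexRec and the other
pool-stored structs pair with the new ASSERT_TYPE_HAS_CONSTRUCTOR(T) checks in the
list and hash-table templates: the containers manipulate T's members directly, so T
is now required to declare its own constructor. A standalone analog of such a
compile-time check, written with C++11 type traits purely for illustration (the
kernel macro predates these traits and is implemented differently):

  #include <type_traits>

  // Element type for an intrusive container.  The empty user-provided
  // constructor mirrors the 'ScanLock() {}' style additions above; it is
  // enough to make the type non-trivially constructible.
  struct Element {
    Element() {}
    unsigned m_data;
    union {
      unsigned nextPool;
      unsigned nextList;
    };
  };

  template <typename T>
  struct IntrusiveList {
    IntrusiveList() {
      // Illustrative analog of ASSERT_TYPE_HAS_CONSTRUCTOR(T).
      static_assert(!std::is_trivially_default_constructible<T>::value,
                    "T must declare its own constructor");
    }
  };

  int main()
  {
    IntrusiveList<Element> list;   // compiles; remove Element() {} and it will not
    (void) list;
    return 0;
  }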

=== modified file 'storage/ndb/src/kernel/vm/DataBuffer.hpp'
--- a/storage/ndb/src/kernel/vm/DataBuffer.hpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/vm/DataBuffer.hpp	2009-10-07 14:08:22 +0000
@@ -410,21 +410,89 @@ template<Uint32 sz>
 inline
 bool
 DataBuffer<sz>::first(DataBufferIterator & it){
-  return first((ConstDataBufferIterator&)it);
+  it.curr.i = head.firstItem;
+  if(it.curr.i == RNIL){
+    it.setNull();
+    return false;
+  }
+  thePool.getPtr(it.curr);
+  it.data = &it.curr.p->data[0];
+  it.ind = 0;
+  it.pos = 0;
+  return true;
 }
 
 template<Uint32 sz>
 inline
 bool
 DataBuffer<sz>::next(DataBufferIterator & it){
-  return next((ConstDataBufferIterator&)it);
+  it.ind ++;
+  it.data ++;
+  it.pos ++;
+  if(it.ind < sz && it.pos < head.used){
+    return true;
+  }
+
+  if(it.pos < head.used){
+    it.curr.i = it.curr.p->nextPool;
+#ifdef ARRAY_GUARD
+    if(it.curr.i == RNIL){
+      /**
+       * This is actually "internal error"
+       * pos can't be less than head.used and at the same time we can't
+       * find next segment
+       *
+       * Note this must not "really" be checked since thePool.getPtr will
+       *  abort when trying to get RNIL. That's why the check is within
+       *  ARRAY_GUARD
+       */
+      ErrorReporter::handleAssert("DataBuffer<sz>::next", __FILE__, __LINE__);
+    }
+#endif
+    thePool.getPtr(it.curr);
+    it.data = &it.curr.p->data[0];
+    it.ind = 0;
+    return true;
+  }
+  it.setNull();
+  return false;
 }
 
 template<Uint32 sz>
 inline
 bool
 DataBuffer<sz>::next(DataBufferIterator & it, Uint32 hops){
-  return next((ConstDataBufferIterator&)it, hops);
+#if 0
+  for (Uint32 i=0; i<hops; i++) {
+    if (!this->next(it))
+      return false;
+  }
+  return true;
+#else
+  if(it.pos + hops < head.used){
+    while(hops >= sz){
+      it.curr.i = it.curr.p->nextPool;
+      thePool.getPtr(it.curr);
+      hops -= sz;
+      it.pos += sz;
+    }
+
+    it.ind += hops;
+    it.pos += hops;
+    if(it.ind < sz){
+      it.data = &it.curr.p->data[it.ind];
+      return true;
+    }
+
+    it.curr.i = it.curr.p->nextPool;
+    thePool.getPtr(it.curr);
+    it.ind -= sz;
+    it.data = &it.curr.p->data[it.ind];
+    return true;
+  }
+  it.setNull();
+  return false;
+#endif
 }
 
 template<Uint32 sz>
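
The DataBuffer<sz> iterator methods above now carry concrete non-const
implementations instead of casting to the const-iterator versions: data sits in
fixed-size segments, and the iterator tracks the current segment, the index within
it and the absolute position, which is what lets next(it, hops) jump over whole
segments. A small standalone analog of that layout (segment size and contents
invented):

  #include <cstdio>
  #include <vector>

  static const unsigned kSegmentSize = 4;   // invented segment size

  struct Iterator {
    unsigned segment;   // which segment we are in
    unsigned ind;       // index within the current segment
    unsigned pos;       // absolute position in the buffer
  };

  struct SegmentedBuffer {
    std::vector< std::vector<unsigned> > segments;
    unsigned used;

    bool first(Iterator& it) const {
      it.segment = 0; it.ind = 0; it.pos = 0;
      return used != 0;
    }

    bool next(Iterator& it, unsigned hops) const {
      if (it.pos + hops >= used) return false;   // would run off the end
      it.pos += hops;
      it.ind += hops;
      while (it.ind >= kSegmentSize) {           // cross segment boundaries
        it.ind -= kSegmentSize;
        it.segment++;
      }
      return true;
    }

    unsigned get(const Iterator& it) const { return segments[it.segment][it.ind]; }
  };

  int main()
  {
    SegmentedBuffer buf;
    buf.used = 10;
    for (unsigned s = 0; s < 3; s++) {
      buf.segments.push_back(std::vector<unsigned>(kSegmentSize));
      for (unsigned i = 0; i < kSegmentSize; i++)
        buf.segments.back()[i] = s * kSegmentSize + i;
    }
    Iterator it;
    if (buf.first(it) && buf.next(it, 6))
      std::printf("element at position %u is %u\n", it.pos, buf.get(it));
    return 0;
  }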

=== modified file 'storage/ndb/src/kernel/vm/SimulatedBlock.cpp'
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp	2009-10-09 09:13:43 +0000
@@ -906,53 +906,87 @@ SimulatedBlock::execSIGNAL_DROPPED_REP(S
 void
 SimulatedBlock::execCONTINUE_FRAGMENTED(Signal * signal){
   ljamEntry();
-  
-  Ptr<FragmentSendInfo> fragPtr;
-  
-  c_segmentedFragmentSendList.first(fragPtr);  
-  for(; !fragPtr.isNull();){
+
+  ContinueFragmented * sig = (ContinueFragmented*)signal->getDataPtrSend();
+  ndbrequire(signal->getSendersBlockRef() == reference()); /* Paranoia */
+
+  switch (sig->type)
+  {
+  case ContinueFragmented::CONTINUE_SENDING :
+  {
     ljam();
-    Ptr<FragmentSendInfo> copyPtr = fragPtr;
-    c_segmentedFragmentSendList.next(fragPtr);
+    Ptr<FragmentSendInfo> fragPtr;
     
-    sendNextSegmentedFragment(signal, * copyPtr.p);
-    if(copyPtr.p->m_status == FragmentSendInfo::SendComplete){
+    c_segmentedFragmentSendList.first(fragPtr);  
+    for(; !fragPtr.isNull();){
       ljam();
-      if(copyPtr.p->m_callback.m_callbackFunction != 0) {
+      Ptr<FragmentSendInfo> copyPtr = fragPtr;
+      c_segmentedFragmentSendList.next(fragPtr);
+      
+      sendNextSegmentedFragment(signal, * copyPtr.p);
+      if(copyPtr.p->m_status == FragmentSendInfo::SendComplete){
         ljam();
-	execute(signal, copyPtr.p->m_callback, 0);
-      }//if
-      c_segmentedFragmentSendList.release(copyPtr);
+        if(copyPtr.p->m_callback.m_callbackFunction != 0) {
+          ljam();
+          execute(signal, copyPtr.p->m_callback, 0);
+        }//if
+        c_segmentedFragmentSendList.release(copyPtr);
+      }
     }
-  }
-  
-  c_linearFragmentSendList.first(fragPtr);  
-  for(; !fragPtr.isNull();){
-    ljam(); 
-    Ptr<FragmentSendInfo> copyPtr = fragPtr;
-    c_linearFragmentSendList.next(fragPtr);
     
-    sendNextLinearFragment(signal, * copyPtr.p);
-    if(copyPtr.p->m_status == FragmentSendInfo::SendComplete){
-      ljam();
-      if(copyPtr.p->m_callback.m_callbackFunction != 0) {
+    c_linearFragmentSendList.first(fragPtr);  
+    for(; !fragPtr.isNull();){
+      ljam(); 
+      Ptr<FragmentSendInfo> copyPtr = fragPtr;
+      c_linearFragmentSendList.next(fragPtr);
+      
+      sendNextLinearFragment(signal, * copyPtr.p);
+      if(copyPtr.p->m_status == FragmentSendInfo::SendComplete){
         ljam();
-	execute(signal, copyPtr.p->m_callback, 0);
-      }//if
-      c_linearFragmentSendList.release(copyPtr);
+        if(copyPtr.p->m_callback.m_callbackFunction != 0) {
+          ljam();
+          execute(signal, copyPtr.p->m_callback, 0);
+        }//if
+        c_linearFragmentSendList.release(copyPtr);
+      }
     }
+    
+    if(c_segmentedFragmentSendList.isEmpty() && 
+       c_linearFragmentSendList.isEmpty()){
+      ljam();
+      c_fragSenderRunning = false;
+      return;
+    }
+    
+    sig->type = ContinueFragmented::CONTINUE_SENDING;
+    sig->line = __LINE__;
+    sendSignal(reference(), GSN_CONTINUE_FRAGMENTED, signal, 2, JBB);
+    break;
   }
-  
-  if(c_segmentedFragmentSendList.isEmpty() && 
-     c_linearFragmentSendList.isEmpty()){
+  case ContinueFragmented::CONTINUE_CLEANUP:
+  {
     ljam();
-    c_fragSenderRunning = false;
-    return;
+    
+    const Uint32 callbackWords = (sizeof(Callback) + 3) >> 2;
+    /* Check length of signal */
+    ndbassert(signal->getLength() ==
+              ContinueFragmented::CONTINUE_CLEANUP_FIXED_WORDS + 
+              callbackWords);
+    
+    Callback cb;
+    memcpy(&cb, &sig->cleanup.callbackStart, callbackWords << 2);
+
+    doNodeFailureCleanup(signal,
+                         sig->cleanup.failedNodeId,
+                         sig->cleanup.resource,
+                         sig->cleanup.cursor,
+                         sig->cleanup.elementsCleaned,
+                         cb);
+    break;
+  }
+  default:
+    ndbrequire(false);
   }
-  
-  ContinueFragmented * sig = (ContinueFragmented*)signal->getDataPtrSend();
-  sig->line = __LINE__;
-  sendSignal(reference(), GSN_CONTINUE_FRAGMENTED, signal, 1, JBB);
 }
 
 void
@@ -1117,6 +1151,287 @@ SimulatedBlock::assembleFragments(Signal
   return false;
 }
 
+/**
+ * doCleanupFragInfo
+ * Iterate over block's Fragment assembly hash, looking
+ * for in-assembly fragments from the failed node
+ * Release these
+ * Returns after each scanned bucket to avoid consuming
+ * too much time.
+ *
+ * Parameters
+ *   failedNodeId    : Node id of failed node
+ *   cursor          : Hash bucket to start iteration from
+ *   rtUnitsUsed     : Total rt units used
+ *   elementsCleaned : Number of elements cleaned
+ *
+ * Updates
+ *   cursor          : Hash bucket to continue iteration from
+ *   rtUnitsUsed     : += units used
+ *   elementsCleaned : += elements cleaned
+ * 
+ * Returns
+ *   true  if all FragInfo structs cleaned up
+ *   false if more to do 
+ */
+bool
+SimulatedBlock::doCleanupFragInfo(Uint32 failedNodeId,
+                                  Uint32& cursor,
+                                  Uint32& rtUnitsUsed,
+                                  Uint32& elementsCleaned)
+{
+  ljam();
+  DLHashTable<FragmentInfo>::Iterator iter;
+  
+  c_fragmentInfoHash.next(cursor, iter);
+
+  const Uint32 startBucket = iter.bucket;
+
+  while (!iter.isNull() &&
+         (iter.bucket == startBucket))
+  {
+    ljam();
+
+    Ptr<FragmentInfo> curr = iter.curr;
+    c_fragmentInfoHash.next(iter);
+
+    FragmentInfo* fragInfo = curr.p;
+    
+    if (refToNode(fragInfo->m_senderRef) == failedNodeId)
+    {
+      ljam();
+      /* We were assembling a fragmented signal from the
+       * failed node, discard the partially assembled
+       * sections and free the FragmentInfo hash entry
+       */
+      for(Uint32 s = 0; s<3; s++)
+      {
+        if (fragInfo->m_sectionPtrI[s] != RNIL)
+        {
+          ljam();
+          SegmentedSectionPtr ssptr;
+          getSection(ssptr, fragInfo->m_sectionPtrI[s]);
+          release(ssptr);
+        }
+      }
+      
+      /* Release FragmentInfo hash element */
+      c_fragmentInfoHash.release(curr);
+
+      elementsCleaned++;
+      rtUnitsUsed+=3;
+    }
+      
+    rtUnitsUsed++;
+  } // while
+   
+  cursor = iter.bucket;
+  return iter.isNull();
+}
+
+bool
+SimulatedBlock::doCleanupFragSend(Uint32 failedNodeId,
+                                  Uint32& cursor,
+                                  Uint32& rtUnitsUsed,
+                                  Uint32& elementsCleaned)
+{
+  ljam();
+  
+  Ptr<FragmentSendInfo> fragPtr;
+  const Uint32 NumSendLists = 2;
+  ndbrequire(cursor < NumSendLists);
+
+  DLList<FragmentSendInfo>* fragSendLists[ NumSendLists ] =
+    { &c_segmentedFragmentSendList,
+      &c_linearFragmentSendList };
+  
+  DLList<FragmentSendInfo>* list = fragSendLists[ cursor ];
+  
+  list->first(fragPtr);  
+  for(; !fragPtr.isNull();){
+    ljam();
+    Ptr<FragmentSendInfo> copyPtr = fragPtr;
+    list->next(fragPtr);
+    rtUnitsUsed++;
+
+    NodeReceiverGroup& rg = copyPtr.p->m_nodeReceiverGroup;
+    
+    if (rg.m_nodes.get(failedNodeId))
+    {
+      ljam();
+      /* Fragmented signal is being sent to node */
+      rg.m_nodes.clear(failedNodeId);
+      
+      if (rg.m_nodes.isclear())
+      {
+        ljam();
+        /* No other nodes in receiver group - send
+         * is cancelled
+         * Will be cleaned up in the usual CONTINUE_FRAGMENTED
+         * handling code.
+         */
+        copyPtr.p->m_status = FragmentSendInfo::SendCancelled;
+      }
+      elementsCleaned++;
+    }
+  }
+
+  /* Next time we'll do the next list */
+  cursor++;
+  
+  return (cursor == NumSendLists);
+}
+
+
+Uint32
+SimulatedBlock::doNodeFailureCleanup(Signal* signal,
+                                     Uint32 failedNodeId,
+                                     Uint32 resource,
+                                     Uint32 cursor,
+                                     Uint32 elementsCleaned,
+                                     Callback& cb)
+{
+  ljam();
+  const bool userCallback = (cb.m_callbackFunction != 0);
+  const Uint32 maxRtUnits = userCallback ?
+#ifdef VM_TRACE
+    2 :
+#else
+    16 :
+#endif 
+    ~0; /* Must complete all processing in this call */
+  
+  Uint32 rtUnitsUsed = 0;
+
+  /* Loop over resources, cleaning them up */
+  do
+  {
+    bool resourceDone= false;
+    switch(resource) {
+    case ContinueFragmented::RES_FRAGSEND:
+    {
+      ljam();
+      resourceDone = doCleanupFragSend(failedNodeId, cursor,
+                                       rtUnitsUsed, elementsCleaned);
+      break;
+    }
+    case ContinueFragmented::RES_FRAGINFO:
+    {
+      ljam();
+      resourceDone = doCleanupFragInfo(failedNodeId, cursor, 
+                                       rtUnitsUsed, elementsCleaned);
+      break;
+    }
+    case ContinueFragmented::RES_LAST:
+    {
+      ljam();
+      /* Node failure processing complete, execute user callback if provided */
+      if (userCallback)
+        execute(signal, cb, elementsCleaned);
+      
+      return elementsCleaned;
+    }
+    default:
+      ndbrequire(false);
+    }
+
+    /* Did we complete cleaning up this resource? */
+    if (resourceDone)
+    {
+      resource++;
+      cursor= 0;
+    }
+
+  } while (rtUnitsUsed <= maxRtUnits);
+  
+  ljam();
+
+  /* Not yet completed failure handling.
+   * Must have exhausted RT units.  
+   * Update cursor and re-invoke
+   */
+  ndbassert(userCallback);
+  
+  /* Send signal to continue processing */
+  
+  ContinueFragmented * sig = (ContinueFragmented*)signal->getDataPtrSend();
+  sig->type = ContinueFragmented::CONTINUE_CLEANUP;
+  sig->cleanup.failedNodeId = failedNodeId;
+  sig->cleanup.resource = resource;
+  sig->cleanup.cursor = cursor;
+  sig->cleanup.elementsCleaned= elementsCleaned;
+  Uint32 callbackWords = (sizeof(Callback) + 3) >> 2;
+  Uint32 sigLen = ContinueFragmented::CONTINUE_CLEANUP_FIXED_WORDS + 
+    callbackWords;
+  ndbassert(sigLen <= 25); // Should be STATIC_ASSERT
+  memcpy(&sig->cleanup.callbackStart, &cb, callbackWords << 2);
+  
+  sendSignal(reference(), GSN_CONTINUE_FRAGMENTED, signal, sigLen, JBB);
+
+  return elementsCleaned;
+}
+  
+Uint32
+SimulatedBlock::simBlockNodeFailure(Signal* signal,
+                                    Uint32 failedNodeId, 
+                                    Callback& cb)
+{
+  ljam();
+  return doNodeFailureCleanup(signal, failedNodeId, 0, 0, 0, cb);
+}
+
+Uint32
+SimulatedBlock::debugPrintFragmentCounts()
+{
+  const char* blockName = getBlockName(theNumber);
+  DLHashTable<FragmentInfo>::Iterator iter;
+  Uint32 fragmentInfoCount = 0;
+  c_fragmentInfoHash.first(iter);
+  
+  while(!iter.isNull())
+  {
+    fragmentInfoCount++;
+    c_fragmentInfoHash.next(iter);
+  }
+  
+  Ptr<FragmentSendInfo> ptr;
+  Uint32 linSendInfoCount = 0;
+
+  c_linearFragmentSendList.first(ptr);
+  
+  while (!ptr.isNull())
+  {
+    linSendInfoCount++;
+    c_linearFragmentSendList.next(ptr);
+  }
+  
+  Uint32 segSendInfoCount = 0;
+  c_segmentedFragmentSendList.first(ptr);
+  
+  while (!ptr.isNull())
+  {
+    segSendInfoCount++;
+    c_segmentedFragmentSendList.next(ptr);
+  }
+
+  ndbout_c("%s : Fragment assembly hash entry count : %d", 
+           blockName,
+           fragmentInfoCount);
+
+  ndbout_c("%s : Linear fragment send list size : %d", 
+           blockName,
+           linSendInfoCount);
+
+  ndbout_c("%s : Segmented fragment send list size : %d", 
+           blockName,
+           segSendInfoCount);
+
+  return fragmentInfoCount + 
+    linSendInfoCount +
+    segSendInfoCount;
+}
+
+
 bool
 SimulatedBlock::sendFirstFragment(FragmentSendInfo & info,
 				  NodeReceiverGroup rg, 
@@ -1207,6 +1522,37 @@ void
 SimulatedBlock::sendNextSegmentedFragment(Signal* signal,
 					  FragmentSendInfo & info){
   
+  if (unlikely(info.m_status == FragmentSendInfo::SendCancelled))
+  {
+    /* Send was cancelled - all dest. nodes have failed
+     * since send was started
+     *
+     * Free any sections still to be sent
+     */
+    Uint32 secCount = 0;
+    SegmentedSectionPtr ssptr[3];
+    for (Uint32 s = 0; s < 3; s++)
+    {
+      Uint32 sectionI = info.m_sectionPtr[s].m_segmented.i;
+      if (sectionI != RNIL)
+      {
+        getSection(ssptr[secCount], sectionI);
+        info.m_sectionPtr[s].m_segmented.i = RNIL;
+        info.m_sectionPtr[s].m_segmented.p = NULL;
+        secCount++;
+      }
+    }
+    
+    ::releaseSections(secCount, ssptr);
+    
+    /* Free inline signal data storage section */
+    Uint32 inlineDataI = info.m_theDataSection.p[info.m_theDataSection.sz];
+    g_sectionSegmentPool.release(inlineDataI);
+    
+    info.m_status = FragmentSendInfo::SendComplete;
+    return;
+  }
+
   /**
    * Store "theData"
    */
@@ -1446,6 +1792,19 @@ void
 SimulatedBlock::sendNextLinearFragment(Signal* signal,
 				       FragmentSendInfo & info){
   
+  if (unlikely(info.m_status == FragmentSendInfo::SendCancelled))
+  {
+    /* Send was cancelled - all dest. nodes have failed
+     * since send was started
+     */
+    /* Free inline signal data storage section */
+    Uint32 inlineDataI = info.m_theDataSection.p[info.m_theDataSection.sz];
+    g_sectionSegmentPool.release(inlineDataI);
+    
+    info.m_status = FragmentSendInfo::SendComplete;
+    return;
+  }
+
   /**
    * Store "theData"
    */
@@ -1617,8 +1976,9 @@ SimulatedBlock::sendFragmentedSignal(Blo
   if(!c_fragSenderRunning){
     c_fragSenderRunning = true;
     ContinueFragmented * sig = (ContinueFragmented*)signal->getDataPtrSend();
+    sig->type = ContinueFragmented::CONTINUE_SENDING;
     sig->line = __LINE__;
-    sendSignal(reference(), GSN_CONTINUE_FRAGMENTED, signal, 1, JBB);
+    sendSignal(reference(), GSN_CONTINUE_FRAGMENTED, signal, 2, JBB);
   }
 }
 
@@ -1655,8 +2015,9 @@ SimulatedBlock::sendFragmentedSignal(Nod
   if(!c_fragSenderRunning){
     c_fragSenderRunning = true;
     ContinueFragmented * sig = (ContinueFragmented*)signal->getDataPtrSend();
+    sig->type = ContinueFragmented::CONTINUE_SENDING;
     sig->line = __LINE__;
-    sendSignal(reference(), GSN_CONTINUE_FRAGMENTED, signal, 1, JBB);
+    sendSignal(reference(), GSN_CONTINUE_FRAGMENTED, signal, 2, JBB);
   }
 }
 
@@ -1704,8 +2065,9 @@ SimulatedBlock::sendFragmentedSignal(Blo
   if(!c_fragSenderRunning){
     c_fragSenderRunning = true;
     ContinueFragmented * sig = (ContinueFragmented*)signal->getDataPtrSend();
+    sig->type = ContinueFragmented::CONTINUE_SENDING;
     sig->line = __LINE__;
-    sendSignal(reference(), GSN_CONTINUE_FRAGMENTED, signal, 1, JBB);
+    sendSignal(reference(), GSN_CONTINUE_FRAGMENTED, signal, 2, JBB);
   }
 }
 
@@ -1746,8 +2108,9 @@ SimulatedBlock::sendFragmentedSignal(Nod
   if(!c_fragSenderRunning){
     c_fragSenderRunning = true;
     ContinueFragmented * sig = (ContinueFragmented*)signal->getDataPtrSend();
+    sig->type = ContinueFragmented::CONTINUE_SENDING;
     sig->line = __LINE__;
-    sendSignal(reference(), GSN_CONTINUE_FRAGMENTED, signal, 1, JBB);
+    sendSignal(reference(), GSN_CONTINUE_FRAGMENTED, signal, 2, JBB);
   }
 }
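
The reworked CONTINUE_FRAGMENTED handling and doNodeFailureCleanup() above follow a
general pattern: spend at most a small budget of work per execution (the "rt units",
when a callback allows the work to be resumed), carry the resume point (resource,
cursor, elementsCleaned) in the signal the block sends to itself, and only run to
completion in a single pass when no callback was supplied. A standalone sketch of
that budget-plus-cursor pattern, with an invented job and budget:

  #include <cstdio>

  // Each execution may spend at most kMaxUnitsPerSlice units of work; the
  // cursor and running total are carried between executions, mirroring the
  // fields of ContinueFragmented::CONTINUE_CLEANUP.
  static const unsigned kMaxUnitsPerSlice = 4;

  struct CleanupJob {
    unsigned totalItems;
    unsigned cursor;           // next item to clean
    unsigned elementsCleaned;
  };

  // Returns true when finished, false when the job must be rescheduled.
  static bool runOneSlice(CleanupJob& job)
  {
    unsigned unitsUsed = 0;
    while (job.cursor < job.totalItems && unitsUsed < kMaxUnitsPerSlice) {
      job.cursor++;            // "clean" one element
      job.elementsCleaned++;
      unitsUsed++;
    }
    return job.cursor == job.totalItems;
  }

  int main()
  {
    CleanupJob job = { 10, 0, 0 };
    unsigned reschedules = 0;
    // In the kernel the reschedule is a signal the block sends to itself;
    // here a plain loop plays that role.
    while (!runOneSlice(job))
      reschedules++;
    std::printf("cleaned %u elements with %u reschedules\n",
                job.elementsCleaned, reschedules);
    return 0;
  }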
 

=== modified file 'storage/ndb/src/kernel/vm/SimulatedBlock.hpp'
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp	2009-09-25 11:07:16 +0000
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp	2009-10-09 09:09:48 +0000
@@ -237,6 +237,37 @@ protected:
 			    Callback &,
 			    Uint32 messageSize = 240);
 
+  /**
+   * simBlockNodeFailure
+   *
+   * Method must be called by blocks that send or receive 
+   * remote Fragmented Signals when they detect a node 
+   * (NDBD or API) failure.
+   * If the block needs to acknowledge or perform further
+   * processing after completing block-level node failure 
+   * handling, it can supply a Callback which will be invoked 
+   * when block-level node failure handling has completed.
+   * Otherwise TheEmptyCallback is used.
+   * If TheEmptyCallback is used, all failure handling is
+   * performed in the current timeslice, to avoid any
+   * races.
+   * 
+   * Parameters
+   *   signal       : Current signal*
+   *   failedNodeId : Node id of failed node
+   *   cb           : Callback to be executed when block-level
+   *                  node failure handling completed.
+   *                  TheEmptyCallback is passed if no further
+   *                  processing is required.
+   * Returns
+   *   Number of 'resources' cleaned up in call.
+   *   Callback return code is total resources cleaned up.
+   *   
+   */
+  Uint32 simBlockNodeFailure(Signal* signal,
+                             Uint32 failedNodeId,
+                             Callback& cb = TheEmptyCallback);
+
   /**********************************************************
    * Fragmented signals structures
    */
@@ -273,7 +304,8 @@ protected:
     
     enum Status {
       SendNotComplete = 0,
-      SendComplete    = 1
+      SendComplete    = 1,
+      SendCancelled   = 2
     };
     Uint8  m_status;
     Uint8  m_prio;
@@ -355,6 +387,23 @@ private:
   const NodeId         theNodeId;
   const BlockNumber    theNumber;
   const BlockReference theReference;
+
+  Uint32 doNodeFailureCleanup(Signal* signal,
+                              Uint32 failedNodeId,
+                              Uint32 resource,
+                              Uint32 cursor,
+                              Uint32 elementsCleaned,
+                              Callback& cb);
+
+  bool doCleanupFragInfo(Uint32 failedNodeId,
+                         Uint32& cursor,
+                         Uint32& rtUnitsUsed,
+                         Uint32& elementsCleaned);
+
+  bool doCleanupFragSend(Uint32 failedNodeId,
+                         Uint32& cursor,
+                         Uint32& rtUnitsUsed,
+                         Uint32& elementsCleaned);
   
 protected:
   Block_context m_ctx;
@@ -467,6 +516,9 @@ private:
   ArrayPool<FragmentSendInfo> c_fragmentSendPool;
   DLList<FragmentSendInfo> c_linearFragmentSendList;
   DLList<FragmentSendInfo> c_segmentedFragmentSendList;
+
+protected:
+  Uint32 debugPrintFragmentCounts();
   
 public: 
   class MutexManager {
@@ -484,6 +536,7 @@ public: 
      * core interface
      */
     struct ActiveMutex {
+      ActiveMutex() {}
       Uint32 m_gsn; // state
       Uint32 m_mutexId;
       Uint32 m_mutexKey;

=== modified file 'storage/ndb/src/mgmapi/LocalConfig.cpp'
--- a/storage/ndb/src/mgmapi/LocalConfig.cpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/mgmapi/LocalConfig.cpp	2009-09-24 16:17:14 +0000
@@ -22,6 +22,9 @@
 #include <NdbAutoPtr.hpp>
 #include <NdbMem.h>
 
+#define _STR_VALUE(x) #x
+#define STR_VALUE(x) _STR_VALUE(x)
+
 LocalConfig::LocalConfig(){
   error_line = 0; error_msg[0] = 0;
   _ownNodeId= 0;
@@ -95,11 +98,10 @@ LocalConfig::init(const char *connectStr
       DBUG_RETURN(false);
   }
 
-  //7. Check
+  //7. Use default connect string
   {
-    char buf2[256];
-    BaseString::snprintf(buf2, sizeof(buf2), "host=localhost:%s", NDB_PORT);
-    if(readConnectString(buf2, "default connect string"))
+    if(readConnectString("host=localhost:" STR_VALUE(NDB_PORT),
+                         "default connect string"))
       DBUG_RETURN(true);
   }
 
@@ -189,7 +191,7 @@ LocalConfig::parseHostName(const char * 
     if (buf == tempString2)
       break;
     // try to add default port to see if it works
-    snprintf(tempString2, sizeof(tempString2),"%s:%s", buf, NDB_PORT);
+    snprintf(tempString2, sizeof(tempString2),"%s:%d", buf, NDB_PORT);
     buf= tempString2;
   } while(1);
   return false;
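
The STR_VALUE macro introduced here is the classic two-level stringification idiom:
now that NDB_PORT is a plain integer define (see also the Makefile.am and
ConfigInfo.cpp hunks below), STR_VALUE(NDB_PORT) turns it into a string literal that
can be pasted onto "host=localhost:" at compile time. A minimal standalone
demonstration, with an example port value:

  #include <cstdio>

  #define NDB_PORT 1186            /* example value only */

  /* Two levels are needed so NDB_PORT expands before '#' stringizes it. */
  #define _STR_VALUE(x) #x
  #define STR_VALUE(x) _STR_VALUE(x)

  int main()
  {
    /* Adjacent string literals are concatenated at compile time. */
    const char* connectString = "host=localhost:" STR_VALUE(NDB_PORT);
    std::printf("%s\n", connectString);   /* prints host=localhost:1186 */
    return 0;
  }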

=== modified file 'storage/ndb/src/mgmapi/Makefile.am'
--- a/storage/ndb/src/mgmapi/Makefile.am	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/mgmapi/Makefile.am	2009-09-24 16:17:14 +0000
@@ -24,7 +24,7 @@ libmgmapi_la_SOURCES = mgmapi.cpp ndb_lo
 INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/include/mgmapi
 
 DEFS_LOC = -DNDB_MGMAPI -DMYSQLCLUSTERDIR="\"$(MYSQLCLUSTERdir)\"" \
-           -DNO_DEBUG_MESSAGES -DNDB_PORT="\"@ndb_port@\""
+           -DNO_DEBUG_MESSAGES
 
 include $(top_srcdir)/storage/ndb/config/common.mk.am
 include $(top_srcdir)/storage/ndb/config/type_util.mk.am

=== modified file 'storage/ndb/src/mgmsrv/ConfigInfo.cpp'
--- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp	2009-09-09 09:01:45 +0000
+++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp	2009-09-24 16:17:14 +0000
@@ -1798,7 +1798,7 @@ const ConfigInfo::ParamInfo ConfigInfo::
     ConfigInfo::CI_USED,
     false,
     ConfigInfo::CI_INT,
-    NDB_PORT,
+    STR_VALUE(NDB_PORT),
     "0",
     STR_VALUE(MAX_PORT_NO) },
 
@@ -3412,7 +3412,7 @@ fixShmUniqueId(InitConfigFileParser::Con
   ctx.m_userProperties.get(ctx.fname, &nodes);
   if (nodes == 1) // first management server
   {
-    Uint32 portno= atoi(NDB_PORT);
+    Uint32 portno= NDB_PORT;
     ctx.m_currentSection->get("PortNumber", &portno);
     ctx.m_userProperties.put("ShmUniqueId", portno);
   }

=== modified file 'storage/ndb/src/ndbapi/NdbDictionary.cpp'
--- a/storage/ndb/src/ndbapi/NdbDictionary.cpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp	2009-10-07 02:21:54 +0000
@@ -2054,7 +2054,9 @@ NdbDictionary::Dictionary::getEvent(cons
 int
 NdbDictionary::Dictionary::listEvents(List& list)
 {
-  return m_impl.listEvents(list);
+  // delegate to overloaded const function for same semantics
+  const NdbDictionary::Dictionary * const cthis = this;
+  return cthis->NdbDictionary::Dictionary::listEvents(list);
 }
 
 int
@@ -2066,7 +2068,9 @@ NdbDictionary::Dictionary::listEvents(Li
 int
 NdbDictionary::Dictionary::listObjects(List& list, Object::Type type)
 {
-  return m_impl.listObjects(list, type);
+  // delegate to overloaded const function for same semantics
+  const NdbDictionary::Dictionary * const cthis = this;
+  return cthis->NdbDictionary::Dictionary::listObjects(list, type);
 }
 
 int
@@ -2078,12 +2082,9 @@ NdbDictionary::Dictionary::listObjects(L
 int
 NdbDictionary::Dictionary::listIndexes(List& list, const char * tableName)
 {
-  const NdbDictionary::Table* tab= getTable(tableName);
-  if(tab == 0)
-  {
-    return -1;
-  }
-  return m_impl.listIndexes(list, tab->getTableId());
+  // delegate to overloaded const function for same semantics
+  const NdbDictionary::Dictionary * const cthis = this;
+  return cthis->NdbDictionary::Dictionary::listIndexes(list, tableName);
 }
 
 int
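
The listEvents/listObjects/listIndexes changes make each non-const overload forward
to its const sibling, so the two can never drift apart in behaviour. The same idiom
in a standalone sketch, with an invented class:

  #include <cstdio>

  class Catalog {
  public:
    // The const overload owns the real implementation.
    int listObjects(int type) const {
      std::printf("listing objects of type %d\n", type);
      return 0;
    }

    // The non-const overload only forwards, so the semantics stay identical.
    int listObjects(int type) {
      const Catalog* const cthis = this;
      return cthis->listObjects(type);
    }
  };

  int main()
  {
    Catalog c;
    c.listObjects(1);            // non-const object: forwards to the const overload
    const Catalog& cc = c;
    cc.listObjects(2);           // const object: calls the const overload directly
    return 0;
  }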

=== modified file 'storage/ndb/src/ndbapi/NdbOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbOperation.cpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/src/ndbapi/NdbOperation.cpp	2009-10-07 02:21:54 +0000
@@ -377,6 +377,9 @@ NdbOperation::setValue(Uint32 anAttrId, 
 NdbBlob*
 NdbOperation::getBlobHandle(const char* anAttrName)
 {
+  // semantics differs from overloaded 'getBlobHandle(const char*) const'
+  // by delegating to the non-const variant of internal getBlobHandle(...),
+  // which may create a new BlobHandle
   const NdbColumnImpl* col = m_currentTable->getColumn(anAttrName);
   if (col == NULL)
   {
@@ -392,6 +395,9 @@ NdbOperation::getBlobHandle(const char* 
 NdbBlob*
 NdbOperation::getBlobHandle(Uint32 anAttrId)
 {
+  // semantics differs from overloaded 'getBlobHandle(Uint32) const'
+  // by delegating to the non-const variant of internal getBlobHandle(...),
+  // which may create a new BlobHandle
   const NdbColumnImpl* col = m_currentTable->getColumn(anAttrId);
   if (col == NULL)
   {

=== modified file 'storage/ndb/test/ndbapi/testNdbApi.cpp'
--- a/storage/ndb/test/ndbapi/testNdbApi.cpp	2009-10-06 10:39:02 +0000
+++ b/storage/ndb/test/ndbapi/testNdbApi.cpp	2009-10-12 05:43:10 +0000
@@ -2758,10 +2758,10 @@ runBug44065_org(NDBT_Context* ctx, NDBT_
     ndbout << "Outer Iter : " << outerIter 
            << " " << offset << "-" << (offset + numRecords - 1) << endl;
 
-    CHECK(hugoOps.startTransaction(pNdb) == 0);
-    CHECK(hugoOps.pkInsertRecord(pNdb, offset, numRecords) == 0);
-    CHECK(hugoOps.execute_Commit(pNdb) == 0);
-    CHECK(hugoOps.closeTransaction(pNdb) == 0);
+    {
+      HugoTransactions trans(*pTab);
+      CHECK(trans.loadTableStartFrom(pNdb, offset, numRecords) == 0);
+    }
 
     for (int iter=0; iter < numInnerIterations; iter++)
     {
@@ -2784,10 +2784,12 @@ runBug44065_org(NDBT_Context* ctx, NDBT_
       if ((trans->execute(NdbTransaction::NoCommit,
                           NdbOperation::AO_IgnoreError) != 0))
       {
+        NdbError err = trans->getNdbError();
         ndbout << "Execute failed, error is " 
-               << trans->getNdbError().code << " "
-               << trans->getNdbError().message << endl;
-        CHECK(0);
+               << err.code << " " << endl;
+        CHECK((err.classification == NdbError::TemporaryResourceError ||
+               err.classification == NdbError::OverloadError));
+        NdbSleep_MilliSleep(50);
       }
       
       /* Now abort the transaction by closing it without committing */
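
The testNdbApi change above stops treating temporary resource and overload errors as
hard test failures and instead sleeps briefly and retries. A standalone sketch of
that classify-then-retry shape, with an invented error type standing in for NdbError:

  #include <cstdio>

  // Stand-in for NdbError: only the classification matters here.
  struct Error {
    enum Classification { Success, TemporaryResourceError, OverloadError, PermanentError };
    Classification classification;
  };

  // Pretend operation that succeeds after a few overloaded attempts.
  static Error executeOnce(int attempt)
  {
    Error e;
    e.classification = (attempt < 3) ? Error::OverloadError : Error::Success;
    return e;
  }

  int main()
  {
    for (int attempt = 0; attempt < 10; attempt++) {
      Error e = executeOnce(attempt);
      if (e.classification == Error::Success) {
        std::printf("succeeded on attempt %d\n", attempt);
        return 0;
      }
      if (e.classification == Error::TemporaryResourceError ||
          e.classification == Error::OverloadError) {
        // Back off briefly and retry, as the test now does via NdbSleep_MilliSleep(50).
        continue;
      }
      std::printf("permanent error, giving up\n");
      return 1;
    }
    return 1;
  }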

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2009-10-06 10:39:02 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2009-10-12 11:47:31 +0000
@@ -549,10 +549,10 @@ max-time: 1000
 cmd: testNodeRestart
 args: -n Bug18414 T1
 
-max-time: 1000
-cmd: testNodeRestart
-args: -n Bug18612 T1
-
+#max-time: 1000
+#cmd: testNodeRestart
+#args: -n Bug18612 T1
+#
 max-time: 1000
 cmd: testNodeRestart
 args: -n Bug18612SR T1
