List: Commits
From: Jonas Oreland  Date: April 29, 2011 9:24am
Subject: bzr commit into mysql-5.1-telco-7.0-spj-scan-vs-scan branch (jonas:3486)
#At file:///home/jonas/src/70-spj-svs/ based on revid:jonas@stripped

 3486 Jonas Oreland	2011-04-29 [merge]
      ndb - merge 70 to 70-spj

    added:
      mysql-test/suite/rpl_ndb/r/rpl_ndb_ui.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_ui2.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_ui3.result
      mysql-test/suite/rpl_ndb/t/rpl_ndb_ui.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_ui2.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_ui3.test
    modified:
      mysql-test/suite/ndb/r/ndb_basic.result
      sql/ha_ndbcluster.cc
      storage/ndb/include/kernel/GlobalSignalNumbers.h
      storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp
      storage/ndb/include/kernel/signaldata/LqhKey.hpp
      storage/ndb/include/kernel/signaldata/PackedSignal.hpp
      storage/ndb/include/kernel/signaldata/TcContinueB.hpp
      storage/ndb/include/kernel/signaldata/TcKeyReq.hpp
      storage/ndb/include/kernel/signaldata/TupKey.hpp
      storage/ndb/include/kernel/trigger_definitions.h
      storage/ndb/include/ndb_version.h.in
      storage/ndb/include/ndbapi/NdbOperation.hpp
      storage/ndb/src/common/debugger/signaldata/LqhKey.cpp
      storage/ndb/src/common/debugger/signaldata/PackedSignal.cpp
      storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp
      storage/ndb/src/kernel/blocks/ERROR_codes.txt
      storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
      storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
      storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxBuild.cpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp
      storage/ndb/src/ndbapi/NdbOperationDefine.cpp
      storage/ndb/src/ndbapi/NdbOperationExec.cpp
      storage/ndb/src/ndbapi/ndberror.c
      storage/ndb/test/ndbapi/testIndex.cpp
      storage/ndb/test/run-test/daily-basic-tests.txt
=== modified file 'mysql-test/suite/ndb/r/ndb_basic.result'
--- a/mysql-test/suite/ndb/r/ndb_basic.result	2011-04-12 08:57:18 +0000
+++ b/mysql-test/suite/ndb/r/ndb_basic.result	2011-04-29 09:23:56 +0000
@@ -87,6 +87,7 @@ ndb_blob_write_batch_bytes	#
 ndb_cache_check_time	#
 ndb_cluster_connection_pool	#
 ndb_connectstring	#
+ndb_deferred_constraints	#
 ndb_distribution	#
 ndb_extra_logging	#
 ndb_force_send	#

=== added file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_ui.result'
--- a/mysql-test/suite/rpl_ndb/r/rpl_ndb_ui.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb/r/rpl_ndb_ui.result	2011-04-28 07:47:53 +0000
@@ -0,0 +1,49 @@
+include/master-slave.inc
+[connection master]
+CREATE TABLE t1 (pk int primary key, uk int not null, unique(uk)) engine=ndb;
+insert into t1 values (1,1);
+insert into t1 values (2,2);
+insert into t1 values (3,3);
+insert into t1 values (4,4);
+insert into t1 values (5,5);
+select * from t1 where uk in (1,2,3,4,5);
+pk	uk
+1	1
+2	2
+3	3
+4	4
+5	5
+update t1 set uk=uk-1 order by pk;
+select * from t1 where uk in (0,1,2,3,4);
+pk	uk
+1	0
+2	1
+3	2
+4	3
+5	4
+Master table contents
+select * from t1 order by pk;
+pk	uk
+1	0
+2	1
+3	2
+4	3
+5	4
+flush logs;
+Slave table contents
+select * from t1 order by pk;
+pk	uk
+1	0
+2	1
+3	2
+4	3
+5	4
+select * from t1 where uk in (0,1,2,3,4);
+pk	uk
+1	0
+2	1
+3	2
+4	3
+5	4
+drop table t1;
+include/rpl_end.inc

=== added file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_ui2.result'
--- a/mysql-test/suite/rpl_ndb/r/rpl_ndb_ui2.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb/r/rpl_ndb_ui2.result	2011-04-28 07:47:53 +0000
@@ -0,0 +1,18 @@
+include/master-slave.inc
+[connection master]
+CREATE TABLE t1 (pk int primary key, uk int not null, unique(uk)) engine=ndb;
+STOP SLAVE;
+insert into t1 values (1,1);
+insert into t1 values (2,2);
+insert into t1 values (1,2);
+insert into t1 values (2,1);
+delete from t1;
+insert into t1 values (1,1);
+insert into t1 values (2,2);
+start slave;
+select * from t1;
+pk	uk
+1	1
+2	2
+drop table t1;
+include/rpl_end.inc

=== added file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_ui3.result'
--- a/mysql-test/suite/rpl_ndb/r/rpl_ndb_ui3.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb/r/rpl_ndb_ui3.result	2011-04-28 07:47:53 +0000
@@ -0,0 +1,51 @@
+include/master-slave.inc
+[connection master]
+CREATE TABLE t1 (pk int primary key, uk1 int not null, uk2 int not null,
+unique(uk1), unique(uk2)) engine=ndb;
+insert into t1 values (1,1,1);
+insert into t1 values (2,2,2);
+insert into t1 values (3,3,3);
+insert into t1 values (4,4,4);
+insert into t1 values (5,5,5);
+select * from t1 where uk1 in (1,2,3,4,5);
+pk	uk1	uk2
+1	1	1
+2	2	2
+3	3	3
+4	4	4
+5	5	5
+update t1 set uk1=uk1-1 order by pk;
+update t1 set uk2=uk2+1 order by pk desc;
+select * from t1 where uk1 in (0,1,2,3,4);
+pk	uk1	uk2
+1	0	2
+2	1	3
+3	2	4
+4	3	5
+5	4	6
+Master table contents
+select * from t1 order by pk;
+pk	uk1	uk2
+1	0	2
+2	1	3
+3	2	4
+4	3	5
+5	4	6
+flush logs;
+Slave table contents
+select * from t1 order by pk;
+pk	uk1	uk2
+1	0	2
+2	1	3
+3	2	4
+4	3	5
+5	4	6
+select * from t1 where uk1 in (0,1,2,3,4);
+pk	uk1	uk2
+1	0	2
+2	1	3
+3	2	4
+4	3	5
+5	4	6
+drop table t1;
+include/rpl_end.inc

=== added file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_ui.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_ui.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_ui.test	2011-04-28 07:47:53 +0000
@@ -0,0 +1,52 @@
+--source include/have_ndb.inc
+--source include/have_binlog_format_mixed_or_row.inc
+--source suite/rpl_ndb/ndb_master-slave.inc
+
+#
+# Slave behaviour when replicating unique index operations
+#
+#
+
+--connection master
+
+CREATE TABLE t1 (pk int primary key, uk int not null, unique(uk)) engine=ndb;
+
+# Now perform some operations which do not result in uniqueness
+# violations when applied in-order, but which can result in them
+# when applied out-of-order
+# 5 distinct values chosen to increase the chance of different
+# fragments (and therefore potentially different SUMA->Binlog
+# orders) being seen.
+#
+insert into t1 values (1,1);
+insert into t1 values (2,2);
+insert into t1 values (3,3);
+insert into t1 values (4,4);
+insert into t1 values (5,5);
+
+--sorted_result
+select * from t1 where uk in (1,2,3,4,5);
+
+# Do a 'right shift' on the unique key values
+update t1 set uk=uk-1 order by pk;
+
+--sorted_result
+select * from t1 where uk in (0,1,2,3,4);
+
+--echo Master table contents
+select * from t1 order by pk;
+
+flush logs;
+
+--sync_slave_with_master
+
+--connection slave
+
+--echo Slave table contents
+select * from t1 order by pk;
+select * from t1 where uk in (0,1,2,3,4);
+
+--connection master
+drop table t1;
+
+--source include/rpl_end.inc
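The comment in this test sketches why deferred checking is needed on the slave: the binlog can deliver the row changes of the 'right shift' in a different order than they were applied on the master, and an intermediate state then contains a duplicate unique-key value even though the final state is consistent. A minimal standalone illustration (not part of the patch; C++ is used here only to make the transient duplicate explicit):

  // Replaying "update t1 set uk=uk-1" for rows (1,1),(2,2) in reversed order.
  // An immediate unique check would reject the first step; a check deferred
  // to "commit" only sees the consistent end state.
  #include <cassert>
  #include <map>
  #include <set>

  int main()
  {
    std::map<int,int> row;          // pk -> uk
    row[1] = 1; row[2] = 2;

    row[2] = 1;   // applied first: uk=1 is still used by pk=1 (transient duplicate)
    row[1] = 0;   // applied second: the duplicate resolves itself

    std::set<int> uks;              // deferred check at the end of the batch
    for (std::map<int,int>::iterator it = row.begin(); it != row.end(); ++it)
      uks.insert(it->second);
    assert(uks.size() == row.size());   // unique again -> commit succeeds
    return 0;
  }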

=== added file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_ui2.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_ui2.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_ui2.test	2011-04-28 07:47:53 +0000
@@ -0,0 +1,36 @@
+--source include/have_ndb.inc
+--source include/have_binlog_format_mixed_or_row.inc
+--source suite/rpl_ndb/ndb_master-slave.inc
+
+--connection master
+
+CREATE TABLE t1 (pk int primary key, uk int not null, unique(uk)) engine=ndb;
+
+--sync_slave_with_master
+--connection slave
+STOP SLAVE;
+insert into t1 values (1,1);
+insert into t1 values (2,2);
+--connection master
+insert into t1 values (1,2);
+insert into t1 values (2,1);
+--sleep 1
+delete from t1;
+--sleep 1
+insert into t1 values (1,1);
+insert into t1 values (2,2);
+
+--connection slave
+start slave;
+
+--connection master
+--sync_slave_with_master
+--connection slave
+--sorted_result
+select * from t1;
+
+--connection master
+drop table t1;
+--sync_slave_with_master
+
+--source include/rpl_end.inc

=== added file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_ui3.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_ui3.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_ui3.test	2011-04-28 07:47:53 +0000
@@ -0,0 +1,54 @@
+--source include/have_ndb.inc
+--source include/have_binlog_format_mixed_or_row.inc
+--source suite/rpl_ndb/ndb_master-slave.inc
+
+#
+# Slave behaviour when replicating unique index operations
+#
+#
+
+--connection master
+
+CREATE TABLE t1 (pk int primary key, uk1 int not null, uk2 int not null,
+       unique(uk1), unique(uk2)) engine=ndb;
+
+# Now perform some operations which do not result in uniqueness
+# violations when applied in-order, but which can result in them
+# when applied out-of-order
+# 5 distinct values chosen to increase the chance of different
+# fragments (and therefore potentially different SUMA->Binlog
+# orders) being seen.
+#
+insert into t1 values (1,1,1);
+insert into t1 values (2,2,2);
+insert into t1 values (3,3,3);
+insert into t1 values (4,4,4);
+insert into t1 values (5,5,5);
+
+--sorted_result
+select * from t1 where uk1 in (1,2,3,4,5);
+
+# Do a 'right shift' on the unique key values
+update t1 set uk1=uk1-1 order by pk;
+update t1 set uk2=uk2+1 order by pk desc;
+
+--sorted_result
+select * from t1 where uk1 in (0,1,2,3,4);
+
+--echo Master table contents
+select * from t1 order by pk;
+
+flush logs;
+
+--sync_slave_with_master
+
+--connection slave
+
+--echo Slave table contents
+select * from t1 order by pk;
+select * from t1 where uk1 in (0,1,2,3,4);
+
+--connection master
+drop table t1;
+
+--source include/rpl_end.inc

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2011-04-27 08:39:36 +0000
+++ b/sql/ha_ndbcluster.cc	2011-04-29 09:23:56 +0000
@@ -250,25 +250,33 @@ static MYSQL_THDVAR_UINT(
   0                                  /* block */
 );
 
-#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
-static MYSQL_THDVAR_BOOL(
-  join_pushdown,                     /* name */
-  PLUGIN_VAR_OPCMDARG,
-  "Enable pushing down of join to datanodes",
-  NULL,                              /* check func. */
-  NULL,                              /* update func. */
-  FALSE                              /* default */
+static MYSQL_THDVAR_UINT(
+  deferred_constraints,              /* name */
+  PLUGIN_VAR_RQCMDARG,
+  "Specifies that constraints should be checked in a deferred fashion (when supported)",
+  NULL,                              /* check func */
+  NULL,                              /* update func */
+  0,                                 /* default */
+  0,                                 /* min */
+  1,                                 /* max */
+  0                                  /* block */
 );
+
+#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
+#define DEFAULT_NDB_JOIN_PUSHDOWN FALSE
 #else
+#define DEFAULT_NDB_JOIN_PUSHDOWN TRUE
+#endif
+
 static MYSQL_THDVAR_BOOL(
   join_pushdown,                     /* name */
   PLUGIN_VAR_OPCMDARG,
   "Enable pushing down of join to datanodes",
   NULL,                              /* check func. */
   NULL,                              /* update func. */
-  TRUE                               /* default */
+  DEFAULT_NDB_JOIN_PUSHDOWN          /* default */
 );
-#endif
+
 /*
   Default value for max number of transactions createable against NDB from
   the handler. Should really be 2 but there is a transaction to much allocated
@@ -5033,6 +5041,12 @@ int ha_ndbcluster::ndb_write_row(uchar *
     options.extraSetValues= sets;
     options.numExtraSetValues= num_sets;
   }
+  if (thd->slave_thread || THDVAR(thd, deferred_constraints))
+  {
+    options.optionsPresent |=
+      NdbOperation::OperationOptions::OO_DEFERRED_CONSTAINTS;
+  }
+
   if (options.optionsPresent != 0)
     poptions=&options;
 
@@ -5783,6 +5797,12 @@ int ha_ndbcluster::ndb_update_row(const 
   
   bool need_flush= add_row_check_if_batch_full(thd_ndb);
 
+  if (thd->slave_thread || THDVAR(thd, deferred_constraints))
+  {
+    options.optionsPresent |=
+      NdbOperation::OperationOptions::OO_DEFERRED_CONSTAINTS;
+  }
+
   if (cursor)
   {
     /*
@@ -6008,6 +6028,12 @@ int ha_ndbcluster::ndb_delete_row(const 
   uint delete_size= 12 + (m_bytes_per_write >> 2);
   bool need_flush= add_row_check_if_batch_full_size(thd_ndb, delete_size);
 
+  if (thd->slave_thread || THDVAR(thd, deferred_constraints))
+  {
+    options.optionsPresent |=
+      NdbOperation::OperationOptions::OO_DEFERRED_CONSTAINTS;
+  }
+
   if (cursor)
   {
     if (options.optionsPresent != 0)
@@ -16766,8 +16792,8 @@ static struct st_mysql_sys_var* system_v
   MYSQL_SYSVAR(nodeid),
   MYSQL_SYSVAR(blob_read_batch_bytes),
   MYSQL_SYSVAR(blob_write_batch_bytes),
+  MYSQL_SYSVAR(deferred_constraints),
   MYSQL_SYSVAR(join_pushdown),
-
   NULL
 };
 

=== modified file 'storage/ndb/include/kernel/GlobalSignalNumbers.h'
--- a/storage/ndb/include/kernel/GlobalSignalNumbers.h	2011-04-09 15:48:21 +0000
+++ b/storage/ndb/include/kernel/GlobalSignalNumbers.h	2011-04-28 07:47:53 +0000
@@ -337,9 +337,11 @@ extern const GlobalSignalNumber NO_OF_SI
 /* 233 unused */
 /* 234 unused */
 #define GSN_DISCONNECT_REP              235
-/* 236 unused */
-/* 237 unused */
-/* 238 unused */
+
+#define GSN_FIRE_TRIG_REQ               236
+#define GSN_FIRE_TRIG_REF               237
+#define GSN_FIRE_TRIG_CONF              238
+
 #define GSN_DIVERIFYCONF                239
 #define GSN_DIVERIFYREF                 240
 #define GSN_DIVERIFYREQ                 241

=== modified file 'storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp'
--- a/storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp	2011-04-28 07:47:53 +0000
@@ -225,5 +225,47 @@ void FireTrigOrd::setAnyValue(Uint32 any
   m_any_value = any_value;
 }
 
+struct FireTrigReq
+{
+  STATIC_CONST( SignalLength = 4 );
+
+  Uint32 tcOpRec;
+  Uint32 transId[2];
+  Uint32 pass;
+};
+
+struct FireTrigRef
+{
+  STATIC_CONST( SignalLength = 4 );
+
+  Uint32 tcOpRec;
+  Uint32 transId[2];
+  Uint32 errCode;
+
+  enum ErrorCode
+  {
+    FTR_UnknownOperation = 1235
+    ,FTR_IncorrectState = 1236
+  };
+};
+
+struct FireTrigConf
+{
+  STATIC_CONST( SignalLength = 4 );
+
+  Uint32 tcOpRec;
+  Uint32 transId[2];
+  Uint32 noFiredTriggers; // bit 31: deferred trigger
+
+  static Uint32 getFiredCount(Uint32 v) {
+    return NoOfFiredTriggers::getFiredCount(v);
+  }
+  static Uint32 getDeferredBit(Uint32 v) {
+    return NoOfFiredTriggers::getDeferredBit(v);
+  }
+  static void setDeferredBit(Uint32 & v) {
+    NoOfFiredTriggers::setDeferredBit(v);
+  }
+};
 
 #endif

=== modified file 'storage/ndb/include/kernel/signaldata/LqhKey.hpp'
--- a/storage/ndb/include/kernel/signaldata/LqhKey.hpp	2011-02-08 14:29:52 +0000
+++ b/storage/ndb/include/kernel/signaldata/LqhKey.hpp	2011-04-29 09:23:56 +0000
@@ -19,6 +19,7 @@
 #define LQH_KEY_H
 
 #include "SignalData.hpp"
+#include <trigger_definitions.h>
 
 class LqhKeyReq {
   /**
@@ -166,6 +167,12 @@ private:
    */
   static UintR getCorrFactorFlag(const UintR & requestInfo);
   static void setCorrFactorFlag(UintR & requestInfo, UintR val);
+
+  /**
+   * Deferred constraints flag
+   */
+  static UintR getDeferredConstraints(const UintR & requestInfo);
+  static void setDeferredConstraints(UintR & requestInfo, UintR val);
 };
 
 /**
@@ -195,6 +202,7 @@ private:
  * q = Queue on redo problem  - 1  Bit (14)
  * A = CorrFactor flag        - 1  Bit (24)
  * P = Do normal protocol even if dirty-read - 1 Bit (25)
+ * D = Deferred constraints   - 1  Bit (26)
 
  * Short LQHKEYREQ :
  *             1111111111222222222233
@@ -205,7 +213,7 @@ private:
  * Long LQHKEYREQ :
  *             1111111111222222222233
  *   01234567890123456789012345678901
- *             llgnqpdisooorrAP cumxz
+ *             llgnqpdisooorrAPDcumxz
  *
  */
 
@@ -235,6 +243,7 @@ private:
 #define RI_QUEUE_REDO_SHIFT  (14)
 #define RI_CORR_FACTOR_VALUE (24)
 #define RI_NORMAL_DIRTY      (25)
+#define RI_DEFERRED_CONSTAINTS (26)
 
 /**
  * Scan Info
@@ -625,6 +634,19 @@ LqhKeyReq::getCorrFactorFlag(const UintR
 }
 
 inline
+void
+LqhKeyReq::setDeferredConstraints(UintR & requestInfo, UintR val){
+  ASSERT_BOOL(val, "LqhKeyReq::setDeferredConstraints");
+  requestInfo |= (val << RI_DEFERRED_CONSTAINTS);
+}
+
+inline
+UintR
+LqhKeyReq::getDeferredConstraints(const UintR & requestInfo){
+  return (requestInfo >> RI_DEFERRED_CONSTAINTS) & 1;
+}
+
+inline
 Uint32
 table_version_major_lqhkeyreq(Uint32 x)
 {
@@ -688,7 +710,17 @@ private:
   };
   Uint32 transId1;
   Uint32 transId2;
-  Uint32 noFiredTriggers;
+  Uint32 noFiredTriggers; // bit 31: deferred trigger
+
+  static Uint32 getFiredCount(Uint32 v) {
+    return NoOfFiredTriggers::getFiredCount(v);
+  }
+  static Uint32 getDeferredBit(Uint32 v) {
+    return NoOfFiredTriggers::getDeferredBit(v);
+  }
+  static void setDeferredBit(Uint32 & v) {
+    NoOfFiredTriggers::setDeferredBit(v);
+  }
 };
 
 class LqhKeyRef {

=== modified file 'storage/ndb/include/kernel/signaldata/PackedSignal.hpp'
--- a/storage/ndb/include/kernel/signaldata/PackedSignal.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/include/kernel/signaldata/PackedSignal.hpp	2011-04-28 07:47:53 +0000
@@ -28,6 +28,8 @@
 #define ZCOMPLETED 3
 #define ZLQHKEYCONF 4
 #define ZREMOVE_MARKER 5
+#define ZFIRE_TRIG_REQ 6
+#define ZFIRE_TRIG_CONF 7
 
 class PackedSignal {
 

=== modified file 'storage/ndb/include/kernel/signaldata/TcContinueB.hpp'
--- a/storage/ndb/include/kernel/signaldata/TcContinueB.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/include/kernel/signaldata/TcContinueB.hpp	2011-04-28 07:47:53 +0000
@@ -46,7 +46,8 @@ private:
     TRIGGER_PENDING                        = 17,
     
     DelayTCKEYCONF = 18,
-    ZNF_CHECK_TRANSACTIONS = 19
+    ZNF_CHECK_TRANSACTIONS = 19,
+    ZSEND_FIRE_TRIG_REQ = 20
   };
 };
 

=== modified file 'storage/ndb/include/kernel/signaldata/TcKeyReq.hpp'
--- a/storage/ndb/include/kernel/signaldata/TcKeyReq.hpp	2011-02-08 14:29:52 +0000
+++ b/storage/ndb/include/kernel/signaldata/TcKeyReq.hpp	2011-04-29 09:23:56 +0000
@@ -207,6 +207,13 @@ private:
 
   static void setQueueOnRedoProblemFlag(UintR & requestInfo, UintR val);
   static UintR getQueueOnRedoProblemFlag(const UintR & requestInfo);
+
+  /**
+   * Check constraints deferred
+   */
+  static UintR getDeferredConstraints(const UintR & requestInfo);
+  static void setDeferredConstraints(UintR & requestInfo, UintR val);
+
   /**
    * Set:ers for scanInfo
    */
@@ -237,11 +244,12 @@ private:
  n = No disk flag          - 1  Bit 1
  r = reorg flag            - 1  Bit 19
  q = Queue on redo problem - 1  Bit 9
+ D = deferred constraint   - 1  Bit 17
 
            1111111111222222222233
  01234567890123456789012345678901
  dnb cooop lsyyeiaaarkkkkkkkkkkkk  (Short TCKEYREQ)
- dnbvcooopqlsyyei   r              (Long TCKEYREQ)
+ dnbvcooopqlsyyei D r              (Long TCKEYREQ)
 */
 
 #define TCKEY_NODISK_SHIFT (1)
@@ -270,6 +278,8 @@ private:
 #define TC_REORG_SHIFT     (19)
 #define QUEUE_ON_REDO_SHIFT (9)
 
+#define TC_DEFERRED_CONSTAINTS_SHIFT (17)
+
 /**
  * Scan Info
  *
@@ -612,4 +622,18 @@ TcKeyReq::setQueueOnRedoProblemFlag(Uint
   requestInfo |= (flag << QUEUE_ON_REDO_SHIFT);
 }
 
+inline
+void
+TcKeyReq::setDeferredConstraints(UintR & requestInfo, UintR val){
+  ASSERT_BOOL(val, "TcKeyReq::setDeferredConstraints");
+  requestInfo |= (val << TC_DEFERRED_CONSTAINTS_SHIFT);
+}
+
+inline
+UintR
+TcKeyReq::getDeferredConstraints(const UintR & requestInfo){
+  return (requestInfo >> TC_DEFERRED_CONSTAINTS_SHIFT) & 1;
+}
+
+
 #endif
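In the long TCKEYREQ the new flag lives in bit 17 of requestInfo (the 'D' position in the layout comment above); DBTC later translates it into bit 26 of the LQHKEYREQ requestInfo via LqhKeyReq::setDeferredConstraints() (see the DbtcMain.cpp hunk further down). A standalone sketch of the encoding, with the shift constant re-declared here purely for illustration:

  #include <cassert>
  typedef unsigned int UintR;

  #define TC_DEFERRED_CONSTAINTS_SHIFT (17)   // as defined above

  int main()
  {
    UintR requestInfo = 0;
    requestInfo |= (UintR(1) << TC_DEFERRED_CONSTAINTS_SHIFT);        // setDeferredConstraints(ri, 1)
    assert(((requestInfo >> TC_DEFERRED_CONSTAINTS_SHIFT) & 1) == 1); // getDeferredConstraints(ri)
    return 0;
  }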

=== modified file 'storage/ndb/include/kernel/signaldata/TupKey.hpp'
--- a/storage/ndb/include/kernel/signaldata/TupKey.hpp	2011-02-04 11:45:24 +0000
+++ b/storage/ndb/include/kernel/signaldata/TupKey.hpp	2011-04-29 09:23:56 +0000
@@ -38,7 +38,7 @@ class TupKeyReq {
   friend bool printTUPKEYREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo);
 
 public:
-  STATIC_CONST( SignalLength = 19 );
+  STATIC_CONST( SignalLength = 20 );
 
 private:
 
@@ -64,6 +64,7 @@ private:
   Uint32 m_row_id_page_no;
   Uint32 m_row_id_page_idx;
   Uint32 attrInfoIVal;
+  Uint32 deferred_constraints;
 };
 
 class TupKeyConf {

=== modified file 'storage/ndb/include/kernel/trigger_definitions.h'
--- a/storage/ndb/include/kernel/trigger_definitions.h	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/include/kernel/trigger_definitions.h	2011-04-28 07:47:53 +0000
@@ -196,4 +196,19 @@ struct TriggerInfo {
   }
 };
 
+struct NoOfFiredTriggers
+{
+  STATIC_CONST( DeferredBit = (Uint32(1) << 31) );
+
+  static Uint32 getFiredCount(Uint32 v) {
+    return v & ~(Uint32(DeferredBit));
+  }
+  static Uint32 getDeferredBit(Uint32 v) {
+    return (v & Uint32(DeferredBit)) != 0;
+  }
+  static void setDeferredBit(Uint32 & v) {
+    v |= Uint32(DeferredBit);
+  }
+};
+
 #endif
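Both LqhKeyConf::noFiredTriggers and the new FireTrigConf::noFiredTriggers reuse this word layout: bits 0-30 carry the fired-trigger count and bit 31 marks that at least one deferred trigger exists. A minimal standalone sketch of the arithmetic behind the helpers above:

  #include <cassert>
  typedef unsigned int Uint32;

  int main()
  {
    const Uint32 DeferredBit = Uint32(1) << 31;   // NoOfFiredTriggers::DeferredBit

    Uint32 word = 3;          // three triggers fired
    word |= DeferredBit;      // setDeferredBit(): at least one is deferred

    assert((word & ~DeferredBit) == 3);   // getFiredCount()
    assert((word &  DeferredBit) != 0);   // getDeferredBit()
    return 0;
  }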

=== modified file 'storage/ndb/include/ndb_version.h.in'
--- a/storage/ndb/include/ndb_version.h.in	2011-04-10 17:32:41 +0000
+++ b/storage/ndb/include/ndb_version.h.in	2011-04-29 09:23:56 +0000
@@ -611,4 +611,21 @@ ndbd_connectivity_check(Uint32 x)
   return x >= NDBD_PING_REQ_71;
 }
 
+#define NDBD_DEFERRED_UNIQUE_CONSTRAINTS_70 NDB_MAKE_VERSION(7,0,25)
+#define NDBD_DEFERRED_UNIQUE_CONSTRAINTS_71 NDB_MAKE_VERSION(7,1,14)
+
+static
+inline
+int
+ndbd_deferred_unique_constraints(Uint32 x)
+{
+  const Uint32 major = (x >> 16) & 0xFF;
+  const Uint32 minor = (x >>  8) & 0xFF;
+
+  if (major == 7 && minor == 0)
+    return x >= NDBD_DEFERRED_UNIQUE_CONSTRAINTS_70;
+
+  return x >= NDBD_DEFERRED_UNIQUE_CONSTRAINTS_71;
+}
+
 #endif
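DBTC uses this predicate (see the execINCL_NODEREQ/execREAD_NODESCONF hunks in DbtcMain.cpp below) to clear m_deferred_enabled as soon as any data node runs a version without the feature, e.g. during a rolling upgrade. A small worked example; NDB_MAKE_VERSION is assumed to pack major<<16 | minor<<8 | build, consistent with the major/minor extraction above:

  #include <cassert>
  typedef unsigned int Uint32;

  #define NDB_MAKE_VERSION(A,B,C) ((Uint32(A) << 16) | (Uint32(B) << 8) | Uint32(C))
  #define NDBD_DEFERRED_UNIQUE_CONSTRAINTS_70 NDB_MAKE_VERSION(7,0,25)
  #define NDBD_DEFERRED_UNIQUE_CONSTRAINTS_71 NDB_MAKE_VERSION(7,1,14)

  static int ndbd_deferred_unique_constraints(Uint32 x)   // as in the patch
  {
    const Uint32 major = (x >> 16) & 0xFF;
    const Uint32 minor = (x >>  8) & 0xFF;
    if (major == 7 && minor == 0)
      return x >= NDBD_DEFERRED_UNIQUE_CONSTRAINTS_70;
    return x >= NDBD_DEFERRED_UNIQUE_CONSTRAINTS_71;
  }

  int main()
  {
    assert(!ndbd_deferred_unique_constraints(NDB_MAKE_VERSION(7,0,24)));
    assert( ndbd_deferred_unique_constraints(NDB_MAKE_VERSION(7,0,25)));
    assert(!ndbd_deferred_unique_constraints(NDB_MAKE_VERSION(7,1,13)));
    assert( ndbd_deferred_unique_constraints(NDB_MAKE_VERSION(7,1,14)));
    assert( ndbd_deferred_unique_constraints(NDB_MAKE_VERSION(7,2,0)));
    return 0;
  }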

=== modified file 'storage/ndb/include/ndbapi/NdbOperation.hpp'
--- a/storage/ndb/include/ndbapi/NdbOperation.hpp	2011-02-09 14:59:39 +0000
+++ b/storage/ndb/include/ndbapi/NdbOperation.hpp	2011-04-29 09:23:56 +0000
@@ -1040,7 +1040,8 @@ public:
                  OO_CUSTOMDATA   = 0x40,
                  OO_LOCKHANDLE   = 0x80,
                  OO_QUEUABLE     = 0x100,
-                 OO_NOT_QUEUABLE = 0x200
+                 OO_NOT_QUEUABLE = 0x200,
+                 OO_DEFERRED_CONSTAINTS = 0x400
     };
 
     /* An operation-specific abort option.
@@ -1439,7 +1440,8 @@ protected:
       word set by setAnyValue().
     */
     OF_USE_ANY_VALUE = 0x2,
-    OF_QUEUEABLE = 0x4
+    OF_QUEUEABLE = 0x4,
+    OF_DEFERRED_CONSTRAINTS = 0x8
   };
   Uint8  m_flags;
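OO_DEFERRED_CONSTAINTS is the per-operation switch that ha_ndbcluster.cc (above) sets for slave threads and when ndb_deferred_constraints is enabled. A hypothetical NDB API usage sketch, assuming the NdbRecord-style insertTuple() overload that accepts OperationOptions; the helper name and the rec/row parameters are placeholders, not part of the patch:

  #include <NdbApi.hpp>

  // Insert one row with unique-index checks postponed until just before commit.
  const NdbOperation *
  insert_with_deferred_checks(NdbTransaction *trans,
                              const NdbRecord *rec, const char *row)
  {
    NdbOperation::OperationOptions opts;
    opts.optionsPresent = NdbOperation::OperationOptions::OO_DEFERRED_CONSTAINTS;
    return trans->insertTuple(rec, row, /* mask */ 0, &opts, sizeof(opts));
  }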
 

=== modified file 'storage/ndb/src/common/debugger/signaldata/LqhKey.cpp'
--- a/storage/ndb/src/common/debugger/signaldata/LqhKey.cpp	2011-02-08 14:29:52 +0000
+++ b/storage/ndb/src/common/debugger/signaldata/LqhKey.cpp	2011-04-29 09:23:56 +0000
@@ -65,7 +65,9 @@ printLQHKEYREQ(FILE * output, const Uint
     fprintf(output, "GCI ");
   if(LqhKeyReq::getQueueOnRedoProblemFlag(reqInfo))
     fprintf(output, "Queue ");
-  
+  if(LqhKeyReq::getDeferredConstraints(reqInfo))
+    fprintf(output, "Deferred-constraints ");
+
   fprintf(output, "ScanInfo/noFiredTriggers: H\'%x\n", sig->scanInfo);
   
   fprintf(output,

=== modified file 'storage/ndb/src/common/debugger/signaldata/PackedSignal.cpp'
--- a/storage/ndb/src/common/debugger/signaldata/PackedSignal.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/common/debugger/signaldata/PackedSignal.cpp	2011-04-28 07:47:53 +0000
@@ -97,6 +97,24 @@ printPACKED_SIGNAL(FILE * output, const 
       fprintf(output,"\n");
       break;
     }
+    case ZFIRE_TRIG_REQ: {
+      Uint32 signalLength = 3;
+
+      fprintf(output, "--------------- Signal ----------------\n");
+      fprintf(output, "r.bn: %u \"%s\", length: %u \"FIRE_TRIG_REQ\"\n",
+	      receiverBlockNo, getBlockName(receiverBlockNo,""), signalLength);
+      i += signalLength;
+      break;
+    }
+    case ZFIRE_TRIG_CONF: {
+      Uint32 signalLength = 4;
+
+      fprintf(output, "--------------- Signal ----------------\n");
+      fprintf(output, "r.bn: %u \"%s\", length: %u \"FIRE_TRIG_CONF\"\n",
+	      receiverBlockNo, getBlockName(receiverBlockNo,""), signalLength);
+      i += signalLength;
+      break;
+    }
     default:
       fprintf(output, "Unknown signal type\n");
       i = len; // terminate printing

=== modified file 'storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp'
--- a/storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp	2011-02-08 14:29:52 +0000
+++ b/storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp	2011-04-29 09:23:56 +0000
@@ -79,6 +79,9 @@ printTCKEYREQ(FILE * output, const Uint3
     if(sig->getQueueOnRedoProblemFlag(sig->requestInfo))
       fprintf(output, "Queue ");
 
+    if(sig->getDeferredConstraints(sig->requestInfo))
+      fprintf(output, "Deferred-constraints ");
+
     fprintf(output, "\n");
   }
   

=== modified file 'storage/ndb/src/kernel/blocks/ERROR_codes.txt'
--- a/storage/ndb/src/kernel/blocks/ERROR_codes.txt	2011-04-10 17:32:41 +0000
+++ b/storage/ndb/src/kernel/blocks/ERROR_codes.txt	2011-04-29 09:23:56 +0000
@@ -18,10 +18,10 @@ Next NDBCNTR 1002
 Next NDBFS 2000
 Next DBACC 3002
 Next DBTUP 4035
-Next DBLQH 5064
+Next DBLQH 5072
 Next DBDICT 6026
 Next DBDIH 7229
-Next DBTC 8090
+Next DBTC 8092
 Next CMVMI 9000
 Next BACKUP 10042
 Next DBUTIL 11002

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2011-04-27 08:39:36 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2011-04-29 09:23:56 +0000
@@ -2112,10 +2112,12 @@ public:
     Uint8 m_disk_table;
     Uint8 m_use_rowid;
     Uint8 m_dealloc;
+    Uint8 m_fire_trig_pass;
     enum op_flags {
       OP_ISLONGREQ              = 0x1,
       OP_SAVEATTRINFO           = 0x2,
-      OP_SCANKEYINFOPOSSAVED    = 0x4
+      OP_SCANKEYINFOPOSSAVED    = 0x4,
+      OP_DEFERRED_CONSTRAINTS   = 0x8
     };
     Uint32 m_flags;
     Uint32 m_log_part_ptr_i;
@@ -2308,6 +2310,8 @@ private:
   void execBUILD_INDX_IMPL_REF(Signal* signal);
   void execBUILD_INDX_IMPL_CONF(Signal* signal);
 
+  void execFIRE_TRIG_REQ(Signal*);
+
   // Statement blocks
 
   void init_acc_ptr_list(ScanRecord*);
@@ -3266,6 +3270,9 @@ public:
   void suspendFile(Signal* signal, Ptr<LogFileRecord> logFile, Uint32 millis);
 
   void send_runredo_event(Signal*, LogPartRecord *, Uint32 currgci);
+
+  void sendFireTrigConfTc(Signal* signal, BlockReference ref, Uint32 Tdata[]);
+  bool check_fire_trig_pass(Uint32 op, Uint32 pass);
 };
 
 inline

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp	2011-03-28 11:59:09 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp	2011-04-28 07:47:53 +0000
@@ -424,6 +424,8 @@ Dblqh::Dblqh(Block_context& ctx, Uint32 
                &Dblqh::execFSWRITEREQ);
   addRecSignal(GSN_DBINFO_SCANREQ, &Dblqh::execDBINFO_SCANREQ);
 
+  addRecSignal(GSN_FIRE_TRIG_REQ, &Dblqh::execFIRE_TRIG_REQ);
+
   initData();
 
 #ifdef VM_TRACE

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-04-27 11:50:17 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-04-29 09:23:56 +0000
@@ -74,6 +74,7 @@
 #include <signaldata/FsReadWriteReq.hpp>
 #include <signaldata/DbinfoScan.hpp>
 #include <signaldata/SystemError.hpp>
+#include <signaldata/FireTrigOrd.hpp>
 #include <NdbEnv.h>
 
 #include "../suma/Suma.hpp"
@@ -3196,6 +3197,7 @@ void Dblqh::execPACKED_SIGNAL(Signal* si
 
   jamEntry();
   Tlength = signal->length();
+  Uint32 TsenderRef = signal->getSendersBlockRef();
   Uint32 TcommitLen = 5;
   Uint32 Tgci_lo_mask = ~(Uint32)0;
 
@@ -3244,8 +3246,7 @@ void Dblqh::execPACKED_SIGNAL(Signal* si
       break;
     case ZLQHKEYCONF: {
       jam();
-      LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
-
+      LqhKeyConf * lqhKeyConf = CAST_PTR(LqhKeyConf, signal->theData);
       sig0 = TpackedData[Tstep + 0] & 0x0FFFFFFF;
       sig1 = TpackedData[Tstep + 1];
       sig2 = TpackedData[Tstep + 2];
@@ -3274,6 +3275,22 @@ void Dblqh::execPACKED_SIGNAL(Signal* si
       execREMOVE_MARKER_ORD(signal);
       Tstep += 3;
       break;
+    case ZFIRE_TRIG_REQ:
+      jam();
+      ndbassert(FireTrigReq::SignalLength == 4);
+      sig0 = TpackedData[Tstep + 0] & 0x0FFFFFFF;
+      sig1 = TpackedData[Tstep + 1];
+      sig2 = TpackedData[Tstep + 2];
+      sig3 = TpackedData[Tstep + 3];
+      signal->theData[0] = sig0;
+      signal->theData[1] = sig1;
+      signal->theData[2] = sig2;
+      signal->theData[3] = sig3;
+      signal->header.theLength = FireTrigReq::SignalLength;
+      signal->header.theSendersBlockRef = TsenderRef;
+      execFIRE_TRIG_REQ(signal);
+      Tstep += FireTrigReq::SignalLength;
+      break;
     default:
       ndbrequire(false);
       return;
@@ -4567,6 +4584,13 @@ void Dblqh::execLQHKEYREQ(Signal* signal
     nextPos += 2;
   }
 
+  Uint32 Tdeferred = LqhKeyReq::getDeferredConstraints(Treqinfo);
+  if (isLongReq && Tdeferred)
+  {
+    regTcPtr->m_flags |= TcConnectionrec::OP_DEFERRED_CONSTRAINTS;
+    regTcPtr->m_fire_trig_pass = 0;
+  }
+
   UintR TitcKeyLen = 0;
   Uint32 keyLenWithLQHReq = 0;
   UintR TreclenAiLqhkey   = 0;
@@ -5921,6 +5945,7 @@ Dblqh::acckeyconf_tupkeyreq(Signal* sign
   Uint32 page_idx = lkey2;
   Uint32 page_no = lkey1;
   Uint32 Ttupreq = regTcPtr->dirtyOp;
+  Uint32 flags = regTcPtr->m_flags;
   Ttupreq = Ttupreq + (regTcPtr->opSimple << 1);
   Ttupreq = Ttupreq + (op << 6);
   Ttupreq = Ttupreq + (regTcPtr->opExec << 10);
@@ -5983,6 +6008,8 @@ Dblqh::acckeyconf_tupkeyreq(Signal* sign
   regTcPtr->m_row_id.m_page_idx = page_idx;
   
   tupKeyReq->attrInfoIVal= RNIL;
+  tupKeyReq->deferred_constraints =
+    (flags & TcConnectionrec::OP_DEFERRED_CONSTRAINTS) != 0;
 
   /* Pass AttrInfo section if available in the TupKeyReq signal
    * We are still responsible for releasing it, TUP is just
@@ -7319,6 +7346,160 @@ void Dblqh::errorReport(Signal* signal, 
   return;
 }//Dblqh::errorReport()
 
+void
+Dblqh::execFIRE_TRIG_REQ(Signal* signal)
+{
+  Uint32 tcOprec = signal->theData[0];
+  Uint32 transid1 = signal->theData[1];
+  Uint32 transid2 = signal->theData[2];
+  Uint32 pass = signal->theData[3];
+  Uint32 senderRef = signal->getSendersBlockRef();
+
+  jamEntry();
+
+  if (ERROR_INSERTED_CLEAR(5064))
+  {
+    // throw away...should cause timeout in TC
+    return;
+  }
+
+  CRASH_INSERTION(5072);
+
+  Uint32 err;
+  if (findTransaction(transid1, transid2, tcOprec, 0) == ZOK &&
+      !ERROR_INSERTED_CLEAR(5065) &&
+      !ERROR_INSERTED(5070) &&
+      !ERROR_INSERTED(5071))
+  {
+    TcConnectionrec * const regTcPtr = tcConnectptr.p;
+
+    if (unlikely(regTcPtr->transactionState != TcConnectionrec::PREPARED ||
+                 ERROR_INSERTED_CLEAR(5067)))
+    {
+      err = FireTrigRef::FTR_IncorrectState;
+      goto do_err;
+    }
+
+    /**
+     * Forward the request to the local TUP block (via EXECUTE_DIRECT) so
+     * that it fires the deferred triggers for this operation and pass.
+     */
+    signal->theData[0] = regTcPtr->tupConnectrec;
+    signal->theData[1] = regTcPtr->tcBlockref;
+    signal->theData[2] = regTcPtr->tcOprec;
+    signal->theData[3] = transid1;
+    signal->theData[4] = transid2;
+    signal->theData[5] = pass;
+    Uint32 tup = refToMain(regTcPtr->tcTupBlockref);
+    EXECUTE_DIRECT(tup, GSN_FIRE_TRIG_REQ, signal, 6);
+
+    err = signal->theData[0];
+    Uint32 cnt = signal->theData[1];
+
+    if (ERROR_INSERTED_CLEAR(5066))
+    {
+      err = 5066;
+    }
+
+    if (ERROR_INSERTED_CLEAR(5068))
+      tcOprec++;
+    if (ERROR_INSERTED_CLEAR(5069))
+      transid1++;
+
+    if (err == 0)
+    {
+      jam();
+      Uint32 Tdata[FireTrigConf::SignalLength];
+      FireTrigConf * conf = CAST_PTR(FireTrigConf, Tdata);
+      conf->tcOpRec = tcOprec;
+      conf->transId[0] = transid1;
+      conf->transId[1] = transid2;
+      conf->noFiredTriggers = cnt;
+      sendFireTrigConfTc(signal, regTcPtr->tcBlockref, Tdata);
+      return;
+    }
+  }
+  else
+  {
+    jam();
+    err = FireTrigRef::FTR_UnknownOperation;
+  }
+
+do_err:
+  if (ERROR_INSERTED_CLEAR(5070))
+    tcOprec++;
+
+  if (ERROR_INSERTED_CLEAR(5071))
+    transid1++;
+
+  FireTrigRef * ref = CAST_PTR(FireTrigRef, signal->getDataPtrSend());
+  ref->tcOpRec = tcOprec;
+  ref->transId[0] = transid1;
+  ref->transId[1] = transid2;
+  ref->errCode = err;
+  sendSignal(senderRef, GSN_FIRE_TRIG_REF,
+             signal, FireTrigRef::SignalLength, JBB);
+
+  return;
+}
+
+void
+Dblqh::sendFireTrigConfTc(Signal* signal,
+                          BlockReference atcBlockref,
+                          Uint32 Tdata[])
+{
+  if (refToInstance(atcBlockref) != 0)
+  {
+    jam();
+    memcpy(signal->theData, Tdata, 4 * FireTrigConf::SignalLength);
+    sendSignal(atcBlockref, GSN_FIRE_TRIG_CONF,
+               signal, FireTrigConf::SignalLength, JBB);
+    return;
+  }
+
+  HostRecordPtr Thostptr;
+  Uint32 len = FireTrigConf::SignalLength;
+
+  Thostptr.i = refToNode(atcBlockref);
+  ptrCheckGuard(Thostptr, chostFileSize, hostRecord);
+
+  if (Thostptr.p->noOfPackedWordsTc > (25 - len))
+  {
+    jam();
+    sendPackedSignalTc(signal, Thostptr.p);
+  }
+  else
+  {
+    jam();
+    updatePackedList(signal, Thostptr.p, Thostptr.i);
+  }
+
+  ndbassert(FireTrigConf::SignalLength == 4);
+  Uint32 * dst = &Thostptr.p->packedWordsTc[Thostptr.p->noOfPackedWordsTc];
+  Thostptr.p->noOfPackedWordsTc += len;
+  dst[0] = Tdata[0] | (ZFIRE_TRIG_CONF << 28);
+  dst[1] = Tdata[1];
+  dst[2] = Tdata[2];
+  dst[3] = Tdata[3];
+}
+
+bool
+Dblqh::check_fire_trig_pass(Uint32 opId, Uint32 pass)
+{
+  /**
+   * Check that trigger only fires once per pass
+   *   (per primary key)
+   */
+  TcConnectionrecPtr regTcPtr;
+  regTcPtr.i= opId;
+  ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec);
+  if (regTcPtr.p->m_fire_trig_pass <= pass)
+  {
+    regTcPtr.p->m_fire_trig_pass = pass + 1;
+    return true;
+  }
+  return false;
+}
+
 /* ************************************************************************>>
  *  COMMIT: Start commit request from TC. This signal is originally sent as a
  *  packed signal and this function is called from execPACKED_SIGNAL.
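As sendFireTrigConfTc() above shows, FIRE_TRIG_REQ/CONF ride on the existing TC<->LQH packed-signal mechanism: the signal type occupies the top four bits of the first word, and execPACKED_SIGNAL masks them off again before dispatching. A minimal standalone sketch of that encoding (ZFIRE_TRIG_CONF is 7, as added to PackedSignal.hpp above):

  #include <cassert>
  typedef unsigned int Uint32;

  #define ZFIRE_TRIG_CONF 7

  int main()
  {
    Uint32 tcOpRec = 0x1234;                             // first word of FireTrigConf
    Uint32 packed  = tcOpRec | (ZFIRE_TRIG_CONF << 28);  // as in sendFireTrigConfTc()
    assert((packed >> 28) == ZFIRE_TRIG_CONF);           // dispatch in execPACKED_SIGNAL
    assert((packed & 0x0FFFFFFF) == tcOpRec);            // receiver strips the type bits
    return 0;
  }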

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-03-29 06:50:49 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-04-29 09:11:12 +0000
@@ -1583,8 +1583,6 @@ Dbspj::nodeFail(Signal* signal, Ptr<Requ
 {
   Uint32 cnt = 0;
   Uint32 iter = 0;
-  Uint32 outstanding = requestPtr.p->m_outstanding;
-  Uint32 aborting = requestPtr.p->m_state & Request::RS_ABORTING;
 
   {
     Ptr<TreeNode> nodePtr;
@@ -1623,12 +1621,6 @@ Dbspj::nodeFail(Signal* signal, Ptr<Requ
   {
     jam();
     abort(signal, requestPtr, DbspjErr::NodeFailure);
-
-    if (aborting && outstanding && requestPtr.p->m_outstanding == 0)
-    {
-      jam();
-      checkBatchComplete(signal, requestPtr, 0);
-    }
   }
 
   return cnt + iter;
@@ -4731,20 +4723,16 @@ Dbspj::scanIndex_parent_row(Signal* sign
         break;
       }
 
-      if (fragPtr.p->m_ref == 0)
-      {
-        jam();
-        fragPtr.p->m_ref = tmp.receiverRef;
-      }
-      else
-      {
-        /**
-         * TODO: not 100% sure if this is correct with reorg ongoing...
-         *       but scanning "old" should regardless be safe as we still have
-         *       scanCookie
-         */
-        ndbassert(fragPtr.p->m_ref == tmp.receiverRef);
-      }
+      /**
+       * NOTE: We can get different receiverRef's here
+       *       for different keys, e.g. during node recovery where
+       *       the primary fragment is switched.
+       *
+       *       Use the latest one that we receive.
+       *
+       * TODO: Also double check table-reorg
+       */
+      fragPtr.p->m_ref = tmp.receiverRef;
     }
     else
     {

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2011-04-05 06:46:48 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2011-04-29 09:23:56 +0000
@@ -170,7 +170,17 @@ public:
     CS_FAIL_COMMITTING = 22,
     CS_FAIL_COMMITTED = 23,
     CS_FAIL_COMPLETED = 24,
-    CS_START_SCAN = 25
+    CS_START_SCAN = 25,
+
+    /**
+     * Sending FIRE_TRIG_REQ
+     */
+    CS_SEND_FIRE_TRIG_REQ = 26,
+
+    /**
+     * Waiting for FIRE_TRIG_CONF/REF (or operations generated by this)
+     */
+    CS_WAIT_FIRE_TRIG_REQ = 27
   };
 
   enum OperationState {
@@ -191,7 +201,9 @@ public:
     OS_WAIT_COMMIT_CONF = 15,
     OS_WAIT_ABORT_CONF = 16,
     OS_WAIT_COMPLETE_CONF = 17,
-    OS_WAIT_SCAN = 18
+    OS_WAIT_SCAN = 18,
+
+    OS_FIRE_TRIG_REQ = 19,
   };
 
   enum AbortState {
@@ -693,7 +705,10 @@ public:
     //---------------------------------------------------
     UintR lastTcConnect;
     UintR lqhkeyreqrec;
-    Uint32 buddyPtr;
+    union {
+      Uint32 buddyPtr;
+      Int32 pendingTriggers; // For deferred triggers
+    };
     union {
       UintR apiScanRec;
       UintR commitAckMarker;
@@ -709,6 +724,8 @@ public:
       TF_TRIGGER_PENDING = 2, // Used to mark waiting for a CONTINUEB
       TF_EXEC_FLAG       = 4,
       TF_COMMIT_ACK_MARKER_RECEIVED = 8,
+      TF_DEFERRED_CONSTRAINTS = 16, // check constraints in deferred fashion
+      TF_DEFERRED_TRIGGERS = 32, // trans has deferred triggers
       TF_END = 0
     };
     Uint32 m_flags;
@@ -779,6 +796,12 @@ public:
 #ifdef ERROR_INSERT
     Uint32 continueBCount;  // ERROR_INSERT 8082
 #endif
+    Uint8 m_pre_commit_pass;
+
+    bool isExecutingDeferredTriggers() const {
+      return apiConnectstate == CS_SEND_FIRE_TRIG_REQ ||
+        apiConnectstate == CS_WAIT_FIRE_TRIG_REQ ;
+    }
   };
   
   typedef Ptr<ApiConnectRecord> ApiConnectRecordPtr;
@@ -843,7 +866,8 @@ public:
       SOF_REORG_MOVING = 8,           // A record that should be moved
       SOF_TRIGGER = 16,               // A trigger
       SOF_REORG_COPY = 32,
-      SOF_REORG_DELETE = 64
+      SOF_REORG_DELETE = 64,
+      SOF_DEFERRED_TRIGGER = 128      // Op has deferred trigger
     };
     
     static inline bool isIndexOp(Uint8 flags) {
@@ -866,12 +890,12 @@ public:
     Uint16 lqhInstanceKey;
     
     // Trigger data
-    FiredTriggerPtr accumulatingTriggerData;
-    UintR noFiredTriggers;
-    UintR noReceivedTriggers;
-    UintR triggerExecutionCount;
-    UintR triggeringOperation;
+    UintR noFiredTriggers;      // As reported by lqhKeyConf
+    UintR noReceivedTriggers;   // FIRE_TRIG_ORD
+    UintR triggerExecutionCount;// No of outstanding op due to triggers
     UintR savedState[LqhKeyConf::SignalLength];
+
+    UintR triggeringOperation;  // Which operation was "cause" of this op
     
     // Index data
     UintR indexOp;
@@ -1374,6 +1398,8 @@ private:
   void execALTER_INDX_IMPL_REQ(Signal* signal);
   void execSIGNAL_DROPPED_REP(Signal* signal);
 
+  void execFIRE_TRIG_REF(Signal*);
+  void execFIRE_TRIG_CONF(Signal*);
 
   // Index table lookup
   void execTCKEYCONF(Signal* signal);
@@ -1413,6 +1439,10 @@ private:
                        TcConnectRecord * const regTcPtr);
   Uint32 sendCompleteLqh(Signal* signal,
                          TcConnectRecord * const regTcPtr);
+
+  void sendFireTrigReq(Signal*, Ptr<ApiConnectRecord>, Uint32 firstTcConnect);
+  Uint32 sendFireTrigReqLqh(Signal*, Ptr<TcConnectRecord>, Uint32 pass);
+
   void sendTCKEY_FAILREF(Signal* signal, ApiConnectRecord *);
   void sendTCKEY_FAILCONF(Signal* signal, ApiConnectRecord *);
   void routeTCKEY_FAILREFCONF(Signal* signal, const ApiConnectRecord *, 
@@ -1549,11 +1579,10 @@ private:
 			     TcConnectRecord* trigOp);
   void restoreTriggeringOpState(Signal* signal, 
 				TcConnectRecord* trigOp);
-  void continueTriggeringOp(Signal* signal, 
-			    TcConnectRecord* trigOp);
+  void trigger_op_finished(Signal* signal, ApiConnectRecordPtr,
+                           TcConnectRecord* triggeringOp);
+  void continueTriggeringOp(Signal* signal, TcConnectRecord* trigOp);
 
-  void scheduleFiredTrigger(ApiConnectRecordPtr* transPtr, 
-                            TcConnectRecordPtr* opPtr);
   void executeTriggers(Signal* signal, ApiConnectRecordPtr* transPtr);
   void executeTrigger(Signal* signal,
                       TcFiredTriggerData* firedTriggerData,
@@ -1573,14 +1602,12 @@ private:
                             TcFiredTriggerData* firedTriggerData, 
                             ApiConnectRecordPtr* transPtr,
                             TcConnectRecordPtr* opPtr,
-                            TcIndexData* indexData,
-                            bool holdOperation = false);
+                            TcIndexData* indexData);
   void deleteFromIndexTable(Signal* signal, 
                             TcFiredTriggerData* firedTriggerData, 
                             ApiConnectRecordPtr* transPtr,
                             TcConnectRecordPtr* opPtr,
-                            TcIndexData* indexData,
-                            bool holdOperation = false);
+                            TcIndexData* indexData);
 
   void executeReorgTrigger(Signal* signal,
                            TcDefinedTriggerData* definedTriggerData,
@@ -2066,6 +2093,7 @@ private:
   // Used with ERROR_INSERT 8078 + 8079 to check API_FAILREQ handling
   Uint32 c_lastFailedApi;
 #endif
+  Uint32 m_deferred_enabled;
 };
 
 #endif

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp	2011-02-03 14:20:36 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp	2011-04-28 07:47:53 +0000
@@ -297,7 +297,10 @@ Dbtc::Dbtc(Block_context& ctx, Uint32 in
   addRecSignal(GSN_ALTER_TAB_REQ, &Dbtc::execALTER_TAB_REQ);
   addRecSignal(GSN_ROUTE_ORD, &Dbtc::execROUTE_ORD);
   addRecSignal(GSN_TCKEY_FAILREFCONF_R, &Dbtc::execTCKEY_FAILREFCONF_R);
-  
+
+  addRecSignal(GSN_FIRE_TRIG_REF, &Dbtc::execFIRE_TRIG_REF);
+  addRecSignal(GSN_FIRE_TRIG_CONF, &Dbtc::execFIRE_TRIG_CONF);
+
   cacheRecord = 0;
   apiConnectRecord = 0;
   tcConnectRecord = 0;
@@ -334,6 +337,7 @@ Dbtc::Dbtc(Block_context& ctx, Uint32 in
   c_apiConTimer = 0;
   c_apiConTimer_line = 0;
   csystemStart = SSS_FALSE;
+  m_deferred_enabled = ~Uint32(0);
 }//Dbtc::Dbtc()
 
 Dbtc::~Dbtc() 

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2011-04-27 11:50:17 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2011-04-29 09:23:56 +0000
@@ -342,6 +342,19 @@ void Dbtc::execCONTINUEB(Signal* signal)
     ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
     sendtckeyconf(signal, Tdata1);
     return;
+  case TcContinueB::ZSEND_FIRE_TRIG_REQ:
+    jam();
+    apiConnectptr.i = Tdata0;
+    ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+    if (unlikely(! (apiConnectptr.p->transid[0] == Tdata1 &&
+                    apiConnectptr.p->transid[1] == Tdata2 &&
+                    apiConnectptr.p->apiConnectstate == CS_SEND_FIRE_TRIG_REQ)))
+    {
+      warningReport(signal, 29);
+      return;
+    }
+    sendFireTrigReq(signal, apiConnectptr, signal->theData[4]);
+    return;
   default:
     ndbrequire(false);
   }//switch
@@ -392,6 +405,16 @@ void Dbtc::execINCL_NODEREQ(Signal* sign
   }
 
   sendSignal(tblockref, GSN_INCL_NODECONF, signal, 2, JBB);
+
+  if (m_deferred_enabled)
+  {
+    jam();
+    if (!ndbd_deferred_unique_constraints(getNodeInfo(Tnode).m_version))
+    {
+      jam();
+      m_deferred_enabled = 0;
+    }
+  }
 }
 
 void Dbtc::execREAD_NODESREF(Signal* signal) 
@@ -871,6 +894,11 @@ void Dbtc::execREAD_NODESCONF(Signal* si
           jam();
           hostptr.p->hostLqhBlockRef = numberToRef(DBLQH, i);
         }
+        if (!ndbd_deferred_unique_constraints(getNodeInfo(i).m_version))
+        {
+          jam();
+          m_deferred_enabled = 0;
+        }
       }//if
     }//if
   }//for
@@ -2347,6 +2375,7 @@ void Dbtc::initApiConnectRec(Signal* sig
   regApiPtr->currSavePointId = 0;
   regApiPtr->m_transaction_nodes.clear();
   regApiPtr->singleUserMode = 0;
+  regApiPtr->m_pre_commit_pass = 0;
   // Trigger data
   releaseFiredTriggerData(&regApiPtr->theFiredTriggers);
   // Index data
@@ -2357,6 +2386,8 @@ void Dbtc::initApiConnectRec(Signal* sig
     releaseAllSeizedIndexOperations(regApiPtr);
   regApiPtr->immediateTriggerId = RNIL;
 
+  tc_clearbit(regApiPtr->m_flags,
+              ApiConnectRecord::TF_DEFERRED_CONSTRAINTS);
   c_counters.ctransCount++;
 
 #ifdef ERROR_INSERT
@@ -2397,8 +2428,6 @@ Dbtc::seizeTcRecord(Signal* signal)
 
   regTcPtr->prevTcConnect = TlastTcConnect;
   regTcPtr->nextTcConnect = RNIL;
-  regTcPtr->accumulatingTriggerData.i = RNIL;  
-  regTcPtr->accumulatingTriggerData.p = NULL;  
   regTcPtr->noFiredTriggers = 0;
   regTcPtr->noReceivedTriggers = 0;
   regTcPtr->triggerExecutionCount = 0;
@@ -2638,6 +2667,8 @@ void Dbtc::execTCKEYREQ(Signal* signal) 
     }//if
     break;
   case CS_START_COMMITTING:
+  case CS_SEND_FIRE_TRIG_REQ:
+  case CS_WAIT_FIRE_TRIG_REQ:
     jam();
     if(isIndexOpReturn || isExecutingTrigger){
       break;
@@ -2760,6 +2791,11 @@ void Dbtc::execTCKEYREQ(Signal* signal) 
     SegmentedSectionPtr attrInfoSec;
     if (handle.getSection(attrInfoSec, TcKeyReq::AttrInfoSectionNum))
       TattrLen= attrInfoSec.sz;
+
+    if (TcKeyReq::getDeferredConstraints(Treqinfo))
+    {
+      regApiPtr->m_flags |= ApiConnectRecord::TF_DEFERRED_CONSTRAINTS;
+    }
   }
   else
   {
@@ -3056,7 +3092,7 @@ void Dbtc::execTCKEYREQ(Signal* signal) 
       jam();
       // Trigger execution at commit
       regApiPtr->apiConnectstate = CS_REC_COMMITTING;
-    } else {
+    } else if (!regApiPtr->isExecutingDeferredTriggers()) {
       jam();
       regApiPtr->apiConnectstate = CS_RECEIVING;
     }//if
@@ -3352,6 +3388,10 @@ void Dbtc::tckeyreq050Lab(Signal* signal
       jam();
       regApiPtr->apiConnectstate = CS_START_COMMITTING;
       break;
+    case CS_SEND_FIRE_TRIG_REQ:
+    case CS_WAIT_FIRE_TRIG_REQ:
+      jam();
+      break;
     default:
       jam();
       systemErrorLab(signal, __LINE__);
@@ -3433,16 +3473,7 @@ void Dbtc::attrinfoDihReceivedLab(Signal
       TcConnectRecordPtr opPtr;
       opPtr.i = trigOp;
       ptrCheckGuard(opPtr, ctcConnectFilesize, tcConnectRecord);
-      opPtr.p->triggerExecutionCount--;
-      if (opPtr.p->triggerExecutionCount == 0)
-      {
-        /**
-         * We have completed current trigger execution
-         * Continue triggering operation
-         */
-        jam();
-        continueTriggeringOp(signal, opPtr.p);
-      }
+      trigger_op_finished(signal, apiConnectptr, opPtr.p);
       return;
     }
     else
@@ -3523,6 +3554,8 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
     }//if
   }//if
 #endif
+  Uint32 Tdeferred = tc_testbit(regApiPtr->m_flags,
+                                ApiConnectRecord::TF_DEFERRED_CONSTRAINTS);
   Uint32 reorg = 0;
   Uint32 Tspecial_op = regTcPtr->m_special_op_flags;
   if (Tspecial_op == 0)
@@ -3589,6 +3622,7 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
   LqhKeyReq::setOperation(Tdata10, sig1);
   LqhKeyReq::setNoDiskFlag(Tdata10, regCachePtr->m_no_disk_flag);
   LqhKeyReq::setQueueOnRedoProblemFlag(Tdata10, regCachePtr->m_op_queue);
+  LqhKeyReq::setDeferredConstraints(Tdata10, (Tdeferred & m_deferred_enabled));
 
   /* ----------------------------------------------------------------------- 
    * If we are sending a short LQHKEYREQ, then there will be some AttrInfo
@@ -3669,8 +3703,6 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
   }//if
 
   // Reset trigger count
-  regTcPtr->accumulatingTriggerData.i = RNIL;  
-  regTcPtr->accumulatingTriggerData.p = NULL;  
   regTcPtr->noFiredTriggers = 0;
   regTcPtr->triggerExecutionCount = 0;
 
@@ -3994,6 +4026,13 @@ void Dbtc::execPACKED_SIGNAL(Signal* sig
       execLQHKEYCONF(signal);
       Tstep += LqhKeyConf::SignalLength;
       break;
+    case ZFIRE_TRIG_CONF:
+      jam();
+      signal->header.theLength = 4;
+      signal->theData[3] = TpackDataPtr[3];
+      execFIRE_TRIG_CONF(signal);
+      Tstep += 4;
+      break;
     default:
       systemErrorLab(signal, __LINE__);
       return;
@@ -4131,7 +4170,8 @@ void Dbtc::execSIGNAL_DROPPED_REP(Signal
 
 void Dbtc::execLQHKEYCONF(Signal* signal) 
 {
-  const LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
+  const LqhKeyConf * lqhKeyConf = CAST_CONSTPTR(LqhKeyConf,
+                                                signal->getDataPtr());
 #ifdef UNUSED
   ndbout << "TC: Received LQHKEYCONF"
          << " transId1=" << lqhKeyConf-> transId1
@@ -4186,7 +4226,8 @@ void Dbtc::execLQHKEYCONF(Signal* signal
   UintR TapiConnectFilesize = capiConnectFilesize;
   UintR Ttrans1 = lqhKeyConf->transId1;
   UintR Ttrans2 = lqhKeyConf->transId2;
-  Uint32 noFired = lqhKeyConf->noFiredTriggers;
+  Uint32 noFired = LqhKeyConf::getFiredCount(lqhKeyConf->noFiredTriggers);
+  Uint32 deferred = LqhKeyConf::getDeferredBit(lqhKeyConf->noFiredTriggers);
 
   if (TapiConnectptrIndex >= TapiConnectFilesize) {
     TCKEY_abort(signal, 29);
@@ -4242,6 +4283,10 @@ void Dbtc::execLQHKEYCONF(Signal* signal
   regTcPtr->lastLqhCon = tlastLqhConnect;
   regTcPtr->lastLqhNodeId = refToNode(tlastLqhBlockref);
   regTcPtr->noFiredTriggers = noFired;
+  regTcPtr->m_special_op_flags |= (deferred) ?
+    TcConnectRecord::SOF_DEFERRED_TRIGGER : 0;
+  regApiPtr.p->m_flags |= (deferred) ?
+    ApiConnectRecord::TF_DEFERRED_TRIGGERS : 0;
 
   UintR Ttckeyrec = (UintR)regApiPtr.p->tckeyrec;
   UintR TclientData = regTcPtr->clientData;
@@ -4249,10 +4294,7 @@ void Dbtc::execLQHKEYCONF(Signal* signal
   Uint32 TopSimple = regTcPtr->opSimple;
   Uint32 Toperation = regTcPtr->operation;
   ConnectionState TapiConnectstate = regApiPtr.p->apiConnectstate;
-  if (Ttckeyrec > (ZTCOPCONF_SIZE - 2)) {
-    TCKEY_abort(signal, 30);
-    return;
-  }
+
   if (TapiConnectstate == CS_ABORTING) {
     warningReport(signal, 27);
     return;
@@ -4292,6 +4334,12 @@ void Dbtc::execLQHKEYCONF(Signal* signal
   } else {
     if (noFired == 0 && regTcPtr->triggeringOperation == RNIL) {
       jam();
+
+      if (Ttckeyrec > (ZTCOPCONF_SIZE - 2)) {
+        TCKEY_abort(signal, 30);
+        return;
+      }
+
       /*
        * Skip counting triggering operations the first round
        * since they will enter execLQHKEYCONF a second time
@@ -4405,7 +4453,8 @@ void Dbtc::execLQHKEYCONF(Signal* signal
   /**
    * And now decide what to do next
    */
-  if (regTcPtr->triggeringOperation != RNIL) {
+  if (regTcPtr->triggeringOperation != RNIL &&
+      !regApiPtr.p->isExecutingDeferredTriggers()) {
     jam();
     // This operation was created by a trigger execting operation
     // Restart it if we have executed all it's triggers
@@ -4413,15 +4462,7 @@ void Dbtc::execLQHKEYCONF(Signal* signal
 
     opPtr.i = regTcPtr->triggeringOperation;
     ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord);
-    opPtr.p->triggerExecutionCount--;
-    if (opPtr.p->triggerExecutionCount == 0) {
-      /*
-      We have completed current trigger execution
-      Continue triggering operation
-      */
-      jam();
-      continueTriggeringOp(signal, opPtr.p);
-    }
+    trigger_op_finished(signal, regApiPtr, opPtr.p);
   } else if (noFired == 0) {
     // This operation did not fire any triggers, finish operation
     jam();
@@ -4478,6 +4519,7 @@ Dbtc::lqhKeyConf_checkTransactionState(S
   UintR Tlqhkeyreqrec = regApiPtr.p->lqhkeyreqrec;
   int TnoOfOutStanding = Tlqhkeyreqrec - Tlqhkeyconfrec;
 
+  apiConnectptr = regApiPtr;
   switch (TapiConnectstate) {
   case CS_START_COMMITTING:
     if (TnoOfOutStanding == 0) {
@@ -4554,6 +4596,17 @@ Dbtc::lqhKeyConf_checkTransactionState(S
 /*---------------------------------------------------------------*/
     regApiPtr.p->tckeyrec = 0;
     return;
+  case CS_SEND_FIRE_TRIG_REQ:
+    return;
+  case CS_WAIT_FIRE_TRIG_REQ:
+    if (TnoOfOutStanding == 0 && regApiPtr.p->pendingTriggers == 0)
+    {
+      jam();
+      regApiPtr.p->apiConnectstate = CS_START_COMMITTING;
+      diverify010Lab(signal);
+      return;
+    }
+    return;
   default:
     TCKEY_abort(signal, 46);
     return;
@@ -4801,6 +4854,19 @@ void Dbtc::diverify010Lab(Signal* signal
     systemErrorLab(signal, __LINE__);
   }//if
 
+  if (tc_testbit(regApiPtr->m_flags, ApiConnectRecord::TF_DEFERRED_TRIGGERS))
+  {
+    jam();
+    /**
+     * If trans has deferred triggers, let them fire just before
+     *   transaction starts to commit
+     */
+    regApiPtr->pendingTriggers = 0;
+    tc_clearbit(regApiPtr->m_flags, ApiConnectRecord::TF_DEFERRED_TRIGGERS);
+    sendFireTrigReq(signal, apiConnectptr, regApiPtr->firstTcConnect);
+    return;
+  }
+
   if (regApiPtr->lqhkeyreqrec)
   {
     if (TfirstfreeApiConnectCopy != RNIL) {
@@ -5585,6 +5651,237 @@ Dbtc::sendCompleteLqh(Signal* signal,
 }
 
 void
+Dbtc::sendFireTrigReq(Signal* signal,
+                      Ptr<ApiConnectRecord> regApiPtr,
+                      Uint32 TopPtrI)
+{
+  UintR TtcConnectFilesize = ctcConnectFilesize;
+  TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+  TcConnectRecordPtr localTcConnectptr;
+
+  setApiConTimer(regApiPtr.i, ctcTimer, __LINE__);
+  regApiPtr.p->apiConnectstate = CS_SEND_FIRE_TRIG_REQ;
+
+  localTcConnectptr.i = TopPtrI;
+  ndbassert(TopPtrI != RNIL);
+  Uint32 Tlqhkeyreqrec = regApiPtr.p->lqhkeyreqrec;
+  Uint32 pass = regApiPtr.p->m_pre_commit_pass;
+  for (Uint32 i = 0; localTcConnectptr.i != RNIL && i < 16; i++)
+  {
+    ptrCheckGuard(localTcConnectptr,
+                  TtcConnectFilesize, localTcConnectRecord);
+
+    const Uint32 nextTcConnect = localTcConnectptr.p->nextTcConnect;
+    Uint32 flags = localTcConnectptr.p->m_special_op_flags;
+    if (flags & TcConnectRecord::SOF_DEFERRED_TRIGGER)
+    {
+      jam();
+      tc_clearbit(flags, TcConnectRecord::SOF_DEFERRED_TRIGGER);
+      ndbrequire(localTcConnectptr.p->tcConnectstate == OS_PREPARED);
+      localTcConnectptr.p->tcConnectstate = OS_FIRE_TRIG_REQ;
+      localTcConnectptr.p->m_special_op_flags = flags;
+      i += sendFireTrigReqLqh(signal, localTcConnectptr, pass);
+      Tlqhkeyreqrec++;
+    }
+    localTcConnectptr.i = nextTcConnect;
+  }
+
+  regApiPtr.p->lqhkeyreqrec = Tlqhkeyreqrec;
+  if (localTcConnectptr.i == RNIL)
+  {
+    /**
+     * Now wait for FIRE_TRIG_CONF
+     */
+    jam();
+    regApiPtr.p->apiConnectstate = CS_WAIT_FIRE_TRIG_REQ;
+    ndbrequire(pass < 255);
+    regApiPtr.p->m_pre_commit_pass = (Uint8)(pass + 1);
+    return;
+  }
+  else
+  {
+    jam();
+    signal->theData[0] = TcContinueB::ZSEND_FIRE_TRIG_REQ;
+    signal->theData[1] = regApiPtr.i;
+    signal->theData[2] = regApiPtr.p->transid[0];
+    signal->theData[3] = regApiPtr.p->transid[1];
+    signal->theData[4] = localTcConnectptr.i;
+    if (ERROR_INSERTED_CLEAR(8090))
+    {
+      sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 5000, 5);
+    }
+    else
+    {
+      sendSignal(cownref, GSN_CONTINUEB, signal, 5, JBB);
+    }
+  }
+}
+
+Uint32
+Dbtc::sendFireTrigReqLqh(Signal* signal,
+                         Ptr<TcConnectRecord> regTcPtr,
+                         Uint32 pass)
+{
+  HostRecordPtr Thostptr;
+  UintR ThostFilesize = chostFilesize;
+  ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+  Thostptr.i = regTcPtr.p->tcNodedata[0];
+  ptrCheckGuard(Thostptr, ThostFilesize, hostRecord);
+
+  Uint32 Tnode = Thostptr.i;
+  Uint32 self = getOwnNodeId();
+  Uint32 ret = (Tnode == self) ? 4 : 1;
+
+  Uint32 Tdata[FireTrigReq::SignalLength];
+  FireTrigReq * req = CAST_PTR(FireTrigReq, Tdata);
+  req->tcOpRec = regTcPtr.i;
+  req->transId[0] = regApiPtr->transid[0];
+  req->transId[1] = regApiPtr->transid[1];
+  req->pass = pass;
+  Uint32 len = FireTrigReq::SignalLength;
+
+  // currently packed signal cannot address specific instance
+  const bool send_unpacked = getNodeInfo(Thostptr.i).m_lqh_workers > 1;
+  if (send_unpacked) {
+    memcpy(signal->theData, Tdata, len << 2);
+    Uint32 instanceKey = regTcPtr.p->lqhInstanceKey;
+    BlockReference lqhRef = numberToRef(DBLQH, instanceKey, Tnode);
+    sendSignal(lqhRef, GSN_FIRE_TRIG_REQ, signal, len, JBB);
+    return ret;
+  }
+
+  if (Thostptr.p->noOfPackedWordsLqh > 25 - len) {
+    jam();
+    sendPackedSignalLqh(signal, Thostptr.p);
+  } else {
+    jam();
+    ret = 1;
+    updatePackedList(signal, Thostptr.p, Thostptr.i);
+  }
+
+  Tdata[0] |= (ZFIRE_TRIG_REQ << 28);
+  UintR Tindex = Thostptr.p->noOfPackedWordsLqh;
+  UintR* TDataPtr = &Thostptr.p->packedWordsLqh[Tindex];
+  memcpy(TDataPtr, Tdata, len << 2);
+  Thostptr.p->noOfPackedWordsLqh = Tindex + len;
+  return ret;
+}
+
+void
+Dbtc::execFIRE_TRIG_CONF(Signal* signal)
+{
+  TcConnectRecordPtr localTcConnectptr;
+  ApiConnectRecordPtr regApiPtr;
+
+  UintR TtcConnectFilesize = ctcConnectFilesize;
+  UintR TapiConnectFilesize = capiConnectFilesize;
+  TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+  ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+
+  const FireTrigConf * conf = CAST_CONSTPTR(FireTrigConf, signal->theData);
+  localTcConnectptr.i = conf->tcOpRec;
+  jamEntry();
+  ptrCheckGuard(localTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
+  regApiPtr.i = localTcConnectptr.p->apiConnect;
+  if (localTcConnectptr.p->tcConnectstate != OS_FIRE_TRIG_REQ)
+  {
+    warningReport(signal, 28);
+    return;
+  }//if
+  ptrCheckGuard(regApiPtr, TapiConnectFilesize,
+                localApiConnectRecord);
+
+  Uint32 Tlqhkeyreqrec = regApiPtr.p->lqhkeyreqrec;
+  Uint32 TapiConnectstate = regApiPtr.p->apiConnectstate;
+  UintR Tdata1 = regApiPtr.p->transid[0] - conf->transId[0];
+  UintR Tdata2 = regApiPtr.p->transid[1] - conf->transId[1];
+  Uint32 TcheckCondition =
+    (TapiConnectstate != CS_SEND_FIRE_TRIG_REQ) &&
+    (TapiConnectstate != CS_WAIT_FIRE_TRIG_REQ);
+
+  Tdata1 = Tdata1 | Tdata2 | TcheckCondition;
+
+  if (Tdata1 != 0) {
+    warningReport(signal, 28);
+    return;
+  }//if
+
+  if (ERROR_INSERTED_CLEAR(8091))
+  {
+    jam();
+    return;
+  }
+
+  CRASH_INSERTION(8092);
+
+  setApiConTimer(regApiPtr.i, ctcTimer, __LINE__);
+  ndbassert(Tlqhkeyreqrec > 0);
+  regApiPtr.p->lqhkeyreqrec = Tlqhkeyreqrec - 1;
+  localTcConnectptr.p->tcConnectstate = OS_PREPARED;
+
+  Uint32 noFired  = FireTrigConf::getFiredCount(conf->noFiredTriggers);
+  Uint32 deferred = FireTrigConf::getDeferredBit(conf->noFiredTriggers);
+
+  regApiPtr.p->pendingTriggers += noFired;
+  regApiPtr.p->m_flags |= (deferred) ?
+    ApiConnectRecord::TF_DEFERRED_TRIGGERS : 0;
+  localTcConnectptr.p->m_special_op_flags |= (deferred) ?
+    TcConnectRecord::SOF_DEFERRED_TRIGGER : 0;
+
+  if (regApiPtr.p->pendingTriggers == 0)
+  {
+    jam();
+    lqhKeyConf_checkTransactionState(signal, regApiPtr);
+  }
+}
+
+void
+Dbtc::execFIRE_TRIG_REF(Signal* signal)
+{
+  TcConnectRecordPtr localTcConnectptr;
+  ApiConnectRecordPtr regApiPtr;
+
+  UintR TtcConnectFilesize = ctcConnectFilesize;
+  UintR TapiConnectFilesize = capiConnectFilesize;
+  TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+  ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+
+  const FireTrigRef * ref = CAST_CONSTPTR(FireTrigRef, signal->theData);
+  localTcConnectptr.i = ref->tcOpRec;
+  jamEntry();
+  ptrCheckGuard(localTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
+  regApiPtr.i = localTcConnectptr.p->apiConnect;
+  if (localTcConnectptr.p->tcConnectstate != OS_FIRE_TRIG_REQ)
+  {
+    warningReport(signal, 28);
+    return;
+  }//if
+  ptrCheckGuard(regApiPtr, TapiConnectFilesize,
+                localApiConnectRecord);
+
+  apiConnectptr = regApiPtr;
+
+  UintR Tdata1 = regApiPtr.p->transid[0] - ref->transId[0];
+  UintR Tdata2 = regApiPtr.p->transid[1] - ref->transId[1];
+  Tdata1 = Tdata1 | Tdata2;
+  if (Tdata1 != 0) {
+    warningReport(signal, 28);
+    return;
+  }//if
+
+  if (regApiPtr.p->apiConnectstate != CS_SEND_FIRE_TRIG_REQ &&
+      regApiPtr.p->apiConnectstate != CS_WAIT_FIRE_TRIG_REQ)
+  {
+    jam();
+    warningReport(signal, 28);
+    return;
+  }
+
+  terrorCode = ref->errCode;
+  abortErrorLab(signal);
+}
+
+void
 Dbtc::execTC_COMMIT_ACK(Signal* signal){
   jamEntry();
 
@@ -5968,19 +6265,16 @@ void Dbtc::execLQHKEYREF(Signal* signal)
          */
         regApiPtr->lqhkeyreqrec--;
 
+        /**
+         * A failing op in LQH never leaves the commit ack marker around.
+         * TODO: This can be a bug in ordinary code too!!!
+         */
+        clearCommitAckMarker(regApiPtr, regTcPtr);
+
         unlinkReadyTcCon(signal);
         releaseTcCon();
 
-        opPtr.p->triggerExecutionCount--;
-        if (opPtr.p->triggerExecutionCount == 0)
-        {
-          /**
-           * We have completed current trigger execution
-           * Continue triggering operation
-           */
-          jam();
-          continueTriggeringOp(signal, opPtr.p);
-        }
+        trigger_op_finished(signal, apiConnectptr, opPtr.p);
         return;
       }
       
@@ -6571,6 +6865,18 @@ void Dbtc::warningReport(Signal* signal,
     ndbout << "Received LQHKEYCONF in wrong api-state in Dbtc" << endl;
 #endif
     break;
+  case 28:
+    jam();
+#ifdef ABORT_TRACE
+    ndbout << "Discarding FIRE_TRIG_REF/CONF in Dbtc" << endl;
+#endif
+    break;
+  case 29:
+    jam();
+#ifdef ABORT_TRACE
+    ndbout << "Discarding TcContinueB::ZSEND_FIRE_TRIG_REQ in Dbtc" << endl;
+#endif
+    break;
   default:
     jam();
     break;
@@ -6821,6 +7127,8 @@ ABORT020:
     jam();
   case OS_OPERATING:
     jam();
+  case OS_FIRE_TRIG_REQ:
+    jam();
     /*----------------------------------------------------------------------
      * WE HAVE SENT LQHKEYREQ AND ARE IN SOME STATE OF EITHER STILL       
      * SENDING THE OPERATION, WAITING FOR REPLIES, WAITING FOR MORE       
@@ -7166,6 +7474,8 @@ void Dbtc::timeOutFoundLab(Signal* signa
   case CS_RECEIVING:
   case CS_REC_COMMITTING:
   case CS_START_COMMITTING:
+  case CS_WAIT_FIRE_TRIG_REQ:
+  case CS_SEND_FIRE_TRIG_REQ:
     jam();
     /*------------------------------------------------------------------*/
     /*       WE ARE STILL IN THE PREPARE PHASE AND THE TRANSACTION HAS  */
@@ -8090,6 +8400,28 @@ void Dbtc::execNODE_FAILREP(Signal* sign
                   myHostPtr.i};
     simBlockNodeFailure(signal, myHostPtr.i, cb);
   }
+
+  if (m_deferred_enabled == 0)
+  {
+    jam();
+    Uint32 ok = 1;
+    for(Uint32 n = c_alive_nodes.find_first();
+        n != c_alive_nodes.NotFound;
+        n = c_alive_nodes.find_next(n))
+    {
+      if (!ndbd_deferred_unique_constraints(getNodeInfo(n).m_version))
+      {
+        jam();
+        ok = 0;
+        break;
+      }
+    }
+    if (ok)
+    {
+      jam();
+      m_deferred_enabled = ~Uint32(0);
+    }
+  }
 }//Dbtc::execNODE_FAILREP()
 
 void
@@ -13468,11 +13800,12 @@ void Dbtc::execFIRE_TRIG_ORD(Signal* sig
     {
       jam();      
       opPtr.p->noReceivedTriggers++;
-      opPtr.p->triggerExecutionCount++;
+      opPtr.p->triggerExecutionCount++; // Default 1 LQHKEYREQ per trigger
 
       // Insert fired trigger in execution queue
       transPtr.p->theFiredTriggers.add(trigPtr);
-      if (opPtr.p->noReceivedTriggers == opPtr.p->noFiredTriggers) {
+      if (opPtr.p->noReceivedTriggers == opPtr.p->noFiredTriggers ||
+          transPtr.p->isExecutingDeferredTriggers()) {
 	executeTriggers(signal, &transPtr);
       }
       return;
@@ -14788,6 +15121,31 @@ void Dbtc::saveTriggeringOpState(Signal*
                 LqhKeyConf::SignalLength);  
 }
 
+void
+Dbtc::trigger_op_finished(Signal* signal, ApiConnectRecordPtr regApiPtr,
+                          TcConnectRecord* triggeringOp)
+{
+  if (!regApiPtr.p->isExecutingDeferredTriggers())
+  {
+    ndbassert(triggeringOp->triggerExecutionCount > 0);
+    triggeringOp->triggerExecutionCount--;
+    if (triggeringOp->triggerExecutionCount == 0)
+    {
+      /**
+       * We have completed current trigger execution
+       * Continue triggering operation
+       */
+      jam();
+      continueTriggeringOp(signal, triggeringOp);
+    }
+  }
+  else
+  {
+    jam();
+    lqhKeyConf_checkTransactionState(signal, regApiPtr);
+  }
+}
+
 void Dbtc::continueTriggeringOp(Signal* signal, TcConnectRecord* trigOp)
 {
   LqhKeyConf * lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
@@ -14795,6 +15153,9 @@ void Dbtc::continueTriggeringOp(Signal* 
                 (UintR*)lqhKeyConf,
 		LqhKeyConf::SignalLength);
 
+  ndbassert(trigOp->savedState[LqhKeyConf::SignalLength-1] != ~Uint32(0));
+  trigOp->savedState[LqhKeyConf::SignalLength-1] = ~Uint32(0);
+
   lqhKeyConf->noFiredTriggers = 0;
   trigOp->noReceivedTriggers = 0;
 
@@ -14802,18 +15163,6 @@ void Dbtc::continueTriggeringOp(Signal* 
   execLQHKEYCONF(signal);
 }
 
-void Dbtc::scheduleFiredTrigger(ApiConnectRecordPtr* transPtr,
-                                TcConnectRecordPtr* opPtr)
-{
-  // Set initial values for trigger fireing operation
-  opPtr->p->triggerExecutionCount++;
-
-  // Insert fired trigger in execution queue
-  transPtr->p->theFiredTriggers.add(opPtr->p->accumulatingTriggerData);
-  opPtr->p->accumulatingTriggerData.i = RNIL;
-  opPtr->p->accumulatingTriggerData.p = NULL;
-}
-
 void Dbtc::executeTriggers(Signal* signal, ApiConnectRecordPtr* transPtr)
 {
   ApiConnectRecord* regApiPtr = transPtr->p;
@@ -14824,7 +15173,10 @@ void Dbtc::executeTriggers(Signal* signa
   if (!regApiPtr->theFiredTriggers.isEmpty()) {
     jam();
     if ((regApiPtr->apiConnectstate == CS_STARTED) ||
-        (regApiPtr->apiConnectstate == CS_START_COMMITTING)) {
+        (regApiPtr->apiConnectstate == CS_START_COMMITTING) ||
+        (regApiPtr->apiConnectstate == CS_SEND_FIRE_TRIG_REQ) ||
+        (regApiPtr->apiConnectstate == CS_WAIT_FIRE_TRIG_REQ))
+    {
       jam();
       regApiPtr->theFiredTriggers.first(trigPtr);
       while (trigPtr.i != RNIL) {
@@ -14834,7 +15186,8 @@ void Dbtc::executeTriggers(Signal* signa
         ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord);
 	FiredTriggerPtr nextTrigPtr = trigPtr;
 	regApiPtr->theFiredTriggers.next(nextTrigPtr);
-        if (opPtr.p->noReceivedTriggers == opPtr.p->noFiredTriggers) {
+        if (opPtr.p->noReceivedTriggers == opPtr.p->noFiredTriggers ||
+            regApiPtr->isExecutingDeferredTriggers()) {
           jam();
           // Fireing operation is ready to have a trigger executing
           executeTrigger(signal, trigPtr.p, transPtr, &opPtr);
@@ -14903,6 +15256,7 @@ void Dbtc::executeTrigger(Signal* signal
        c_theDefinedTriggers.getPtr(firedTriggerData->triggerId)) 
       != NULL)
   {
+    transPtr->p->pendingTriggers--;
     switch(firedTriggerData->triggerType) {
     case(TriggerType::SECONDARY_INDEX):
       jam();
@@ -14942,8 +15296,8 @@ void Dbtc::executeIndexTrigger(Signal* s
   }
   case(TriggerEvent::TE_UPDATE): {
     jam();
-    deleteFromIndexTable(signal, firedTriggerData, transPtr, opPtr, 
-			 indexData, true); // Hold the triggering operation
+    opPtr->p->triggerExecutionCount++; // One already counted; an update needs 2 (delete + insert)
+    deleteFromIndexTable(signal, firedTriggerData, transPtr, opPtr, indexData);
     insertIntoIndexTable(signal, firedTriggerData, transPtr, opPtr, indexData);
     break;
   }
@@ -15066,8 +15420,7 @@ void Dbtc::insertIntoIndexTable(Signal* 
                                 TcFiredTriggerData* firedTriggerData, 
                                 ApiConnectRecordPtr* transPtr,
                                 TcConnectRecordPtr* opPtr,
-                                TcIndexData* indexData,
-                                bool holdOperation)
+                                TcIndexData* indexData)
 {
   ApiConnectRecord* regApiPtr = transPtr->p;
   TcConnectRecord* opRecord = opPtr->p;
@@ -15081,10 +15434,6 @@ void Dbtc::insertIntoIndexTable(Signal* 
   ptrCheckGuard(indexTabPtr, ctabrecFilesize, tableRecord);
   tcKeyReq->apiConnectPtr = transPtr->i;
   tcKeyReq->senderData = opPtr->i;
-  if (holdOperation) {
-    jam();
-    opRecord->triggerExecutionCount++;
-  }//if
 
   /* Key for insert to unique index table is the afterValues from the
    * base table operation (from update or insert on base).
@@ -15097,6 +15446,15 @@ void Dbtc::insertIntoIndexTable(Signal* 
   LocalDataBuffer<11> afterValues(pool, firedTriggerData->afterValues);
   LocalDataBuffer<11> keyValues(pool, firedTriggerData->keyValues);
 
+  if (afterValues.getSize() == 0)
+  {
+    jam();
+    ndbrequire(tc_testbit(regApiPtr->m_flags,
+                          ApiConnectRecord::TF_DEFERRED_CONSTRAINTS));
+    trigger_op_finished(signal, *transPtr, opRecord);
+    return;
+  }
+
   Uint32 keyIVal= RNIL;
   Uint32 attrIVal= RNIL;
   bool appendOk= false;
@@ -15129,15 +15487,7 @@ void Dbtc::insertIntoIndexTable(Signal* 
     {
       jam();
       releaseSection(keyIVal);
-      opRecord->triggerExecutionCount--;
-      if (opRecord->triggerExecutionCount == 0) {
-        /*
-          We have completed current trigger execution
-          Continue triggering operation
-        */
-        jam();
-        continueTriggeringOp(signal, opRecord);	
-      }//if
+      trigger_op_finished(signal, *transPtr, opRecord);
       return;
     }
     
@@ -15231,8 +15581,7 @@ void Dbtc::deleteFromIndexTable(Signal* 
                                 TcFiredTriggerData* firedTriggerData, 
                                 ApiConnectRecordPtr* transPtr,
                                 TcConnectRecordPtr* opPtr,
-                                TcIndexData* indexData,
-                                bool holdOperation)
+                                TcIndexData* indexData)
 {
   ApiConnectRecord* regApiPtr = transPtr->p;
   TcConnectRecord* opRecord = opPtr->p;
@@ -15244,10 +15593,7 @@ void Dbtc::deleteFromIndexTable(Signal* 
   ptrCheckGuard(indexTabPtr, ctabrecFilesize, tableRecord);
   tcKeyReq->apiConnectPtr = transPtr->i;
   tcKeyReq->senderData = opPtr->i;
-  if (holdOperation) {
-    jam();
-    opRecord->triggerExecutionCount++;
-  }//if
+
   // Calculate key length and renumber attribute id:s
   AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
   LocalDataBuffer<11> beforeValues(pool, firedTriggerData->beforeValues);
@@ -15255,7 +15601,16 @@ void Dbtc::deleteFromIndexTable(Signal* 
   Uint32 keyIVal= RNIL;
   Uint32 attrId= 0;
   bool hasNull= false;
-  
+
+  if (beforeValues.getSize() == 0)
+  {
+    jam();
+    ndbrequire(tc_testbit(regApiPtr->m_flags,
+                          ApiConnectRecord::TF_DEFERRED_CONSTRAINTS));
+    trigger_op_finished(signal, *transPtr, opRecord);
+    return;
+  }
+
   /* Build Delete KeyInfo section from beforevalues */
   if (unlikely((! appendAttrDataToSection(keyIVal,
                                           beforeValues,
@@ -15277,15 +15632,7 @@ void Dbtc::deleteFromIndexTable(Signal* 
   {
     jam();
     releaseSection(keyIVal);
-    opRecord->triggerExecutionCount--;
-    if (opRecord->triggerExecutionCount == 0) {
-      /*
-        We have completed current trigger execution
-        Continue triggering operation
-      */
-      jam();
-      continueTriggeringOp(signal, opRecord);	
-    }//if
+    trigger_op_finished(signal, *transPtr, opRecord);
     return;
   }
 

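The Dbtc changes above never fire all deferred triggers from a single signal:
sendFireTrigReq() handles at most 16 tcConnect records per invocation and, when
the list is not exhausted, re-posts itself via TcContinueB::ZSEND_FIRE_TRIG_REQ
with the next list position. Below is a minimal standalone sketch of that
cooperative batching pattern; it is not NDB code, and the job queue, batch size
and integer operation ids are hypothetical stand-ins for CONTINUEB scheduling
and the tcConnect list.

#include <deque>
#include <functional>
#include <iostream>

// Hypothetical stand-ins: the job queue plays the role of CONTINUEB,
// each "operation" is just an integer id.
static std::deque<std::function<void()> > g_jobs;

static void processBatch(std::deque<int>& ops, size_t batchSize)
{
  size_t done = 0;
  while (!ops.empty() && done < batchSize)
  {
    std::cout << "fire deferred triggers for op " << ops.front() << std::endl;
    ops.pop_front();
    done++;
  }
  if (!ops.empty())
  {
    // More operations left: reschedule instead of looping on,
    // so each "signal" stays short.
    g_jobs.push_back([&ops, batchSize] { processBatch(ops, batchSize); });
  }
}

int main()
{
  std::deque<int> ops;
  for (int i = 0; i < 40; i++)
    ops.push_back(i);
  processBatch(ops, 16);        // first round
  while (!g_jobs.empty())       // scheduler draining the continue-jobs
  {
    std::function<void()> job = g_jobs.front();
    g_jobs.pop_front();
    job();
  }
  return 0;
}
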
=== modified file 'storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-04-27 08:39:36 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-04-29 09:23:56 +0000
@@ -960,7 +960,10 @@ ArrayPool<TupTriggerData> c_triggerPool;
       subscriptionDeleteTriggers(triggerPool),
       subscriptionUpdateTriggers(triggerPool),
       constraintUpdateTriggers(triggerPool),
-      tuxCustomTriggers(triggerPool)
+      tuxCustomTriggers(triggerPool),
+      deferredInsertTriggers(triggerPool),
+      deferredDeleteTriggers(triggerPool),
+      deferredUpdateTriggers(triggerPool)
       {}
     
     Bitmask<MAXNROFATTRIBUTESINWORDS> notNullAttributeMask;
@@ -1090,7 +1093,10 @@ ArrayPool<TupTriggerData> c_triggerPool;
     DLList<TupTriggerData> subscriptionDeleteTriggers;
     DLList<TupTriggerData> subscriptionUpdateTriggers;
     DLList<TupTriggerData> constraintUpdateTriggers;
-    
+    DLList<TupTriggerData> deferredInsertTriggers;
+    DLList<TupTriggerData> deferredUpdateTriggers;
+    DLList<TupTriggerData> deferredDeleteTriggers;
+
     // List of ordered indexes
     DLList<TupTriggerData> tuxCustomTriggers;
     
@@ -1505,19 +1511,31 @@ typedef Ptr<HostBuffer> HostBufferPtr;
     STATIC_CONST( SZ32 = 1 );
   };
 
+  enum When
+  {
+    KRS_PREPARE = 0,
+    KRS_COMMIT = 1,
+    KRS_PRE_COMMIT0 = 2, // There can be multiple pre commit phases...
+    KRS_PRE_COMMIT1 = 3
+  };
+
 struct KeyReqStruct {
 
-  KeyReqStruct(EmulatedJamBuffer * _jamBuffer) {
+  KeyReqStruct(EmulatedJamBuffer * _jamBuffer, When when = KRS_PREPARE) {
 #if defined VM_TRACE || defined ERROR_INSERT
     memset(this, 0xf3, sizeof(* this));
 #endif
     jamBuffer = _jamBuffer;
+    m_when = when;
+    m_deferred_constraints = true;
   }
-  KeyReqStruct(Dbtup* tup) {
+  KeyReqStruct(Dbtup* tup, When when = KRS_PREPARE) {
 #if defined VM_TRACE || defined ERROR_INSERT
     memset(this, 0xf3, sizeof(* this));
 #endif
     jamBuffer = tup->jamBuffer();
+    m_when = when;
+    m_deferred_constraints = true;
   }
   
 /**
@@ -1564,6 +1582,7 @@ struct KeyReqStruct {
   /* Flag: is tuple in expanded or in shrunken/stored format? */
   bool is_expanded;
   bool m_is_lcp;
+  enum When m_when;
 
   struct Var_data {
     /*
@@ -1611,6 +1630,7 @@ struct KeyReqStruct {
   bool            last_row;
   bool            m_use_rowid;
   Uint8           m_reorg;
+  bool            m_deferred_constraints;
 
   Signal*         signal;
   Uint32 no_fired_triggers;
@@ -1999,6 +2019,12 @@ private:
 //------------------------------------------------------------------
   void execDROP_TRIG_IMPL_REQ(Signal* signal);
 
+  /**
+   * Deferred triggers execute when execFIRE_TRIG_REQ
+   *   is called
+   */
+  void execFIRE_TRIG_REQ(Signal* signal);
+
 // *****************************************************************
 // Setting up the environment for reads, inserts, updates and deletes.
 // *****************************************************************
@@ -2584,11 +2610,11 @@ private:
                                     Tablerec* tablePtr,
                                     bool disk);
 
-#if 0
-  void checkDeferredTriggers(Signal* signal, 
+  void checkDeferredTriggers(KeyReqStruct *req_struct,
                              Operationrec* regOperPtr,
-                             Tablerec* regTablePtr);
-#endif
+                             Tablerec* regTablePtr,
+                             bool disk);
+
   void checkDetachedTriggers(KeyReqStruct *req_struct,
                              Operationrec* regOperPtr,
                              Tablerec* regTablePtr,
@@ -2599,9 +2625,19 @@ private:
                              Operationrec* regOperPtr,
                              bool disk);
 
+  void checkDeferredTriggersDuringPrepare(KeyReqStruct *req_struct,
+                                          DLList<TupTriggerData>& triggerList,
+                                          Operationrec* const regOperPtr,
+                                          bool disk);
   void fireDeferredTriggers(KeyReqStruct *req_struct,
                             DLList<TupTriggerData>& triggerList,
-                            Operationrec* regOperPtr);
+                            Operationrec* const regOperPtr,
+                            bool disk);
+
+  void fireDeferredConstraints(KeyReqStruct *req_struct,
+                               DLList<TupTriggerData>& triggerList,
+                               Operationrec* const regOperPtr,
+                               bool disk);
 
   void fireDetachedTriggers(KeyReqStruct *req_struct,
                             DLList<TupTriggerData>& triggerList,

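KeyReqStruct::no_fired_triggers (visible as context above) is reported back to
DBTC, and the same 32-bit word also carries a flag bit saying that the
operation still has deferred triggers pending (NoOfFiredTriggers::setDeferredBit
on the TUP side, FireTrigConf::getFiredCount/getDeferredBit on the TC side).
The exact layout is defined in trigger_definitions.h and FireTrigOrd.hpp, which
this commit also modifies but which are not shown in this mail; the sketch
below assumes the most significant bit purely for illustration.

#include <cassert>
#include <cstdint>

// Illustration only: the real bit position is defined in
// storage/ndb/include/kernel/trigger_definitions.h; here we simply assume
// the most significant bit of the 32-bit count word.
static const uint32_t DEFERRED_BIT = uint32_t(1) << 31;

static void     setDeferredBit(uint32_t& word) { word |= DEFERRED_BIT; }
static uint32_t getDeferredBit(uint32_t word)  { return (word & DEFERRED_BIT) ? 1 : 0; }
static uint32_t getFiredCount(uint32_t word)   { return word & ~DEFERRED_BIT; }

int main()
{
  uint32_t noFiredTriggers = 0;
  noFiredTriggers += 3;              // three triggers actually fired
  setDeferredBit(noFiredTriggers);   // and at least one deferred trigger exists

  assert(getFiredCount(noFiredTriggers) == 3);
  assert(getDeferredBit(noFiredTriggers) == 1);
  return 0;
}
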
=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp	2011-04-21 05:38:27 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp	2011-04-28 07:47:53 +0000
@@ -580,7 +580,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* si
   FragrecordPtr regFragPtr;
   OperationrecPtr regOperPtr;
   TablerecPtr regTabPtr;
-  KeyReqStruct req_struct(this);
+  KeyReqStruct req_struct(this, KRS_COMMIT);
   TransState trans_state;
   Uint32 no_of_fragrec, no_of_tablerec;
 

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2011-04-27 08:39:36 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2011-04-29 09:23:56 +0000
@@ -597,9 +597,11 @@ void Dbtup::execTUPKEYREQ(Signal* signal
 
    sig1 = tupKeyReq->m_row_id_page_no;
    sig2 = tupKeyReq->m_row_id_page_idx;
+   sig3 = tupKeyReq->deferred_constraints;
 
    req_struct.m_row_id.m_page_no = sig1;
    req_struct.m_row_id.m_page_idx = sig2;
+   req_struct.m_deferred_constraints = sig3;
 
    /* Get AttrInfo section if this is a long TUPKEYREQ */
    Uint32 attrInfoIVal= tupKeyReq->attrInfoIVal;

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp	2011-02-07 13:21:49 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp	2011-04-28 07:47:53 +0000
@@ -128,6 +128,8 @@ Dbtup::Dbtup(Block_context& ctx, Uint32 
   addRecSignal(GSN_DROP_FRAG_REQ, &Dbtup::execDROP_FRAG_REQ);
   addRecSignal(GSN_SUB_GCP_COMPLETE_REP, &Dbtup::execSUB_GCP_COMPLETE_REP);
 
+  addRecSignal(GSN_FIRE_TRIG_REQ, &Dbtup::execFIRE_TRIG_REQ);
+
   fragoperrec = 0;
   fragrecord = 0;
   alterTabOperRec = 0;

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp	2011-02-03 14:20:36 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp	2011-04-28 07:47:53 +0000
@@ -32,6 +32,7 @@
 #include <signaldata/DropTrigImpl.hpp>
 #include <signaldata/TuxMaint.hpp>
 #include <signaldata/AlterIndxImpl.hpp>
+#include "../dblqh/Dblqh.hpp"
 
 /* **************************************************************** */
 /* ---------------------------------------------------------------- */
@@ -523,6 +524,93 @@ Dbtup::dropTrigger(Tablerec* table, cons
   return 0;
 }//Dbtup::dropTrigger()
 
+void
+Dbtup::execFIRE_TRIG_REQ(Signal* signal)
+{
+  jam();
+  Uint32 opPtrI = signal->theData[0];
+  Uint32 pass = signal->theData[5];
+
+  FragrecordPtr regFragPtr;
+  OperationrecPtr regOperPtr;
+  TablerecPtr regTabPtr;
+  KeyReqStruct req_struct(this, (When)(KRS_PRE_COMMIT0 + pass));
+
+  regOperPtr.i = opPtrI;
+
+  jamEntry();
+
+  c_operation_pool.getPtr(regOperPtr);
+
+  regFragPtr.i = regOperPtr.p->fragmentPtr;
+  Uint32 no_of_fragrec = cnoOfFragrec;
+  ptrCheckGuard(regFragPtr, no_of_fragrec, fragrecord);
+
+  TransState trans_state = get_trans_state(regOperPtr.p);
+  ndbrequire(trans_state == TRANS_STARTED);
+
+  Uint32 no_of_tablerec = cnoOfTablerec;
+  regTabPtr.i = regFragPtr.p->fragTableId;
+  ptrCheckGuard(regTabPtr, no_of_tablerec, tablerec);
+
+  req_struct.signal = signal;
+  req_struct.TC_ref = signal->theData[1];
+  req_struct.TC_index = signal->theData[2];
+  req_struct.trans_id1 = signal->theData[3];
+  req_struct.trans_id2 = signal->theData[4];
+
+  PagePtr page;
+  Tuple_header* tuple_ptr = (Tuple_header*)
+    get_ptr(&page, &regOperPtr.p->m_tuple_location, regTabPtr.p);
+  req_struct.m_tuple_ptr = tuple_ptr;
+
+  OperationrecPtr lastOperPtr;
+  lastOperPtr.i = tuple_ptr->m_operation_ptr_i;
+  c_operation_pool.getPtr(lastOperPtr);
+
+  /**
+   * Deferred triggers should fire only once per primary key (per pass),
+   *   regardless of the number of DML operations on that primary key.
+   *
+   * We keep track of this on the *last* operation (which, btw, implies that
+   *   a trigger can't update its "own" tuple...i.e. the first op would be better...)
+   *
+   */
+  if (!c_lqh->check_fire_trig_pass(lastOperPtr.p->userpointer, pass))
+  {
+    jam();
+    signal->theData[0] = 0;
+    signal->theData[1] = 0;
+    return;
+  }
+
+  /**
+   * These are deferred triggers...
+   *   which are basically the same as detached triggers,
+   *     i.e. the before value is <before transaction>
+   *     and the after value is <after transaction>,
+   *   with the difference that they execute (fire) while
+   *   still having a transaction context...
+   *   i.e. they can abort or modify the transaction
+   */
+  req_struct.no_fired_triggers = 0;
+
+  /**
+   * See DbtupCommit re "Setting the op-list has this effect"
+   */
+  Uint32 save[2] = { lastOperPtr.p->nextActiveOp, lastOperPtr.p->prevActiveOp };
+  lastOperPtr.p->nextActiveOp = RNIL;
+  lastOperPtr.p->prevActiveOp = RNIL;
+
+  checkDeferredTriggers(&req_struct, lastOperPtr.p, regTabPtr.p, false);
+
+  lastOperPtr.p->nextActiveOp = save[0];
+  lastOperPtr.p->prevActiveOp = save[1];
+
+  signal->theData[0] = 0;
+  signal->theData[1] = req_struct.no_fired_triggers;
+}
+
 /* ---------------------------------------------------------------- */
 /* -------------- checkImmediateTriggersAfterOp ------------------ */
 /*                                                                  */
@@ -542,13 +630,24 @@ Dbtup::checkImmediateTriggersAfterInsert
     return;
   }
 
-  if ((regOperPtr->op_struct.primary_replica) &&
-      (!(regTablePtr->afterInsertTriggers.isEmpty()))) {
-    jam();
-    fireImmediateTriggers(req_struct,
-                          regTablePtr->afterInsertTriggers,
-                          regOperPtr,
-                          disk);
+  if (regOperPtr->op_struct.primary_replica)
+  {
+    if (! regTablePtr->afterInsertTriggers.isEmpty())
+    {
+      jam();
+      fireImmediateTriggers(req_struct,
+                            regTablePtr->afterInsertTriggers,
+                            regOperPtr,
+                            disk);
+    }
+
+    if (! regTablePtr->deferredInsertTriggers.isEmpty())
+    {
+      checkDeferredTriggersDuringPrepare(req_struct,
+                                         regTablePtr->deferredInsertTriggers,
+                                         regOperPtr,
+                                         disk);
+    }
   }
 }
 
@@ -562,21 +661,34 @@ Dbtup::checkImmediateTriggersAfterUpdate
     return;
   }
 
-  if ((regOperPtr->op_struct.primary_replica) &&
-      (!(regTablePtr->afterUpdateTriggers.isEmpty()))) {
-    jam();
-    fireImmediateTriggers(req_struct,
-                          regTablePtr->afterUpdateTriggers,
-                          regOperPtr,
-                          disk);
-  }
-  if ((regOperPtr->op_struct.primary_replica) &&
-      (!(regTablePtr->constraintUpdateTriggers.isEmpty()))) {
-    jam();
-    fireImmediateTriggers(req_struct,
-                          regTablePtr->constraintUpdateTriggers,
-                          regOperPtr,
-                          disk);
+  if (regOperPtr->op_struct.primary_replica)
+  {
+    if (! regTablePtr->afterUpdateTriggers.isEmpty())
+    {
+      jam();
+      fireImmediateTriggers(req_struct,
+                            regTablePtr->afterUpdateTriggers,
+                            regOperPtr,
+                            disk);
+    }
+
+    if (! regTablePtr->constraintUpdateTriggers.isEmpty())
+    {
+      jam();
+      fireImmediateTriggers(req_struct,
+                            regTablePtr->constraintUpdateTriggers,
+                            regOperPtr,
+                            disk);
+    }
+
+    if (! regTablePtr->deferredUpdateTriggers.isEmpty())
+    {
+      jam();
+      checkDeferredTriggersDuringPrepare(req_struct,
+                                         regTablePtr->deferredUpdateTriggers,
+                                         regOperPtr,
+                                         disk);
+    }
   }
 }
 
@@ -590,17 +702,48 @@ Dbtup::checkImmediateTriggersAfterDelete
     return;
   }
 
-  if ((regOperPtr->op_struct.primary_replica) &&
-      (!(regTablePtr->afterDeleteTriggers.isEmpty()))) {
+  if (regOperPtr->op_struct.primary_replica)
+  {
+    if (! regTablePtr->afterDeleteTriggers.isEmpty())
+    {
+      fireImmediateTriggers(req_struct,
+                            regTablePtr->afterDeleteTriggers,
+                            regOperPtr,
+                            disk);
+    }
+
+    if (! regTablePtr->deferredDeleteTriggers.isEmpty())
+    {
+      checkDeferredTriggersDuringPrepare(req_struct,
+                                         regTablePtr->deferredDeleteTriggers,
+                                         regOperPtr,
+                                         disk);
+    }
+  }
+}
+
+void
+Dbtup::checkDeferredTriggersDuringPrepare(KeyReqStruct *req_struct,
+                                          DLList<TupTriggerData>& triggerList,
+                                          Operationrec* const regOperPtr,
+                                          bool disk)
+{
+  jam();
+  TriggerPtr trigPtr;
+  triggerList.first(trigPtr);
+  while (trigPtr.i != RNIL)
+  {
     jam();
-    executeTriggers(req_struct,
-                    regTablePtr->afterDeleteTriggers,
-                    regOperPtr,
-                    disk);
+    if (trigPtr.p->monitorAllAttributes ||
+        trigPtr.p->attributeMask.overlaps(req_struct->changeMask))
+    {
+      jam();
+      NoOfFiredTriggers::setDeferredBit(req_struct->no_fired_triggers);
+      return;
+    }
+    triggerList.next(trigPtr);
   }
 }
 
-#if 0
 /* ---------------------------------------------------------------- */
 /* --------------------- checkDeferredTriggers -------------------- */
 /*                                                                  */
@@ -610,14 +753,95 @@ Dbtup::checkImmediateTriggersAfterDelete
 /* Executes deferred triggers by sending FIRETRIGORD                */
 /*                                                                  */
 /* ---------------------------------------------------------------- */
-void Dbtup::checkDeferredTriggers(Signal* signal, 
-                                  Operationrec* const regOperPtr,
-                                  Tablerec* const regTablePtr)
+void Dbtup::checkDeferredTriggers(KeyReqStruct *req_struct,
+                                  Operationrec* regOperPtr,
+                                  Tablerec* regTablePtr,
+                                  bool disk)
 {
   jam();
-  // NYI
+  Uint32 save_type = regOperPtr->op_struct.op_type;
+  Tuple_header *save_ptr = req_struct->m_tuple_ptr;
+  DLList<TupTriggerData> * deferred_list = 0;
+  DLList<TupTriggerData> * constraint_list = 0;
+
+  switch (save_type) {
+  case ZUPDATE:
+  case ZINSERT:
+    req_struct->m_tuple_ptr =get_copy_tuple(&regOperPtr->m_copy_tuple_location);
+    break;
+  }
+
+  /**
+   * Set correct operation type and fix change mask
+   * Note ALLOC is set in "orig" tuple
+   */
+  if (save_ptr->m_header_bits & Tuple_header::ALLOC) {
+    if (save_type == ZDELETE) {
+      // insert + delete = nothing
+      jam();
+      goto end;
+    }
+    regOperPtr->op_struct.op_type = ZINSERT;
+  }
+  else if (save_type == ZINSERT) {
+    /**
+     * Tuple was not created but last op is INSERT.
+     * This is possible only on DELETE + INSERT
+     */
+    regOperPtr->op_struct.op_type = ZUPDATE;
+  }
+
+  switch(regOperPtr->op_struct.op_type) {
+  case(ZINSERT):
+    jam();
+    deferred_list = &regTablePtr->deferredInsertTriggers;
+    constraint_list = &regTablePtr->afterInsertTriggers;
+    break;
+  case(ZDELETE):
+    jam();
+    deferred_list = &regTablePtr->deferredDeleteTriggers;
+    constraint_list = &regTablePtr->afterDeleteTriggers;
+    break;
+  case(ZUPDATE):
+    jam();
+    deferred_list = &regTablePtr->deferredUpdateTriggers;
+    constraint_list = &regTablePtr->afterUpdateTriggers;
+    break;
+  default:
+    ndbrequire(false);
+    break;
+  }
+
+  if (req_struct->m_deferred_constraints == false)
+  {
+    constraint_list = 0;
+  }
+
+  if (deferred_list->isEmpty() &&
+      (constraint_list == 0 || constraint_list->isEmpty()))
+  {
+    goto end;
+  }
+
+  /**
+   * Compute change-mask
+   */
+  set_commit_change_mask_info(regTablePtr, req_struct, regOperPtr);
+  if (!deferred_list->isEmpty())
+  {
+    fireDeferredTriggers(req_struct, * deferred_list, regOperPtr, disk);
+  }
+
+  if (constraint_list && !constraint_list->isEmpty())
+  {
+    fireDeferredConstraints(req_struct, * constraint_list, regOperPtr, disk);
+  }
+
+end:
+  regOperPtr->op_struct.op_type = save_type;
+  req_struct->m_tuple_ptr = save_ptr;
 }//Dbtup::checkDeferredTriggers()
-#endif
 
 /* ---------------------------------------------------------------- */
 /* --------------------- checkDetachedTriggers -------------------- */
@@ -716,6 +940,13 @@ end:
   req_struct->m_tuple_ptr = save_ptr;
 }
 
+static
+bool
+is_constraint(const Dbtup::TupTriggerData * trigPtr)
+{
+  return trigPtr->triggerType == TriggerType::SECONDARY_INDEX;
+}
+
 void 
 Dbtup::fireImmediateTriggers(KeyReqStruct *req_struct,
                              DLList<TupTriggerData>& triggerList, 
@@ -729,6 +960,38 @@ Dbtup::fireImmediateTriggers(KeyReqStruc
     if (trigPtr.p->monitorAllAttributes ||
         trigPtr.p->attributeMask.overlaps(req_struct->changeMask)) {
       jam();
+
+      if (req_struct->m_when == KRS_PREPARE &&
+          req_struct->m_deferred_constraints &&
+          is_constraint(trigPtr.p))
+      {
+        NoOfFiredTriggers::setDeferredBit(req_struct->no_fired_triggers);
+      }
+      else
+      {
+        executeTrigger(req_struct,
+                       trigPtr.p,
+                       regOperPtr,
+                       disk);
+      }
+    }
+    triggerList.next(trigPtr);
+  }//while
+}//Dbtup::fireImmediateTriggers()
+
+void
+Dbtup::fireDeferredConstraints(KeyReqStruct *req_struct,
+                               DLList<TupTriggerData>& triggerList,
+                               Operationrec* const regOperPtr,
+                               bool disk)
+{
+  TriggerPtr trigPtr;
+  triggerList.first(trigPtr);
+  while (trigPtr.i != RNIL) {
+    jam();
+    if (trigPtr.p->monitorAllAttributes ||
+        trigPtr.p->attributeMask.overlaps(req_struct->changeMask)) {
+      jam();
       executeTrigger(req_struct,
                      trigPtr.p,
                      regOperPtr,
@@ -736,14 +999,13 @@ Dbtup::fireImmediateTriggers(KeyReqStruc
     }//if
     triggerList.next(trigPtr);
   }//while
-}//Dbtup::fireImmediateTriggers()
+}//Dbtup::fireDeferredConstraints()
 
-#if 0
-void 
-Dbtup::fireDeferredTriggers(Signal* signal,
-                            KeyReqStruct *req_struct,
-                            DLList<TupTriggerData>& triggerList, 
-                            Operationrec* const regOperPtr)
+void
+Dbtup::fireDeferredTriggers(KeyReqStruct *req_struct,
+                            DLList<TupTriggerData>& triggerList,
+                            Operationrec* const regOperPtr,
+                            bool disk)
 {
   TriggerPtr trigPtr;
   triggerList.first(trigPtr);
@@ -753,13 +1015,13 @@ Dbtup::fireDeferredTriggers(Signal* sign
         trigPtr.p->attributeMask.overlaps(req_struct->changeMask)) {
       jam();
       executeTrigger(req_struct,
-                     trigPtr,
-                     regOperPtr);
+                     trigPtr.p,
+                     regOperPtr,
+                     disk);
     }//if
     triggerList.next(trigPtr);
   }//while
 }//Dbtup::fireDeferredTriggers()
-#endif
 
 void 
 Dbtup::fireDetachedTriggers(KeyReqStruct *req_struct,
@@ -1064,6 +1326,44 @@ out:
     return;
   }
 
+  if (triggerType == TriggerType::SECONDARY_INDEX &&
+      req_struct->m_when != KRS_PREPARE)
+  {
+    ndbrequire(req_struct->m_deferred_constraints);
+    if (req_struct->m_when == KRS_PRE_COMMIT0)
+    {
+      switch(regOperPtr->op_struct.op_type){
+      case ZINSERT:
+        NoOfFiredTriggers::setDeferredBit(req_struct->no_fired_triggers);
+        return;
+        break;
+      case ZUPDATE:
+        NoOfFiredTriggers::setDeferredBit(req_struct->no_fired_triggers);
+        noAfterWords = 0;
+        break;
+      case ZDELETE:
+        break;
+      default:
+        ndbrequire(false);
+      }
+    }
+    else
+    {
+      ndbrequire(req_struct->m_when == KRS_PRE_COMMIT1);
+      switch(regOperPtr->op_struct.op_type){
+      case ZINSERT:
+        break;
+      case ZUPDATE:
+        noBeforeWords = 0;
+        break;
+      case ZDELETE:
+        return;
+      default:
+        ndbrequire(false);
+      }
+    }
+  }
+
   req_struct->no_fired_triggers++;
 
   if (longsignal == false)

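By the time FIRE_TRIG_REQ reaches TUP, several operations may have touched the
same row within the transaction, so checkDeferredTriggers() folds them into one
effective operation: the Tuple_header::ALLOC bit on the original tuple tells
whether the row existed before the transaction, and that is combined with the
type of the last operation (insert + delete fires nothing, delete + insert is
treated as an update, otherwise the last op type stands). A minimal sketch of
that folding, with a plain bool standing in for the header bit:

#include <cassert>

// Sketch of the operation folding in Dbtup::checkDeferredTriggers():
// "existedBefore" corresponds to the Tuple_header::ALLOC bit being clear on
// the original tuple, "last" is the type of the last operation on the row.
enum Op { OpNone, OpInsert, OpUpdate, OpDelete };

static Op effectiveOp(bool existedBefore, Op last)
{
  if (!existedBefore)                     // row was created by this transaction
    return (last == OpDelete) ? OpNone    // insert + delete = nothing
                              : OpInsert; // everything else ends up an insert
  if (last == OpInsert)                   // row existed, but last op is insert:
    return OpUpdate;                      // only possible as delete + insert
  return last;                            // plain update or delete
}

int main()
{
  assert(effectiveOp(false, OpDelete) == OpNone);   // insert ... delete
  assert(effectiveOp(false, OpUpdate) == OpInsert); // insert ... update
  assert(effectiveOp(true,  OpInsert) == OpUpdate); // delete ... insert
  assert(effectiveOp(true,  OpDelete) == OpDelete); // plain delete
  return 0;
}
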
=== modified file 'storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp	2011-04-19 09:01:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp	2011-04-25 16:46:59 +0000
@@ -219,16 +219,11 @@ private:
   static const TreeEnt NullTreeEnt;
 
   /*
-   * Tree node has 1) fixed part 2) a prefix of index key data for min
-   * entry 3) max and min entries 4) rest of entries 5) one extra entry
-   * used as work space.
+   * Tree node has 3 parts:
    *
-   * struct TreeNode            part 1, size 6 words
-   * min prefix                 part 2, size TreeHead::m_prefSize
-   * max entry                  part 3
-   * min entry                  part 3
-   * rest of entries            part 4
-   * work entry                 part 5
+   * 1) struct TreeNode - the header (6 words)
+   * 2) some key values for min entry - the min prefix
+   * 3) list of TreeEnt (each 2 words)
    *
    * There are 3 links to other nodes: left child, right child, parent.
    * Occupancy (number of entries) is at least 1 except temporarily when
@@ -248,17 +243,6 @@ private:
   STATIC_CONST( NodeHeadSize = sizeof(TreeNode) >> 2 );
 
   /*
-   * Tree node "access size" was for an early version with signal
-   * interface to TUP.  It is now used only to compute sizes.
-   */
-  enum AccSize {
-    AccNone = 0,
-    AccHead = 1,                // part 1
-    AccPref = 2,                // parts 1-3
-    AccFull = 3                 // parts 1-5
-  };
-
-  /*
    * Tree header.  There is one in each fragment.  Contains tree
    * parameters and address of root node.
    */
@@ -273,7 +257,6 @@ private:
     TupLoc m_root;              // root node
     TreeHead();
     // methods
-    unsigned getSize(AccSize acc) const;
     Data getPref(TreeNode* node) const;
     TreeEnt* getEntList(TreeNode* node) const;
   };
@@ -562,7 +545,6 @@ private:
     // access other parts of the node
     Data getPref();
     TreeEnt getEnt(unsigned pos);
-    TreeEnt getMinMax(unsigned i);
     // for ndbrequire and ndbassert
     void progError(int line, int cause, const char* file);
   };
@@ -583,6 +565,7 @@ private:
   void readTablePk(const Frag& frag, TreeEnt ent, Data pkData, unsigned& pkSize);
   void copyAttrs(TuxCtx&, const Frag& frag, ConstData data1, Data data2, unsigned maxlen2 = MaxAttrDataSize);
   void unpackBound(const ScanBound& bound, Data data);
+  void findFrag(const Index& index, Uint32 fragId, FragPtr& fragPtr);
 
   /*
    * DbtuxMeta.cpp
@@ -674,11 +657,14 @@ private:
   /*
    * DbtuxSearch.cpp
    */
+  void findNodeToUpdate(TuxCtx&, Frag& frag, ConstData searchKey, TreeEnt searchEnt, NodeHandle& currNode);
+  bool findPosToAdd(TuxCtx&, Frag& frag, ConstData searchKey, TreeEnt searchEnt, NodeHandle& currNode, TreePos& treePos);
+  bool findPosToRemove(TuxCtx&, Frag& frag, ConstData searchKey, TreeEnt searchEnt, NodeHandle& currNode, TreePos& treePos);
   bool searchToAdd(TuxCtx&, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos);
-  bool searchToRemove(Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos);
+  bool searchToRemove(TuxCtx&, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos);
+  void findNodeToScan(Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, NodeHandle& currNode);
+  void findPosToScan(Frag& frag, unsigned idir, ConstData boundInfo, unsigned boundCount, NodeHandle& currNode, Uint16* pos);
   void searchToScan(Frag& frag, ConstData boundInfo, unsigned boundCount, bool descending, TreePos& treePos);
-  void searchToScanAscending(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos);
-  void searchToScanDescending(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos);
 
   /*
    * DbtuxCmp.cpp
@@ -929,22 +915,6 @@ Dbtux::TreeHead::TreeHead() :
 {
 }
 
-inline unsigned
-Dbtux::TreeHead::getSize(AccSize acc) const
-{
-  switch (acc) {
-  case AccNone:
-    return 0;
-  case AccHead:
-    return NodeHeadSize;
-  case AccPref:
-    return NodeHeadSize + m_prefSize + 2 * TreeEntSize;
-  case AccFull:
-    return m_nodeSize;
-  }
-  return 0;
-}
-
 inline Dbtux::Data
 Dbtux::TreeHead::getPref(TreeNode* node) const
 {
@@ -1201,15 +1171,7 @@ Dbtux::NodeHandle::getEnt(unsigned pos)
   TreeEnt* entList = tree.getEntList(m_node);
   const unsigned occup = m_node->m_occup;
   ndbrequire(pos < occup);
-  return entList[(1 + pos) % occup];
-}
-
-inline Dbtux::TreeEnt
-Dbtux::NodeHandle::getMinMax(unsigned i)
-{
-  const unsigned occup = m_node->m_occup;
-  ndbrequire(i <= 1 && occup != 0);
-  return getEnt(i == 0 ? 0 : occup - 1);
+  return entList[pos];
 }
 
 // parameters for methods

=== modified file 'storage/ndb/src/kernel/blocks/dbtux/DbtuxBuild.cpp'
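The Dbtux.hpp hunk above drops the old node layout, where the max entry was
stored physically first and one slot was reserved as work space, so getEnt(pos)
had to map a logical position through (1 + pos) % occup; entries are now a
plain sorted array and getEnt(pos) indexes it directly. A small illustration of
the two mappings for a node holding four entries:

#include <cstdio>

// Old layout: logical position pos lived in physical slot (1 + pos) % occup
// (the max entry was stored first).  New layout: logical == physical.
int main()
{
  const unsigned occup = 4;
  printf("logical pos : old physical slot : new physical slot\n");
  for (unsigned pos = 0; pos < occup; pos++)
    printf("%11u : %17u : %17u\n", pos, (1 + pos) % occup, pos);
  return 0;
}
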
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxBuild.cpp	2011-01-30 20:56:00 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxBuild.cpp	2011-04-24 13:10:50 +0000
@@ -83,15 +83,7 @@ Dbtux::mt_buildIndexFragment(mt_BuildInd
   const Uint32 fragId = req->fragId;
   // get the fragment
   FragPtr fragPtr;
-  fragPtr.i = RNIL;
-  for (unsigned i = 0; i < indexPtr.p->m_numFrags; i++) {
-    jam();
-    if (indexPtr.p->m_fragId[i] == fragId) {
-      jam();
-      c_fragPool.getPtr(fragPtr, indexPtr.p->m_fragPtrI[i]);
-      break;
-    }
-  }
+  findFrag(*indexPtr.p, fragId, fragPtr);
   ndbrequire(fragPtr.i != RNIL);
   Frag& frag = *fragPtr.p;
 

=== modified file 'storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp	2011-04-24 16:20:23 +0000
@@ -286,7 +286,7 @@ Dbtux::printNode(TuxCtx & ctx,
   { ConstData data1 = node.getPref();
     Uint32 data2[MaxPrefSize];
     memset(data2, DataFillByte, MaxPrefSize << 2);
-    readKeyAttrs(ctx, frag, node.getMinMax(0), 0, ctx.c_searchKey);
+    readKeyAttrs(ctx, frag, node.getEnt(0), 0, ctx.c_searchKey);
     copyAttrs(ctx, frag, ctx.c_searchKey, data2, tree.m_prefSize);
     for (unsigned n = 0; n < tree.m_prefSize; n++) {
       if (data1[n] != data2[n]) {
@@ -320,7 +320,8 @@ Dbtux::printNode(TuxCtx & ctx,
     if (node.getLink(i) == NullTupLoc)
       continue;
     const TreeEnt ent1 = cpar[i].m_minmax[1 - i];
-    const TreeEnt ent2 = node.getMinMax(i);
+    const unsigned pos = (i == 0 ? 0 : node.getOccup() - 1);
+    const TreeEnt ent2 = node.getEnt(pos);
     unsigned start = 0;
     readKeyAttrs(ctx, frag, ent1, start, ctx.c_searchKey);
     readKeyAttrs(ctx, frag, ent2, start, ctx.c_entryKey);
@@ -337,9 +338,10 @@ Dbtux::printNode(TuxCtx & ctx,
   par.m_depth = 1 + max(cpar[0].m_depth, cpar[1].m_depth);
   par.m_occup = node.getOccup();
   for (unsigned i = 0; i <= 1; i++) {
-    if (node.getLink(i) == NullTupLoc)
-      par.m_minmax[i] = node.getMinMax(i);
-    else
+    if (node.getLink(i) == NullTupLoc) {
+      const unsigned pos = (i == 0 ? 0 : node.getOccup() - 1);
+      par.m_minmax[i] = node.getEnt(pos);
+    } else
       par.m_minmax[i] = cpar[i].m_minmax[i];
   }
 }
@@ -387,9 +389,6 @@ operator<<(NdbOut& out, const Dbtux::Tre
   out << " [prefSize " << dec << tree.m_prefSize << "]";
   out << " [minOccup " << dec << tree.m_minOccup << "]";
   out << " [maxOccup " << dec << tree.m_maxOccup << "]";
-  out << " [AccHead " << dec << tree.getSize(Dbtux::AccHead) << "]";
-  out << " [AccPref " << dec << tree.getSize(Dbtux::AccPref) << "]";
-  out << " [AccFull " << dec << tree.getSize(Dbtux::AccFull) << "]";
   out << " [root " << hex << tree.m_root << "]";
   out << "]";
   return out;
@@ -528,9 +527,8 @@ operator<<(NdbOut& out, const Dbtux::Nod
   unsigned numpos = node.m_node->m_occup;
   data = (const Uint32*)node.m_node + Dbtux::NodeHeadSize + tree.m_prefSize;
   const Dbtux::TreeEnt* entList = (const Dbtux::TreeEnt*)data;
-  // print entries in logical order
-  for (unsigned pos = 1; pos <= numpos; pos++)
-    out << " " << entList[pos % numpos];
+  for (unsigned pos = 0; pos < numpos; pos++)
+    out << " " << entList[pos];
   out << "]";
   out << "]";
   return out;

=== modified file 'storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp	2011-04-24 13:10:50 +0000
@@ -366,4 +366,20 @@ Dbtux::unpackBound(const ScanBound& boun
   }
 }
 
+void
+Dbtux::findFrag(const Index& index, Uint32 fragId, FragPtr& fragPtr)
+{
+  const Uint32 numFrags = index.m_numFrags;
+  for (Uint32 i = 0; i < numFrags; i++) {
+    jam();
+    if (index.m_fragId[i] == fragId) {
+      jam();
+      fragPtr.i = index.m_fragPtrI[i];
+      c_fragPool.getPtr(fragPtr);
+      return;
+    }
+  }
+  fragPtr.i = RNIL;
+}
+
 BLOCK_FUNCTIONS(Dbtux)

=== modified file 'storage/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp	2011-02-01 21:05:11 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp	2011-04-25 14:42:38 +0000
@@ -63,15 +63,7 @@ Dbtux::execTUX_MAINT_REQ(Signal* signal)
   const Uint32 fragId = req->fragId;
   // get the fragment
   FragPtr fragPtr;
-  fragPtr.i = RNIL;
-  for (unsigned i = 0; i < indexPtr.p->m_numFrags; i++) {
-    jam();
-    if (indexPtr.p->m_fragId[i] == fragId) {
-      jam();
-      c_fragPool.getPtr(fragPtr, indexPtr.p->m_fragPtrI[i]);
-      break;
-    }
-  }
+  findFrag(*indexPtr.p, fragId, fragPtr);
   ndbrequire(fragPtr.i != RNIL);
   Frag& frag = *fragPtr.p;
   // set up index keys for this operation
@@ -152,7 +144,7 @@ Dbtux::execTUX_MAINT_REQ(Signal* signal)
     break;
   case TuxMaintReq::OpRemove:
     jam();
-    ok = searchToRemove(frag, c_ctx.c_searchKey, ent, treePos);
+    ok = searchToRemove(c_ctx, frag, c_ctx.c_searchKey, ent, treePos);
 #ifdef VM_TRACE
     if (debugFlags & DebugMaint) {
       debugOut << treePos << (! ok ? " - error" : "") << endl;

=== modified file 'storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp	2011-04-24 16:20:23 +0000
@@ -313,21 +313,20 @@ Dbtux::execTUXFRAGREQ(Signal* signal)
     tree.m_nodeSize = MAX_TTREE_NODE_SIZE;
     tree.m_prefSize = MAX_TTREE_PREF_SIZE;
     const unsigned maxSlack = MAX_TTREE_NODE_SLACK;
-    // size up to and including first 2 entries
-    const unsigned pref = tree.getSize(AccPref);
-    if (! (pref <= tree.m_nodeSize)) {
+    // size of header and min prefix
+    const unsigned fixedSize = NodeHeadSize + tree.m_prefSize;
+    if (! (fixedSize <= tree.m_nodeSize)) {
       jam();
       errorCode = (TuxFragRef::ErrorCode)TuxAddAttrRef::InvalidNodeSize;
       break;
     }
-    const unsigned slots = (tree.m_nodeSize - pref) / TreeEntSize;
-    // leave out work space entry
-    tree.m_maxOccup = 2 + slots - 1;
+    const unsigned slots = (tree.m_nodeSize - fixedSize) / TreeEntSize;
+    tree.m_maxOccup = slots;
     // min occupancy of interior node must be at least 2
     if (! (2 + maxSlack <= tree.m_maxOccup)) {
       jam();
-        errorCode = (TuxFragRef::ErrorCode)TuxAddAttrRef::InvalidNodeSize;
-        break;
+      errorCode = (TuxFragRef::ErrorCode)TuxAddAttrRef::InvalidNodeSize;
+      break;
     }
     tree.m_minOccup = tree.m_maxOccup - maxSlack;
     // root node does not exist (also set by ctor)

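With the work-space entry gone, DbtuxMeta.cpp above sizes the node from the
fixed header-plus-prefix part only; compared with the old "2 + slots - 1"
formula this yields exactly one more usable entry per node. A worked example
with made-up word counts (the real values come from MAX_TTREE_NODE_SIZE,
MAX_TTREE_PREF_SIZE, NodeHeadSize and TreeEntSize):

#include <cassert>

int main()
{
  // Hypothetical sizes in 32-bit words, chosen only for illustration.
  const unsigned nodeSize = 32;   // tree.m_nodeSize
  const unsigned headSize = 6;    // NodeHeadSize (sizeof(TreeNode) / 4)
  const unsigned prefSize = 4;    // tree.m_prefSize
  const unsigned entSize  = 2;    // TreeEntSize

  // Old formula: the "prefix" part included min+max entries, and one slot
  // was reserved as work space.
  unsigned prefOld     = headSize + prefSize + 2 * entSize;
  unsigned slotsOld    = (nodeSize - prefOld) / entSize;
  unsigned maxOccupOld = 2 + slotsOld - 1;

  // New formula: only header + prefix are fixed, every remaining slot is
  // a usable entry.
  unsigned fixedSize   = headSize + prefSize;
  unsigned maxOccupNew = (nodeSize - fixedSize) / entSize;

  assert(maxOccupOld == 10);
  assert(maxOccupNew == 11);
  assert(maxOccupNew == maxOccupOld + 1);   // exactly one entry gained
  return 0;
}
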
=== modified file 'storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp	2011-04-24 16:20:23 +0000
@@ -102,7 +102,7 @@ Dbtux::insertNode(NodeHandle& node)
   TreeHead& tree = frag.m_tree;
   memset(node.getPref(), DataFillByte, tree.m_prefSize << 2);
   TreeEnt* entList = tree.getEntList(node.m_node);
-  memset(entList, NodeFillByte, (tree.m_maxOccup + 1) * (TreeEntSize << 2));
+  memset(entList, NodeFillByte, tree.m_maxOccup * (TreeEntSize << 2));
 #endif
 }
 
@@ -156,7 +156,7 @@ Dbtux::setNodePref(TuxCtx & ctx, NodeHan
 {
   const Frag& frag = node.m_frag;
   const TreeHead& tree = frag.m_tree;
-  readKeyAttrs(ctx, frag, node.getMinMax(0), 0, ctx.c_entryKey);
+  readKeyAttrs(ctx, frag, node.getEnt(0), 0, ctx.c_entryKey);
   copyAttrs(ctx, frag, ctx.c_entryKey, node.getPref(), tree.m_prefSize);
 }
 
@@ -185,14 +185,11 @@ Dbtux::nodePushUp(TuxCtx & ctx, NodeHand
     nodePushUpScans(node, pos);
   // fix node
   TreeEnt* const entList = tree.getEntList(node.m_node);
-  entList[occup] = entList[0];
-  TreeEnt* const tmpList = entList + 1;
   for (unsigned i = occup; i > pos; i--) {
     thrjam(ctx.jamBuffer);
-    tmpList[i] = tmpList[i - 1];
+    entList[i] = entList[i - 1];
   }
-  tmpList[pos] = ent;
-  entList[0] = entList[occup + 1];
+  entList[pos] = ent;
   node.setOccup(occup + 1);
   // add new scans
   if (scanList != RNIL)
@@ -258,14 +255,11 @@ Dbtux::nodePopDown(TuxCtx& ctx, NodeHand
   }
   // fix node
   TreeEnt* const entList = tree.getEntList(node.m_node);
-  entList[occup] = entList[0];
-  TreeEnt* const tmpList = entList + 1;
-  ent = tmpList[pos];
+  ent = entList[pos];
   for (unsigned i = pos; i < occup - 1; i++) {
     thrjam(ctx.jamBuffer);
-    tmpList[i] = tmpList[i + 1];
+    entList[i] = entList[i + 1];
   }
-  entList[0] = entList[occup - 1];
   node.setOccup(occup - 1);
   // fix prefix
   if (occup != 1 && pos == 0)
@@ -326,16 +320,13 @@ Dbtux::nodePushDown(TuxCtx& ctx, NodeHan
   }
   // fix node
   TreeEnt* const entList = tree.getEntList(node.m_node);
-  entList[occup] = entList[0];
-  TreeEnt* const tmpList = entList + 1;
-  TreeEnt oldMin = tmpList[0];
+  TreeEnt oldMin = entList[0];
   for (unsigned i = 0; i < pos; i++) {
     thrjam(ctx.jamBuffer);
-    tmpList[i] = tmpList[i + 1];
+    entList[i] = entList[i + 1];
   }
-  tmpList[pos] = ent;
+  entList[pos] = ent;
   ent = oldMin;
-  entList[0] = entList[occup];
   // fix prefix
   if (true)
     setNodePref(ctx, node);
@@ -396,16 +387,13 @@ Dbtux::nodePopUp(TuxCtx& ctx, NodeHandle
   }
   // fix node
   TreeEnt* const entList = tree.getEntList(node.m_node);
-  entList[occup] = entList[0];
-  TreeEnt* const tmpList = entList + 1;
   TreeEnt newMin = ent;
-  ent = tmpList[pos];
+  ent = entList[pos];
   for (unsigned i = pos; i > 0; i--) {
     thrjam(ctx.jamBuffer);
-    tmpList[i] = tmpList[i - 1];
+    entList[i] = entList[i - 1];
   }
-  tmpList[0] = newMin;
-  entList[0] = entList[occup];
+  entList[0] = newMin;
   // add scans
   if (scanList != RNIL)
     addScanList(node, 0, scanList);

=== modified file 'storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp	2011-04-19 09:01:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp	2011-04-25 15:57:28 +0000
@@ -39,15 +39,7 @@ Dbtux::execACC_SCANREQ(Signal* signal)
     c_indexPool.getPtr(indexPtr, req->tableId);
     // get the fragment
     FragPtr fragPtr;
-    fragPtr.i = RNIL;
-    for (unsigned i = 0; i < indexPtr.p->m_numFrags; i++) {
-      jam();
-      if (indexPtr.p->m_fragId[i] == req->fragmentNo) {
-        jam();
-        c_fragPool.getPtr(fragPtr, indexPtr.p->m_fragPtrI[i]);
-        break;
-      }
-    }
+    findFrag(*indexPtr.p, req->fragmentNo, fragPtr);
     ndbrequire(fragPtr.i != RNIL);
     Frag& frag = *fragPtr.p;
     // check for index not Online (i.e. Dropping)
@@ -858,8 +850,9 @@ Dbtux::scanFind(ScanOpPtr scanPtr)
  * 0 - up from left child (scan this node next)
  * 1 - up from right child (proceed to parent)
  * 2 - up from root (the scan ends)
- * 3 - left to right within node (at end proceed to right child)
+ * 3 - left to right within node (at end set state 5)
  * 4 - down from parent (proceed to left child)
+ * 5 - at node end proceed to right child (state becomes 4)
  *
  * If an entry was found, scan direction is 3.  Therefore tree
  * re-organizations need not worry about scan direction.
@@ -926,6 +919,19 @@ Dbtux::scanNext(ScanOpPtr scanPtr, bool 
       // pretend we came from left child
       pos.m_dir = idir;
     }
+    if (pos.m_dir == 5) {
+      // at node end proceed to right child
+      jam();
+      TupLoc loc = node.getLink(1 - idir);
+      if (loc != NullTupLoc) {
+        jam();
+        pos.m_loc = loc;
+        pos.m_dir = 4;  // down from parent as usual
+        continue;
+      }
+      // pretend we came from right child
+      pos.m_dir = 1 - idir;
+    }
     const unsigned occup = node.getOccup();
     if (occup == 0) {
       jam();
@@ -957,15 +963,8 @@ Dbtux::scanNext(ScanOpPtr scanPtr, bool 
         break;
       }
       // after node proceed to right child
-      TupLoc loc = node.getLink(1 - idir);
-      if (loc != NullTupLoc) {
-        jam();
-        pos.m_loc = loc;
-        pos.m_dir = 4;
-        continue;
-      }
-      // pretend we came from right child
-      pos.m_dir = 1 - idir;
+      pos.m_dir = 5;
+      continue;
     }
     if (pos.m_dir == 1 - idir) {
       // coming up from right child proceed to parent

=== modified file 'storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp	2011-04-27 21:13:10 +0000
@@ -20,28 +20,23 @@
 #include "Dbtux.hpp"
 
 /*
- * Search for entry to add.
+ * Search down non-empty tree for node to update.  Compare search key to
+ * each node minimum.  If greater, move to right subtree.  This can
+ * overshoot target node.  The last such node is saved.  The search ends
+ * at a final node which is a semi-leaf or leaf.  If search key is less
+ * than final node minimum then the saved node (if any) is the g.l.b of
+ * the final node and we move back to it.
  *
- * Similar to searchToRemove (see below).
+ * Search within the found node is done by caller.  On add, search key
+ * may be before minimum or after maximum entry.  On remove, search key
+ * is within the node.
  */
-bool
-Dbtux::searchToAdd(TuxCtx& ctx, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos)
+void
+Dbtux::findNodeToUpdate(TuxCtx& ctx, Frag& frag, ConstData searchKey, TreeEnt searchEnt, NodeHandle& currNode)
 {
   const TreeHead& tree = frag.m_tree;
-  const unsigned numAttrs = frag.m_numAttrs;
-  NodeHandle currNode(frag);
-  currNode.m_loc = tree.m_root;
-  if (currNode.m_loc == NullTupLoc) {
-    // empty tree
-    thrjam(ctx.jamBuffer);
-    return true;
-  }
+  const Uint32 numAttrs = frag.m_numAttrs;
   NodeHandle glbNode(frag);     // potential g.l.b of final node
-  /*
-   * In order to not (yet) change old behaviour, a position between
-   * 2 nodes returns the one at the bottom of the tree.
-   */
-  NodeHandle bottomNode(frag);
   while (true) {
     thrjam(ctx.jamBuffer);
     selectNode(currNode, currNode.m_loc);
@@ -53,14 +48,14 @@ Dbtux::searchToAdd(TuxCtx& ctx, Frag& fr
       thrjam(ctx.jamBuffer);
       // read and compare remaining attributes
       ndbrequire(start < numAttrs);
-      readKeyAttrs(ctx, frag, currNode.getMinMax(0), start, ctx.c_entryKey);
+      readKeyAttrs(ctx, frag, currNode.getEnt(0), start, ctx.c_entryKey);
       ret = cmpSearchKey(ctx, frag, start, searchKey, ctx.c_entryKey);
       ndbrequire(ret != NdbSqlUtil::CmpUnknown);
     }
     if (ret == 0) {
       thrjam(ctx.jamBuffer);
       // keys are equal, compare entry values
-      ret = searchEnt.cmp(currNode.getMinMax(0));
+      ret = searchEnt.cmp(currNode.getEnt(0));
     }
     if (ret < 0) {
       thrjam(ctx.jamBuffer);
@@ -73,11 +68,12 @@ Dbtux::searchToAdd(TuxCtx& ctx, Frag& fr
       }
       if (! glbNode.isNull()) {
         thrjam(ctx.jamBuffer);
-        // move up to the g.l.b but remember the bottom node
-        bottomNode = currNode;
+        // move up to the g.l.b
         currNode = glbNode;
       }
-    } else if (ret > 0) {
+      break;
+    }
+    if (ret > 0) {
       thrjam(ctx.jamBuffer);
       const TupLoc loc = currNode.getLink(1);
       if (loc != NullTupLoc) {
@@ -88,26 +84,28 @@ Dbtux::searchToAdd(TuxCtx& ctx, Frag& fr
         currNode.m_loc = loc;
         continue;
       }
-    } else {
-      thrjam(ctx.jamBuffer);
-      treePos.m_loc = currNode.m_loc;
-      treePos.m_pos = 0;
-      // entry found - error
-      return false;
+      break;
     }
+    // ret == 0
+    thrjam(ctx.jamBuffer);
     break;
   }
-  // anticipate
-  treePos.m_loc = currNode.m_loc;
-  // binary search
+}
+
+/*
+ * Find position within the final node to add entry to.  Use binary
+ * search.  Return true if ok i.e. entry to add is not a duplicate.
+ */
+bool
+Dbtux::findPosToAdd(TuxCtx& ctx, Frag& frag, ConstData searchKey, TreeEnt searchEnt, NodeHandle& currNode, TreePos& treePos)
+{
   int lo = -1;
-  int hi = currNode.getOccup();
+  int hi = (int)currNode.getOccup();
   int ret;
-  while (1) {
+  while (hi - lo > 1) {
     thrjam(ctx.jamBuffer);
     // hi - lo > 1 implies lo < j < hi
     int j = (hi + lo) / 2;
-
     // read and compare attributes
     unsigned start = 0;
     readKeyAttrs(ctx, frag, currNode.getEnt(j), start, ctx.c_entryKey);
@@ -118,171 +116,113 @@ Dbtux::searchToAdd(TuxCtx& ctx, Frag& fr
       // keys are equal, compare entry values
       ret = searchEnt.cmp(currNode.getEnt(j));
     }
-    if (ret < 0)
+    if (ret < 0) {
+      thrjam(ctx.jamBuffer);
       hi = j;
-    else if (ret > 0)
+    } else if (ret > 0) {
+      thrjam(ctx.jamBuffer);
       lo = j;
-    else {
+    } else {
       treePos.m_pos = j;
       // entry found - error
       return false;
     }
-    if (hi - lo == 1)
-      break;
   }
-  if (ret < 0) {
+  ndbrequire(hi - lo == 1);
+  // return hi pos, see treeAdd() for next step
+  treePos.m_pos = hi;
+  return true;
+}
+
+/*
+ * Find position within the final node to remove entry from.  Use linear
+ * search.  Return true if ok i.e. the entry was found.
+ */
+bool
+Dbtux::findPosToRemove(TuxCtx& ctx, Frag& frag, ConstData searchKey, TreeEnt searchEnt, NodeHandle& currNode, TreePos& treePos)
+{
+  const unsigned occup = currNode.getOccup();
+  for (unsigned j = 0; j < occup; j++) {
     thrjam(ctx.jamBuffer);
-    treePos.m_pos = hi;
-    return true;
+    // compare only the entry
+    if (searchEnt.eq(currNode.getEnt(j))) {
+      thrjam(ctx.jamBuffer);
+      treePos.m_pos = j;
+      return true;
+    }
   }
-  if ((uint) hi < currNode.getOccup()) {
+  treePos.m_pos = occup;
+  // not found - failed
+  return false;
+}
+
+/*
+ * Search for entry to add.
+ */
+bool
+Dbtux::searchToAdd(TuxCtx& ctx, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos)
+{
+  const TreeHead& tree = frag.m_tree;
+  NodeHandle currNode(frag);
+  currNode.m_loc = tree.m_root;
+  if (unlikely(currNode.m_loc == NullTupLoc)) {
+    // empty tree
     thrjam(ctx.jamBuffer);
-    treePos.m_pos = hi;
     return true;
   }
-  if (bottomNode.isNull()) {
+  findNodeToUpdate(ctx, frag, searchKey, searchEnt, currNode);
+  treePos.m_loc = currNode.m_loc;
+  if (! findPosToAdd(ctx, frag, searchKey, searchEnt, currNode, treePos)) {
     thrjam(ctx.jamBuffer);
-    treePos.m_pos = hi;
-    return true;
+    return false;
   }
-  thrjam(ctx.jamBuffer);
-  // backwards compatible for now
-  treePos.m_loc = bottomNode.m_loc;
-  treePos.m_pos = 0;
   return true;
 }
 
 /*
  * Search for entry to remove.
- *
- * Compares search key to each node min.  A move to right subtree can
- * overshoot target node.  The last such node is saved.  The final node
- * is a semi-leaf or leaf.  If search key is less than final node min
- * then the saved node is the g.l.b of the final node and we move back
- * to it.
  */
 bool
-Dbtux::searchToRemove(Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos)
+Dbtux::searchToRemove(TuxCtx& ctx, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos)
 {
   const TreeHead& tree = frag.m_tree;
-  const unsigned numAttrs = frag.m_numAttrs;
   NodeHandle currNode(frag);
   currNode.m_loc = tree.m_root;
-  if (currNode.m_loc == NullTupLoc) {
+  if (unlikely(currNode.m_loc == NullTupLoc)) {
     // empty tree - failed
-    jam();
+    thrjam(ctx.jamBuffer);
     return false;
   }
-  NodeHandle glbNode(frag);     // potential g.l.b of final node
-  while (true) {
-    jam();
-    selectNode(currNode, currNode.m_loc);
-    int ret;
-    // compare prefix
-    unsigned start = 0;
-    ret = cmpSearchKey(c_ctx, frag, start, searchKey, currNode.getPref(), tree.m_prefSize);
-    if (ret == NdbSqlUtil::CmpUnknown) {
-      jam();
-      // read and compare remaining attributes
-      ndbrequire(start < numAttrs);
-      readKeyAttrs(c_ctx, frag, currNode.getMinMax(0), start, c_ctx.c_entryKey);
-      ret = cmpSearchKey(c_ctx, frag, start, searchKey, c_ctx.c_entryKey);
-      ndbrequire(ret != NdbSqlUtil::CmpUnknown);
-    }
-    if (ret == 0) {
-      jam();
-      // keys are equal, compare entry values
-      ret = searchEnt.cmp(currNode.getMinMax(0));
-    }
-    if (ret < 0) {
-      jam();
-      const TupLoc loc = currNode.getLink(0);
-      if (loc != NullTupLoc) {
-        jam();
-        // continue to left subtree
-        currNode.m_loc = loc;
-        continue;
-      }
-      if (! glbNode.isNull()) {
-        jam();
-        // move up to the g.l.b
-        currNode = glbNode;
-      }
-    } else if (ret > 0) {
-      jam();
-      const TupLoc loc = currNode.getLink(1);
-      if (loc != NullTupLoc) {
-        jam();
-        // save potential g.l.b
-        glbNode = currNode;
-        // continue to right subtree
-        currNode.m_loc = loc;
-        continue;
-      }
-    } else {
-      jam();
-      treePos.m_loc = currNode.m_loc;
-      treePos.m_pos = 0;
-      return true;
-    }
-    break;
-  }
-  // anticipate
+  findNodeToUpdate(ctx, frag, searchKey, searchEnt, currNode);
   treePos.m_loc = currNode.m_loc;
-  // pos 0 was handled above
-  for (unsigned j = 1, occup = currNode.getOccup(); j < occup; j++) {
-    jam();
-    // compare only the entry
-    if (searchEnt.eq(currNode.getEnt(j))) {
-      jam();
-      treePos.m_pos = j;
-      return true;
-    }
+  if (! findPosToRemove(ctx, frag, searchKey, searchEnt, currNode, treePos)) {
+    thrjam(ctx.jamBuffer);
+    return false;
   }
-  treePos.m_pos = currNode.getOccup();
-  // not found - failed
-  return false;
+  return true;
 }
 
 /*
- * Search for scan start position.
- *
- * Similar to searchToAdd.  The routines differ somewhat depending on
- * scan direction and are done by separate methods.
+ * Search down non-empty tree for node to start scan from.  Similar to
+ * findNodeToUpdate().  Direction is 0-ascending or 1-descending.
+ * Search within the found node is done by caller.
  */
 void
-Dbtux::searchToScan(Frag& frag, ConstData boundInfo, unsigned boundCount, bool descending, TreePos& treePos)
-{
-  const TreeHead& tree = frag.m_tree;
-  if (tree.m_root != NullTupLoc) {
-    if (! descending)
-      searchToScanAscending(frag, boundInfo, boundCount, treePos);
-    else
-      searchToScanDescending(frag, boundInfo, boundCount, treePos);
-    return;
-  }
-  // empty tree
-}
-
-void
-Dbtux::searchToScanAscending(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos)
+Dbtux::findNodeToScan(Frag& frag, unsigned idir, ConstData boundInfo, unsigned boundCount, NodeHandle& currNode)
 {
   const TreeHead& tree = frag.m_tree;
-  NodeHandle currNode(frag);
-  currNode.m_loc = tree.m_root;
   NodeHandle glbNode(frag);     // potential g.l.b of final node
-  NodeHandle bottomNode(frag);
   while (true) {
     jam();
     selectNode(currNode, currNode.m_loc);
     int ret;
     // compare prefix
-    ret = cmpScanBound(frag, 0, boundInfo, boundCount, currNode.getPref(), tree.m_prefSize);
+    ret = cmpScanBound(frag, idir, boundInfo, boundCount, currNode.getPref(), tree.m_prefSize);
     if (ret == NdbSqlUtil::CmpUnknown) {
       jam();
       // read and compare all attributes
-      readKeyAttrs(c_ctx, frag, currNode.getMinMax(0), 0, c_ctx.c_entryKey);
-      ret = cmpScanBound(frag, 0, boundInfo, boundCount, c_ctx.c_entryKey);
+      readKeyAttrs(c_ctx, frag, currNode.getEnt(0), 0, c_ctx.c_entryKey);
+      ret = cmpScanBound(frag, idir, boundInfo, boundCount, c_ctx.c_entryKey);
       ndbrequire(ret != NdbSqlUtil::CmpUnknown);
     }
     if (ret < 0) {
@@ -297,17 +237,12 @@ Dbtux::searchToScanAscending(Frag& frag,
       }
       if (! glbNode.isNull()) {
         jam();
-        // move up to the g.l.b but remember the bottom node
-        bottomNode = currNode;
+        // move up to the g.l.b
         currNode = glbNode;
-      } else {
-        // start scanning this node
-        treePos.m_loc = currNode.m_loc;
-        treePos.m_pos = 0;
-        treePos.m_dir = 3;
-        return;
       }
-    } else {
+      break;
+    }
+    if (ret > 0) {
       // bound is at or right of this node
       jam();
       const TupLoc loc = currNode.getLink(1);
@@ -319,117 +254,89 @@ Dbtux::searchToScanAscending(Frag& frag,
         currNode.m_loc = loc;
         continue;
       }
+      break;
     }
-    break;
+    // ret == 0 never
+    ndbrequire(false);
   }
-  for (unsigned j = 0, occup = currNode.getOccup(); j < occup; j++) {
+}
+
+/*
+ * Search across final node for position to start scan from.  Use binary
+ * search similar to findPosToAdd().
+ */
+void
+Dbtux::findPosToScan(Frag& frag, unsigned idir, ConstData boundInfo, unsigned boundCount, NodeHandle& currNode, Uint16* pos)
+{
+  const int jdir = 1 - 2 * int(idir);
+  int lo = -1;
+  int hi = (int)currNode.getOccup();
+  while (hi - lo > 1) {
     jam();
-    int ret;
-    // read and compare attributes
-    readKeyAttrs(c_ctx, frag, currNode.getEnt(j), 0, c_ctx.c_entryKey);
-    ret = cmpScanBound(frag, 0, boundInfo, boundCount, c_ctx.c_entryKey);
-    ndbrequire(ret != NdbSqlUtil::CmpUnknown);
+    // hi - lo > 1 implies lo < j < hi
+    int j = (hi + lo) / 2;
+    int ret = (-1) * jdir;
+    if (boundCount != 0) {
+      // read and compare attributes
+      const TreeEnt currEnt = currNode.getEnt(j);
+      readKeyAttrs(c_ctx, frag, currEnt, 0, c_ctx.c_entryKey);
+      ret = cmpScanBound(frag, idir, boundInfo, boundCount, c_ctx.c_entryKey);
+    }
+    ndbrequire(ret != 0);
     if (ret < 0) {
-      // found first entry satisfying the bound
-      treePos.m_loc = currNode.m_loc;
-      treePos.m_pos = j;
-      treePos.m_dir = 3;
-      return;
+      jam();
+      hi = j;
+    } else if (ret > 0) {
+      jam();
+      lo = j;
+    } else {
+      // ret == 0 never
+      ndbrequire(false);
     }
   }
-  // bound is to right of this node
-  if (! bottomNode.isNull()) {
-    jam();
-    // start scanning the l.u.b
-    treePos.m_loc = bottomNode.m_loc;
-    treePos.m_pos = 0;
-    treePos.m_dir = 3;
-    return;
-  }
-  // start scanning upwards (pretend we came from right child)
-  treePos.m_loc = currNode.m_loc;
-  treePos.m_dir = 1;
+  // return hi pos, caller handles ascending vs descending
+  *pos = hi;
 }
 
+/*
+ * Search for scan start position.
+ */
 void
-Dbtux::searchToScanDescending(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos)
+Dbtux::searchToScan(Frag& frag, ConstData boundInfo, unsigned boundCount, bool descending, TreePos& treePos)
 {
   const TreeHead& tree = frag.m_tree;
   NodeHandle currNode(frag);
   currNode.m_loc = tree.m_root;
-  NodeHandle glbNode(frag);     // potential g.l.b of final node
-  NodeHandle bottomNode(frag);
-  while (true) {
+  if (unlikely(currNode.m_loc == NullTupLoc)) {
+    // empty tree
     jam();
-    selectNode(currNode, currNode.m_loc);
-    int ret;
-    // compare prefix
-    ret = cmpScanBound(frag, 1, boundInfo, boundCount, currNode.getPref(), tree.m_prefSize);
-    if (ret == NdbSqlUtil::CmpUnknown) {
+    return;
+  }
+  const unsigned idir = unsigned(descending);
+  findNodeToScan(frag, idir, boundInfo, boundCount, currNode);
+  treePos.m_loc = currNode.m_loc;
+  Uint16 pos;
+  findPosToScan(frag, idir, boundInfo, boundCount, currNode, &pos);
+  const unsigned occup = currNode.getOccup();
+  if (idir == 0) {
+    if (pos < occup) {
       jam();
-      // read and compare all attributes
-      readKeyAttrs(c_ctx, frag, currNode.getMinMax(0), 0, c_ctx.c_entryKey);
-      ret = cmpScanBound(frag, 1, boundInfo, boundCount, c_ctx.c_entryKey);
-      ndbrequire(ret != NdbSqlUtil::CmpUnknown);
+      treePos.m_pos = pos;
+      treePos.m_dir = 3;
+    } else {
+      // start scan after node end i.e. proceed to right child
+      treePos.m_pos = ZNIL;
+      treePos.m_dir = 5;
     }
-    if (ret < 0) {
-      // bound is left of this node
+  } else {
+    if (pos > 0) {
       jam();
-      const TupLoc loc = currNode.getLink(0);
-      if (loc != NullTupLoc) {
-        jam();
-        // continue to left subtree
-        currNode.m_loc = loc;
-        continue;
-      }
-      if (! glbNode.isNull()) {
-        jam();
-        // move up to the g.l.b but remember the bottom node
-        bottomNode = currNode;
-        currNode = glbNode;
-      } else {
-        // empty result set
-        return;
-      }
+      // start scan from previous entry
+      treePos.m_pos = pos - 1;
+      treePos.m_dir = 3;
     } else {
-      // bound is at or right of this node
-      jam();
-      const TupLoc loc = currNode.getLink(1);
-      if (loc != NullTupLoc) {
-        jam();
-        // save potential g.l.b
-        glbNode = currNode;
-        // continue to right subtree
-        currNode.m_loc = loc;
-        continue;
-      }
-    }
-    break;
-  }
-  for (unsigned j = 0, occup = currNode.getOccup(); j < occup; j++) {
-    jam();
-    int ret;
-    // read and compare attributes
-    readKeyAttrs(c_ctx, frag, currNode.getEnt(j), 0, c_ctx.c_entryKey);
-    ret = cmpScanBound(frag, 1, boundInfo, boundCount, c_ctx.c_entryKey);
-    ndbrequire(ret != NdbSqlUtil::CmpUnknown);
-    if (ret < 0) {
-      if (j > 0) {
-        // start scanning from previous entry
-        treePos.m_loc = currNode.m_loc;
-        treePos.m_pos = j - 1;
-        treePos.m_dir = 3;
-        return;
-      }
-      // start scanning upwards (pretend we came from left child)
-      treePos.m_loc = currNode.m_loc;
-      treePos.m_pos = 0;
+      treePos.m_pos = ZNIL;
       treePos.m_dir = 0;
-      return;
     }
   }
-  // start scanning this node
-  treePos.m_loc = currNode.m_loc;
-  treePos.m_pos = currNode.getOccup() - 1;
-  treePos.m_dir = 3;
 }

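The new findPosToAdd() above keeps the usual lo/hi invariant: lo always compares below the search entry and hi at or above it, so when the loop exits with hi - lo == 1 the value of hi is the insert position, and an exact match means a duplicate. A minimal standalone sketch of the same invariant, run over a sorted std::vector instead of a TUX node; the names below are illustrative and not taken from the NDB sources:

  #include <cassert>
  #include <vector>

  // Sketch only: same hi/lo search as findPosToAdd(), but over a sorted
  // std::vector instead of an index node.  Returns true and sets pos to
  // the insert position when the key is absent; returns false (duplicate)
  // when the key is already present, with pos at the matching slot.
  static bool findPosToAddSketch(const std::vector<int>& node, int key, int& pos)
  {
    int lo = -1;                       // node[lo] < key (virtual -infinity)
    int hi = (int)node.size();         // node[hi] >= key (virtual +infinity)
    while (hi - lo > 1) {
      const int j = (hi + lo) / 2;     // lo < j < hi
      if (key < node[j])
        hi = j;
      else if (key > node[j])
        lo = j;
      else {
        pos = j;                       // entry found - duplicate
        return false;
      }
    }
    assert(hi - lo == 1);              // mirrors the ndbrequire() in the patch
    pos = hi;                          // insert before node[hi]
    return true;
  }

  int main()
  {
    std::vector<int> node = {10, 20, 30};
    int pos = -1;
    assert(findPosToAddSketch(node, 25, pos) && pos == 2);   // new entry goes at slot 2
    assert(!findPosToAddSketch(node, 20, pos) && pos == 1);  // duplicate detected at slot 1
    return 0;
  }

findPosToScan() uses the same shape of loop, except that equality with the scan bound can never occur, which is why the patch replaces the equal branch there with ndbrequire(false).
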
=== modified file 'storage/ndb/src/ndbapi/NdbOperationDefine.cpp'
--- a/storage/ndb/src/ndbapi/NdbOperationDefine.cpp	2011-04-27 11:50:17 +0000
+++ b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp	2011-04-29 09:23:56 +0000
@@ -1404,5 +1404,10 @@ NdbOperation::handleOperationOptions (co
     op->m_flags &= ~Uint8(OF_QUEUEABLE);
   }
 
+  if (opts->optionsPresent & OperationOptions::OO_DEFERRED_CONSTAINTS)
+  {
+    op->m_flags |= OF_DEFERRED_CONSTRAINTS;
+  }
+
   return 0;
 }

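The new OO_DEFERRED_CONSTAINTS option is requested per operation through OperationOptions, as the runTestDeferredError() and runMixedDML() steps added later in this commit do. A minimal sketch of that call pattern, assuming the NdbApi.hpp environment used by the test, with transaction, record and row setup left to the caller:

  // Sketch only: ask for deferred unique-index constraint checking on a
  // single NdbRecord update, mirroring the pattern used in testIndex.cpp.
  // pTrans, pRowRecord and pRow are assumed to be set up by the caller.
  bool updateWithDeferredConstraints(NdbTransaction* pTrans,
                                     const NdbRecord* pRowRecord,
                                     const char* pRow)
  {
    NdbOperation::OperationOptions opts;
    bzero(&opts, sizeof(opts));
    opts.optionsPresent =
      NdbOperation::OperationOptions::OO_DEFERRED_CONSTAINTS;  // spelling as in the NDB API

    const NdbOperation* pOp = pTrans->updateTuple(pRowRecord, pRow,
                                                  pRowRecord, pRow,
                                                  0,            // no column mask
                                                  &opts,
                                                  sizeof(opts));
    if (pOp == 0)
      return false;

    // With the option set, unique-index constraint violations are
    // reported when the transaction commits rather than per operation.
    return pTrans->execute(NdbTransaction::Commit,
                           NdbOperation::AO_IgnoreError) == 0;
  }
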
=== modified file 'storage/ndb/src/ndbapi/NdbOperationExec.cpp'
--- a/storage/ndb/src/ndbapi/NdbOperationExec.cpp	2011-02-10 08:22:41 +0000
+++ b/storage/ndb/src/ndbapi/NdbOperationExec.cpp	2011-04-29 09:23:56 +0000
@@ -472,6 +472,7 @@ NdbOperation::prepareSend(Uint32 aTC_Con
   Uint8 tInterpretIndicator = theInterpretIndicator;
   Uint8 tNoDisk = (m_flags & OF_NO_DISK) != 0;
   Uint8 tQueable = (m_flags & OF_QUEUEABLE) != 0;
+  Uint8 tDeferred = (m_flags & OF_DEFERRED_CONSTRAINTS) != 0;
 
   /**
    * A dirty read, can not abort the transaction
@@ -490,6 +491,7 @@ NdbOperation::prepareSend(Uint32 aTC_Con
   tcKeyReq->setInterpretedFlag(tReqInfo, tInterpretIndicator);
   tcKeyReq->setNoDiskFlag(tReqInfo, tNoDisk);
   tcKeyReq->setQueueOnRedoProblemFlag(tReqInfo, tQueable);
+  tcKeyReq->setDeferredConstraints(tReqInfo, tDeferred);
 
   OperationType tOperationType = theOperationType;
   Uint8 abortOption = (ao == DefaultAbortOption) ? (Uint8) m_abortOption : (Uint8) ao;
@@ -1417,12 +1419,18 @@ NdbOperation::prepareSendNdbRecord(Abort
   m_abortOption= theSimpleIndicator && theOperationType==ReadRequest ?
     (Uint8) AO_IgnoreError : (Uint8) abortOption;
 
+  Uint8 tQueable = (m_flags & OF_QUEUEABLE) != 0;
+  Uint8 tDeferred = (m_flags & OF_DEFERRED_CONSTRAINTS) != 0;
+
   TcKeyReq::setAbortOption(tcKeyReq->requestInfo, m_abortOption);
   TcKeyReq::setCommitFlag(tcKeyReq->requestInfo, theCommitIndicator);
   TcKeyReq::setStartFlag(tcKeyReq->requestInfo, theStartIndicator);
   TcKeyReq::setSimpleFlag(tcKeyReq->requestInfo, theSimpleIndicator);
   TcKeyReq::setDirtyFlag(tcKeyReq->requestInfo, theDirtyIndicator);
 
+  TcKeyReq::setQueueOnRedoProblemFlag(tcKeyReq->requestInfo, tQueable);
+  TcKeyReq::setDeferredConstraints(tcKeyReq->requestInfo, tDeferred);
+
   theStatus= WaitResponse;
   theReceiver.prepareSend();
 

=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2011-04-10 17:32:41 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2011-04-29 09:23:56 +0000
@@ -494,7 +494,7 @@ ErrorBundle ErrorCodes[] = {
 
   { 1300, DMEC, IE, "Undefined error" },
   { 1301, DMEC, IE, "Backup issued to not master (reissue command to master)" },
-  { 1302, DMEC, IE, "Out of backup record" },
+  { 1302, DMEC, AE, "A backup is already running" },
   { 1303, DMEC, IS, "Out of resources" },
   { 1304, DMEC, IE, "Sequence failure" },
   { 1305, DMEC, IE, "Backup definition not implemented" },

=== modified file 'storage/ndb/test/ndbapi/testIndex.cpp'
--- a/storage/ndb/test/ndbapi/testIndex.cpp	2011-04-14 16:29:07 +0000
+++ b/storage/ndb/test/ndbapi/testIndex.cpp	2011-04-28 07:47:53 +0000
@@ -2843,6 +2843,299 @@ runBug60851(NDBT_Context* ctx, NDBT_Step
   return NDBT_OK;
 }
 
+static
+const int
+deferred_errors[] = {
+  5064, 0,
+  5065, 0,
+  5066, 0,
+  5067, 0,
+  5068, 0,
+  5069, 0,
+  5070, 0,
+  5071, 0,
+  5072, 1,
+  8090, 0,
+  8091, 0,
+  8092, 2, // connected tc
+  0, 0 // trailer
+};
+
+int
+runTestDeferredError(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NdbRestarter res;
+  Ndb* pNdb = GETNDB(step);
+  const NdbDictionary::Table* pTab = ctx->getTab();
+
+  const int rows = ctx->getNumRecords();
+
+  const NdbRecord * pRowRecord = pTab->getDefaultRecord();
+  CHK_RET_FAILED(pRowRecord != 0);
+
+  const Uint32 len = NdbDictionary::getRecordRowLength(pRowRecord);
+  Uint8 * pRow = new Uint8[len];
+
+  for (int i = 0; deferred_errors[i] != 0; i += 2)
+  {
+    const int errorno = deferred_errors[i];
+    const int nodefail = deferred_errors[i+1];
+
+    for (int j = 0; j<3; j++)
+    {
+      NdbTransaction* pTrans = pNdb->startTransaction();
+      CHK_RET_FAILED(pTrans != 0);
+
+      int nodeId =
+        nodefail == 0 ? 0 :
+        nodefail == 1 ? res.getNode(NdbRestarter::NS_RANDOM) :
+        nodefail == 2 ? pTrans->getConnectedNodeId() :
+        0;
+
+      ndbout_c("errorno: %u(nf: %u - %u) j: %u : %s", errorno,
+               nodefail, nodeId, j,
+               j == 0 ? "test before error insert" :
+               j == 1 ? "test with error insert" :
+               j == 2 ? "test after error insert" :
+               "");
+      if (j == 0 || j == 2)
+      {
+        // First time succeed
+        // Last time succeed
+      }
+      else if (nodefail == 0)
+      {
+        CHK_RET_FAILED(res.insertErrorInAllNodes(errorno) == 0);
+      }
+      else
+      {
+        int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+        CHK_RET_FAILED(res.dumpStateOneNode(nodeId, val2, 2) == 0);
+        CHK_RET_FAILED(res.insertErrorInNode(nodeId, errorno) == 0);
+      }
+
+      for (int rowNo = 0; rowNo < 100; rowNo++)
+      {
+        int rowId = rand() % rows;
+        bzero(pRow, len);
+
+        HugoCalculator calc(* pTab);
+        calc.setValues(pRow, pRowRecord, rowId, rand());
+
+        NdbOperation::OperationOptions opts;
+        bzero(&opts, sizeof(opts));
+        opts.optionsPresent =
+          NdbOperation::OperationOptions::OO_DEFERRED_CONSTAINTS;
+
+        const NdbOperation * pOp = pTrans->updateTuple(pRowRecord, (char*)pRow,
+                                                       pRowRecord, (char*)pRow,
+                                                       0,
+                                                       &opts,
+                                                       sizeof(opts));
+        CHK_RET_FAILED(pOp != 0);
+      }
+
+      int result = pTrans->execute(Commit, AO_IgnoreError);
+      if (j == 0 || j == 2)
+      {
+        CHK_RET_FAILED(result == 0);
+      }
+      else
+      {
+        CHK_RET_FAILED(result != 0);
+      }
+      pTrans->close();
+
+
+      if (j == 0 || j == 2)
+      {
+      }
+      else
+      {
+        if (nodefail)
+        {
+          ndbout_c("  waiting for %u to enter not-started", nodeId);
+          // Wait for a node to enter not-started
+          CHK_RET_FAILED(res.waitNodesNoStart(&nodeId, 1) == 0);
+
+          ndbout_c("  starting all");
+          CHK_RET_FAILED(res.startAll() == 0);
+          ndbout_c("  wait cluster started");
+          CHK_RET_FAILED(res.waitClusterStarted() == 0);
+          ndbout_c("  cluster started");
+        }
+        CHK_RET_FAILED(res.insertErrorInAllNodes(0) == 0);
+      }
+    }
+  }
+
+  delete [] pRow;
+
+  return NDBT_OK;
+}
+
+int
+runMixedDML(NDBT_Context* ctx, NDBT_Step* step)
+{
+  Ndb* pNdb = GETNDB(step);
+  const NdbDictionary::Table* pTab = ctx->getTab();
+
+  unsigned seed = (unsigned)NdbTick_CurrentMillisecond();
+
+  const int rows = ctx->getNumRecords();
+  const int loops = 10 * ctx->getNumLoops();
+  const int until_stopped = ctx->getProperty("UntilStopped");
+  const int deferred = ctx->getProperty("Deferred");
+  const int batch = ctx->getProperty("Batch", Uint32(50));
+
+  const NdbRecord * pRowRecord = pTab->getDefaultRecord();
+  CHK_RET_FAILED(pRowRecord != 0);
+
+  const Uint32 len = NdbDictionary::getRecordRowLength(pRowRecord);
+  Uint8 * pRow = new Uint8[len];
+
+  int count_ok = 0;
+  int count_failed = 0;
+  for (int i = 0; i < loops || (until_stopped && !ctx->isTestStopped()); i++)
+  {
+    NdbTransaction* pTrans = pNdb->startTransaction();
+    CHK_RET_FAILED(pTrans != 0);
+
+    int lastrow = 0;
+    int result = 0;
+    for (int rowNo = 0; rowNo < batch; rowNo++)
+    {
+      int left = rows - lastrow;
+      int rowId = lastrow;
+      if (left)
+      {
+        rowId += ndb_rand_r(&seed) % (left / 10 + 1);
+      }
+      else
+      {
+        break;
+      }
+      lastrow = rowId;
+
+      bzero(pRow, len);
+
+      HugoCalculator calc(* pTab);
+      calc.setValues(pRow, pRowRecord, rowId, rand());
+
+      NdbOperation::OperationOptions opts;
+      bzero(&opts, sizeof(opts));
+      if (deferred)
+      {
+        opts.optionsPresent =
+          NdbOperation::OperationOptions::OO_DEFERRED_CONSTAINTS;
+      }
+
+      const NdbOperation* pOp = 0;
+      switch(ndb_rand_r(&seed) % 3){
+      case 0:
+        pOp = pTrans->writeTuple(pRowRecord, (char*)pRow,
+                                 pRowRecord, (char*)pRow,
+                                 0,
+                                 &opts,
+                                 sizeof(opts));
+        break;
+      case 1:
+        pOp = pTrans->deleteTuple(pRowRecord, (char*)pRow,
+                                  pRowRecord, (char*)pRow,
+                                  0,
+                                  &opts,
+                                  sizeof(opts));
+        break;
+      case 2:
+        pOp = pTrans->updateTuple(pRowRecord, (char*)pRow,
+                                  pRowRecord, (char*)pRow,
+                                  0,
+                                  &opts,
+                                  sizeof(opts));
+        break;
+      }
+      CHK_RET_FAILED(pOp != 0);
+      result = pTrans->execute(NoCommit, AO_IgnoreError);
+      if (result != 0)
+      {
+        goto found_error;
+      }
+    }
+
+    result = pTrans->execute(Commit, AO_IgnoreError);
+    if (result != 0)
+    {
+  found_error:
+      count_failed++;
+      NdbError err = pTrans->getNdbError();
+      ndbout << err << endl;
+      CHK_RET_FAILED(err.code == 1235 ||
+                     err.code == 1236 ||
+                     err.code == 5066 ||
+                     err.status == NdbError::TemporaryError ||
+                     err.classification == NdbError::NoDataFound ||
+                     err.classification == NdbError::ConstraintViolation);
+    }
+    else
+    {
+      count_ok++;
+    }
+    pTrans->close();
+  }
+
+  ndbout_c("count_ok: %d count_failed: %d",
+           count_ok, count_failed);
+  delete [] pRow;
+
+  return NDBT_OK;
+}
+
+int
+runDeferredError(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NdbRestarter res;
+
+  for (int l = 0; l<ctx->getNumLoops() && !ctx->isTestStopped(); l++)
+  {
+    for (int i = 0; deferred_errors[i] != 0 && !ctx->isTestStopped(); i += 2)
+    {
+      const int errorno = deferred_errors[i];
+      const int nodefail = deferred_errors[i+1];
+
+      int nodeId = res.getNode(NdbRestarter::NS_RANDOM);
+
+      ndbout_c("errorno: %u (nf: %u - %u)",
+               errorno,
+               nodefail, nodeId);
+
+      if (nodefail == 0)
+      {
+        CHK_RET_FAILED(res.insertErrorInNode(nodeId, errorno) == 0);
+        NdbSleep_MilliSleep(300);
+        CHK_RET_FAILED(res.insertErrorInNode(nodeId, errorno) == 0);
+      }
+      else
+      {
+        int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+        CHK_RET_FAILED(res.dumpStateOneNode(nodeId, val2, 2) == 0);
+        CHK_RET_FAILED(res.insertErrorInNode(nodeId, errorno) == 0);
+        ndbout_c("  waiting for %u to enter not-started", nodeId);
+        // Wait for a node to enter not-started
+        CHK_RET_FAILED(res.waitNodesNoStart(&nodeId, 1) == 0);
+
+        ndbout_c("  starting all");
+        CHK_RET_FAILED(res.startAll() == 0);
+        ndbout_c("  wait cluster started");
+        CHK_RET_FAILED(res.waitClusterStarted() == 0);
+        ndbout_c("  cluster started");
+      }
+    }
+  }
+
+  ctx->stopTest();
+  return NDBT_OK;
+}
+
 NDBT_TESTSUITE(testIndex);
 TESTCASE("CreateAll", 
 	 "Test that we can create all various indexes on each table\n"
@@ -3243,6 +3536,66 @@ TESTCASE("FireTrigOverload", ""){
   FINALIZER(runClearError);
   FINALIZER(createRandomIndex_Drop);
 }
+TESTCASE("DeferredError",
+         "Test with deferred unique index handling and error inserts")
+{
+  TC_PROPERTY("LoggedIndexes", Uint32(0));
+  TC_PROPERTY("OrderedIndex", Uint32(0));
+  INITIALIZER(createPkIndex);
+  INITIALIZER(runLoadTable);
+  STEP(runTestDeferredError);
+  FINALIZER(createPkIndex_Drop);
+}
+TESTCASE("DeferredMixedLoad",
+         "Test mixed load of DML with deferred indexes")
+{
+  TC_PROPERTY("LoggedIndexes", Uint32(0));
+  TC_PROPERTY("OrderedIndex", Uint32(0));
+  TC_PROPERTY("UntilStopped", Uint32(0));
+  TC_PROPERTY("Deferred", Uint32(1));
+  INITIALIZER(createPkIndex);
+  INITIALIZER(runLoadTable);
+  STEPS(runMixedDML, 10);
+  FINALIZER(createPkIndex_Drop);
+}
+TESTCASE("DeferredMixedLoadError",
+         "Test mixed load of DML with deferred indexes")
+{
+  TC_PROPERTY("LoggedIndexes", Uint32(0));
+  TC_PROPERTY("OrderedIndex", Uint32(0));
+  TC_PROPERTY("UntilStopped", Uint32(1));
+  TC_PROPERTY("Deferred", Uint32(1));
+  INITIALIZER(createPkIndex);
+  INITIALIZER(runLoadTable);
+  STEPS(runMixedDML, 4);
+  STEP(runDeferredError);
+  FINALIZER(createPkIndex_Drop);
+}
+TESTCASE("NF_DeferredMixed",
+         "Test mixed load of DML with deferred indexes")
+{
+  TC_PROPERTY("LoggedIndexes", Uint32(0));
+  TC_PROPERTY("OrderedIndex", Uint32(0));
+  TC_PROPERTY("UntilStopped", Uint32(1));
+  TC_PROPERTY("Deferred", Uint32(1));
+  INITIALIZER(createPkIndex);
+  INITIALIZER(runLoadTable);
+  STEPS(runMixedDML, 4);
+  STEP(runRestarts);
+  FINALIZER(createPkIndex_Drop);
+}
+TESTCASE("NF_Mixed",
+         "Test mixed load of DML")
+{
+  TC_PROPERTY("LoggedIndexes", Uint32(0));
+  TC_PROPERTY("OrderedIndex", Uint32(0));
+  TC_PROPERTY("UntilStopped", Uint32(1));
+  INITIALIZER(createPkIndex);
+  INITIALIZER(runLoadTable);
+  STEPS(runMixedDML, 4);
+  STEP(runRestarts);
+  FINALIZER(createPkIndex_Drop);
+}
 TESTCASE("Bug56829",
          "Return empty ordered index nodes to index fragment "
          "so that empty fragment pages can be freed"){

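runTestDeferredError() above exercises each error insert in three passes per entry (a clean run, a run with the error active, then a clean run again), walking the zero-terminated (error code, node-failure mode) pairs in deferred_errors[]. A standalone sketch of just that driving loop, with the NDB calls replaced by a stub so the control flow is visible; the stub and table names here are illustrative, not from the test:

  #include <cstdio>

  // Sketch only: the (error code, node-failure mode) table is walked two
  // entries at a time until the {0, 0} trailer, and each error is tried
  // in three passes: before, with, and after the error insert.
  static const int deferred_errors_sketch[] = {
    5064, 0,
    8092, 2,   // 2 == fail the connected TC node
    0, 0       // trailer
  };

  static bool runOnePass(int errorno, int nodefail, int pass)
  {
    // Stand-in for the real transaction batch; the real test expects the
    // commit to fail only in pass 1, while the error insert is active.
    std::printf("errorno: %d (nf: %d) pass: %d\n", errorno, nodefail, pass);
    return pass != 1;
  }

  int main()
  {
    for (int i = 0; deferred_errors_sketch[i] != 0; i += 2) {
      const int errorno  = deferred_errors_sketch[i];
      const int nodefail = deferred_errors_sketch[i + 1];
      for (int pass = 0; pass < 3; pass++) {
        const bool ok = runOnePass(errorno, nodefail, pass);
        if (ok != (pass != 1))
          return 1;                  // unexpected outcome
      }
    }
    return 0;
  }
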
=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2011-04-27 08:39:36 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2011-04-29 09:23:56 +0000
@@ -12,6 +12,26 @@
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+max-time: 900
+cmd: testIndex
+args: -n DeferredError
+
+max-time: 900
+cmd: testIndex
+args: -n DeferredMixedLoad T1 T6 T13
+
+max-time: 900
+cmd: testIndex
+args: -n DeferredMixedLoadError T1 T6 T13
+
+max-time: 900
+cmd: testIndex
+args: -n NF_DeferredMixed T1 T6 T13
+
+max-time: 900
+cmd: testIndex
+args: -n NF_Mixed T1 T6 T13
+
 max-time: 600
 cmd: atrt-testBackup
 args: -n NFMaster T1

No bundle (reason: revision is a merge).