List:Commits« Previous MessageNext Message »
From:Jan Wedvik Date:May 26 2011 3:05pm
Subject:bzr commit into mysql-5.1-telco-7.0-spj-scan-vs-scan branch
(jan.wedvik:3498)
View as plain text  
#At file:///net/atum17/export/home/tmp/jw159207/mysql/repo/push-scan-scan/ based on revid:jonas@stripped

 3498 Jan Wedvik	2011-05-26 [merge]
      Merge telco-7.0 -> SPJ

    added:
      mysql-test/suite/ndb/r/ndb_dd_bug12581213.result
      mysql-test/suite/ndb/t/ndb_dd_bug12581213.cnf
      mysql-test/suite/ndb/t/ndb_dd_bug12581213.test
      storage/ndb/cmake/os/
      storage/ndb/cmake/os/Windows.cmake
      storage/ndb/cmake/os/WindowsCache.cmake
    modified:
      CMakeLists.txt
      configure.in
      mysql-test/suite/ndb/r/ndbinfo.result
      mysql-test/suite/ndb/t/ndbinfo.test
      sql/ha_ndbinfo.cc
      sql/ha_ndbinfo.h
      storage/ndb/include/kernel/kernel_types.h
      storage/ndb/include/kernel/ndb_limits.h
      storage/ndb/include/kernel/signaldata/DiGetNodes.hpp
      storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp
      storage/ndb/include/kernel/signaldata/SumaImpl.hpp
      storage/ndb/include/kernel/signaldata/TupCommit.hpp
      storage/ndb/include/ndb_version.h.in
      storage/ndb/include/ndbapi/NdbEventOperation.hpp
      storage/ndb/include/ndbapi/NdbOperation.hpp
      storage/ndb/include/ndbapi/NdbTransaction.hpp
      storage/ndb/include/ndbapi/ndb_cluster_connection.hpp
      storage/ndb/ndb_configure.cmake
      storage/ndb/ndb_configure.m4
      storage/ndb/src/common/debugger/signaldata/SumaImpl.cpp
      storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp
      storage/ndb/src/kernel/blocks/LocalProxy.cpp
      storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
      storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
      storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
      storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
      storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcProxy.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcProxy.hpp
      storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
      storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.cpp
      storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.hpp
      storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
      storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp
      storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp
      storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
      storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
      storage/ndb/src/kernel/blocks/suma/Suma.cpp
      storage/ndb/src/mgmsrv/Config.hpp
      storage/ndb/src/ndbapi/NdbEventOperation.cpp
      storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
      storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp
      storage/ndb/src/ndbapi/NdbInfo.cpp
      storage/ndb/src/ndbapi/NdbInfo.hpp
      storage/ndb/src/ndbapi/NdbInfoRecAttr.hpp
      storage/ndb/src/ndbapi/NdbInfoScanOperation.cpp
      storage/ndb/src/ndbapi/NdbOperationExec.cpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
      storage/ndb/src/ndbapi/NdbTransaction.cpp
      storage/ndb/src/ndbapi/TransporterFacade.hpp
      storage/ndb/src/ndbapi/ndb_cluster_connection.cpp
      storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp
      storage/ndb/src/ndbapi/ndberror.c
      storage/ndb/test/include/HugoOperations.hpp
      storage/ndb/test/include/HugoTransactions.hpp
      storage/ndb/test/ndbapi/testBasic.cpp
      storage/ndb/test/ndbapi/testIndex.cpp
      storage/ndb/test/ndbapi/testRestartGci.cpp
      storage/ndb/test/ndbapi/test_event.cpp
      storage/ndb/test/run-test/daily-devel-tests.txt
      storage/ndb/test/src/HugoOperations.cpp
      storage/ndb/test/src/HugoTransactions.cpp
      storage/ndb/test/tools/hugoPkUpdate.cpp
      storage/ndb/tools/CMakeLists.txt
      storage/ndb/tools/Makefile.am
      storage/ndb/tools/ndb_config.cpp
      storage/ndb/tools/ndbinfo_sql.cpp
=== modified file 'CMakeLists.txt'
--- a/CMakeLists.txt	2011-05-12 14:13:43 +0000
+++ b/CMakeLists.txt	2011-05-24 11:41:58 +0000
@@ -128,6 +128,14 @@ IF(MSVC)
     # generate map files, set stack size (see bug#20815)
     SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MAP /MAPINFO:EXPORTS")
     SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /STACK:1048576")
+    IF(NOT MCP_BUG11765145)
+      # Fails to link with error message about missing .map file, turn
+      # off incremental linking to workaround problem 
+      IF(CMAKE_GENERATOR MATCHES "Visual Studio 10")
+        MESSAGE(STATUS "Turning off incremental linking for VS 2010")
+        SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /INCREMENTAL:NO")
+      ENDIF()
+    ENDIF()
 
     # remove support for Exception handling
     STRING(REPLACE "/GX"   "" CMAKE_CXX_FLAGS            ${CMAKE_CXX_FLAGS})
@@ -327,19 +335,3 @@ IF(WITH_EMBEDDED_SERVER) 
 ENDIF(WITH_EMBEDDED_SERVER)
 ADD_SUBDIRECTORY(mysql-test/lib/My/SafeProcess)
 
-# Dump cmake's output and error log to help diagnosing
-# platform checks
-MACRO(DUMP_FILE filename)
-  IF(EXISTS ${filename})
-    FILE(READ ${filename} content)
-    MESSAGE(STATUS "=vvvv= Dumping ${filename} ")
-    MESSAGE(STATUS "${content}")
-    MESSAGE(STATUS "=^^^^=")
-  ELSE()
-    MESSAGE(STATUS "'${filename}' does not exist")
-  ENDIF()
-ENDMACRO()
- 
-DUMP_FILE("${CMAKE_BINARY_DIR}/CMakeFiles/CMakeError.log")
-DUMP_FILE("${CMAKE_BINARY_DIR}/CMakeFiles/CMakeOutput.log")
-

=== modified file 'configure.in'
--- a/configure.in	2011-04-26 07:39:21 +0000
+++ b/configure.in	2011-05-24 08:44:31 +0000
@@ -12,7 +12,7 @@ dnl
 dnl When changing the major version number please also check the switch
 dnl statement in mysqlbinlog::check_master_version().  You may also need
 dnl to update version.c in ndb.
-AC_INIT([MySQL Server], [5.1.56-ndb-7.0.25], [], [mysql])
+AC_INIT([MySQL Server], [5.1.56-ndb-7.0.26], [], [mysql])
 
 AC_CONFIG_SRCDIR([sql/mysqld.cc])
 AC_CANONICAL_SYSTEM

=== added file 'mysql-test/suite/ndb/r/ndb_dd_bug12581213.result'
--- a/mysql-test/suite/ndb/r/ndb_dd_bug12581213.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb/r/ndb_dd_bug12581213.result	2011-05-23 10:38:41 +0000
@@ -0,0 +1,17 @@
+CREATE LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile.dat'
+INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE 'datafile.dat'
+USE LOGFILE GROUP lg1
+INITIAL_SIZE 12M
+ENGINE NDB;
+alter tablespace ts1
+drop datafile 'datafile.dat'
+engine ndb;
+drop tablespace ts1
+engine ndb;
+drop logfile group lg1
+engine ndb;

=== modified file 'mysql-test/suite/ndb/r/ndbinfo.result'
--- a/mysql-test/suite/ndb/r/ndbinfo.result	2010-11-03 09:48:25 +0000
+++ b/mysql-test/suite/ndb/r/ndbinfo.result	2011-05-23 13:45:57 +0000
@@ -136,6 +136,7 @@ Variable_name	Value
 ndbinfo_database	ndbinfo
 ndbinfo_max_bytes	0
 ndbinfo_max_rows	10
+ndbinfo_offline	OFF
 ndbinfo_show_hidden	OFF
 ndbinfo_table_prefix	ndb$
 ndbinfo_version	NDB_VERSION_D
@@ -179,20 +180,55 @@ node_id != 0
 1
 Warnings:
 Warning	40001	Table 'ndb$test' is defined differently in NDB, there are more columns available. The SQL to regenerate is: 'CREATE TABLE `ndbinfo`.`ndb$test` (`node_id` INT UNSIGNED, `block_number` INT UNSIGNED, `block_instance` INT UNSIGNED, `counter` INT UNSIGNED, `counter2` BIGINT UNSIGNED) ENGINE=NDBINFO'
-
-## 2) Column does not exist in NDB -> error, with warning
 DROP TABLE ndb$test;
+
+## 2) Column does not exist in NDB -> allowed, with warning, non existing
+##    column(s) return NULL
+## 2a) Extra column at end
 CREATE TABLE ndb$test (node_id int, non_existing int) ENGINE = ndbinfo;
-SELECT * FROM ndb$test;
-ERROR HY000: Got error 40001 'Incompatible table definitions' from NDBINFO
-SHOW WARNINGS;
-Level	Code	Message
+SELECT DISTINCT node_id, non_existing FROM ndb$test;
+node_id	non_existing
+1	NULL
+2	NULL
+Warnings:
 Error	40001	Table 'ndb$test' is defined differently in NDB, column 'non_existing' does not exist. The SQL to regenerate is: 'CREATE TABLE `ndbinfo`.`ndb$test` (`node_id` INT UNSIGNED, `block_number` INT UNSIGNED, `block_instance` INT UNSIGNED, `counter` INT UNSIGNED, `counter2` BIGINT UNSIGNED) ENGINE=NDBINFO'
-Error	1296	Got error 40001 'Incompatible table definitions' from NDBINFO
+Warning	40001	Table 'ndb$test' is defined differently in NDB, there are more columns available. The SQL to regenerate is: 'CREATE TABLE `ndbinfo`.`ndb$test` (`node_id` INT UNSIGNED, `block_number` INT UNSIGNED, `block_instance` INT UNSIGNED, `counter` INT UNSIGNED, `counter2` BIGINT UNSIGNED) ENGINE=NDBINFO'
+DROP TABLE ndb$test;
+
+## 2b) Extra column(s) in middle
+CREATE TABLE ndb$test (
+  node_id int unsigned,
+  non_existing int unsigned,
+  block_number int unsigned,
+  block_instance int unsigned,
+  counter int unsigned,
+  counter2 bigint unsigned
+) ENGINE = ndbinfo;
+SELECT DISTINCT node_id, non_existing, block_number FROM ndb$test;
+node_id	non_existing	block_number
+1	NULL	249
+2	NULL	249
+Warnings:
+Error	40001	Table 'ndb$test' is defined differently in NDB, column 'non_existing' does not exist. The SQL to regenerate is: 'CREATE TABLE `ndbinfo`.`ndb$test` (`node_id` INT UNSIGNED, `block_number` INT UNSIGNED, `block_instance` INT UNSIGNED, `counter` INT UNSIGNED, `counter2` BIGINT UNSIGNED) ENGINE=NDBINFO'
+DROP TABLE ndb$test;
+
+## 2c) Extra column first
+CREATE TABLE ndb$test (non_existing int, node_id int) ENGINE = ndbinfo;
+SELECT DISTINCT node_id, non_existing FROM ndb$test;
+node_id	non_existing
+1	NULL
+2	NULL
+Warnings:
+Error	40001	Table 'ndb$test' is defined differently in NDB, column 'non_existing' does not exist. The SQL to regenerate is: 'CREATE TABLE `ndbinfo`.`ndb$test` (`node_id` INT UNSIGNED, `block_number` INT UNSIGNED, `block_instance` INT UNSIGNED, `counter` INT UNSIGNED, `counter2` BIGINT UNSIGNED) ENGINE=NDBINFO'
+Warning	40001	Table 'ndb$test' is defined differently in NDB, there are more columns available. The SQL to regenerate is: 'CREATE TABLE `ndbinfo`.`ndb$test` (`node_id` INT UNSIGNED, `block_number` INT UNSIGNED, `block_instance` INT UNSIGNED, `counter` INT UNSIGNED, `counter2` BIGINT UNSIGNED) ENGINE=NDBINFO'
+SELECT DISTINCT non_existing, node_id FROM ndb$test;
+non_existing	node_id
+NULL	1
+NULL	2
+DROP TABLE ndb$test;
 
 ## 3) Incompatible column type -> error, with warning
 ## 3a) int instead of bigint
-DROP TABLE ndb$test;
 CREATE TABLE ndb$test (counter2 int) ENGINE = ndbinfo;
 SELECT * FROM ndb$test;
 ERROR HY000: Got error 40001 'Incompatible table definitions' from NDBINFO
@@ -219,6 +255,26 @@ Level	Code	Message
 Error	40001	Table 'ndb$test' is defined differently in NDB, column 'node_id' is not compatible. The SQL to regenerate is: 'CREATE TABLE `ndbinfo`.`ndb$test` (`node_id` INT UNSIGNED, `block_number` INT UNSIGNED, `block_instance` INT UNSIGNED, `counter` INT UNSIGNED, `counter2` BIGINT UNSIGNED) ENGINE=NDBINFO'
 Error	1296	Got error 40001 'Incompatible table definitions' from NDBINFO
 DROP TABLE ndb$test;
+## 3d) column which is NOT NULL
+CREATE TABLE ndb$test (node_id int unsigned NOT NULL) ENGINE = ndbinfo;
+SELECT * FROM ndb$test;
+ERROR HY000: Got error 40001 'Incompatible table definitions' from NDBINFO
+SHOW WARNINGS;
+Level	Code	Message
+Error	40001	Table 'ndb$test' is defined differently in NDB, column 'node_id' is NOT NULL. The SQL to regenerate is: 'CREATE TABLE `ndbinfo`.`ndb$test` (`node_id` INT UNSIGNED, `block_number` INT UNSIGNED, `block_instance` INT UNSIGNED, `counter` INT UNSIGNED, `counter2` BIGINT UNSIGNED) ENGINE=NDBINFO'
+Error	1296	Got error 40001 'Incompatible table definitions' from NDBINFO
+DROP TABLE ndb$test;
+## 3e) non existing column which is NOT NULL
+CREATE TABLE ndb$test (
+  block_number int unsigned,
+  non_existing int NOT NULL) ENGINE = ndbinfo;
+SELECT * FROM ndb$test;
+ERROR HY000: Got error 40001 'Incompatible table definitions' from NDBINFO
+SHOW WARNINGS;
+Level	Code	Message
+Error	40001	Table 'ndb$test' is defined differently in NDB, column 'non_existing' is NOT NULL. The SQL to regenerate is: 'CREATE TABLE `ndbinfo`.`ndb$test` (`node_id` INT UNSIGNED, `block_number` INT UNSIGNED, `block_instance` INT UNSIGNED, `counter` INT UNSIGNED, `counter2` BIGINT UNSIGNED) ENGINE=NDBINFO'
+Error	1296	Got error 40001 'Incompatible table definitions' from NDBINFO
+DROP TABLE ndb$test;
 
 ## 4) Table with primary key/indexes not supported
 CREATE TABLE ndb$test (node_id int, block_number int PRIMARY KEY) ENGINE = ndbinfo;
@@ -238,3 +294,55 @@ node_id
 1
 2
 
+set @@ndbinfo_offline=1;
+ERROR HY000: Variable 'ndbinfo_offline' is a GLOBAL variable and should be set with SET GLOBAL
+
+SELECT DISTINCT(node_id) FROM ndbinfo.counters ORDER BY node_id;
+node_id
+1
+2
+
+set @@global.ndbinfo_offline=TRUE;
+select @@ndbinfo_offline;
+@@ndbinfo_offline
+1
+
+CREATE TABLE ndb$does_not_exist_in_ndb(
+  node_id int,
+  message varchar(255)
+) ENGINE = ndbinfo;
+
+CREATE VIEW view_on_table_which_does_not_exist_in_ndb AS
+  SELECT node_id, message
+  FROM ndbinfo.ndb$does_not_exist_in_ndb;
+
+SHOW CREATE TABLE ndb$does_not_exist_in_ndb;
+Table	Create Table
+ndb$does_not_exist_in_ndb	CREATE TABLE `ndb$does_not_exist_in_ndb` (
+  `node_id` int(11) DEFAULT NULL,
+  `message` varchar(255) DEFAULT NULL
+) ENGINE=NDBINFO DEFAULT CHARSET=latin1
+
+SELECT * FROM view_on_table_which_does_not_exist_in_ndb;
+node_id	message
+Warnings:
+Note	1	'NDBINFO' has been started in offline mode since the 'NDBCLUSTER' engine is disabled or @@global.ndbinfo_offline is turned on - no rows can be returned
+SELECT * FROM ndb$does_not_exist_in_ndb;
+node_id	message
+Warnings:
+Note	1	'NDBINFO' has been started in offline mode since the 'NDBCLUSTER' engine is disabled or @@global.ndbinfo_offline is turned on - no rows can be returned
+SELECT DISTINCT(node_id) FROM ndbinfo.counters ORDER BY node_id;
+node_id
+Warnings:
+Note	1	'NDBINFO' has been started in offline mode since the 'NDBCLUSTER' engine is disabled or @@global.ndbinfo_offline is turned on - no rows can be returned
+
+DROP VIEW view_on_table_which_does_not_exist_in_ndb;
+DROP TABLE ndb$does_not_exist_in_ndb;
+
+set @@global.ndbinfo_offline = FALSE;
+
+SELECT DISTINCT(node_id) FROM ndbinfo.counters ORDER BY node_id;
+node_id
+1
+2
+

=== added file 'mysql-test/suite/ndb/t/ndb_dd_bug12581213.cnf'
--- a/mysql-test/suite/ndb/t/ndb_dd_bug12581213.cnf	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb/t/ndb_dd_bug12581213.cnf	2011-05-23 10:38:41 +0000
@@ -0,0 +1,7 @@
+!include suite/ndb/my.cnf
+
+[cluster_config.1]
+ndbd=
+NoOfReplicas=1
+MaxNoOfOpenFiles=27
+InitialNoOfOpenFiles=26

=== added file 'mysql-test/suite/ndb/t/ndb_dd_bug12581213.test'
--- a/mysql-test/suite/ndb/t/ndb_dd_bug12581213.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb/t/ndb_dd_bug12581213.test	2011-05-23 10:38:41 +0000
@@ -0,0 +1,23 @@
+-- source include/have_ndb.inc
+
+CREATE LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile.dat'
+INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE NDB;
+
+CREATE TABLESPACE ts1
+ADD DATAFILE 'datafile.dat'
+USE LOGFILE GROUP lg1
+INITIAL_SIZE 12M
+ENGINE NDB;
+
+alter tablespace ts1
+drop datafile 'datafile.dat'
+engine ndb;
+
+drop tablespace ts1
+engine ndb;
+
+drop logfile group lg1
+engine ndb;

=== modified file 'mysql-test/suite/ndb/t/ndbinfo.test'
--- a/mysql-test/suite/ndb/t/ndbinfo.test	2010-11-03 09:48:25 +0000
+++ b/mysql-test/suite/ndb/t/ndbinfo.test	2011-05-23 13:45:57 +0000
@@ -98,17 +98,35 @@ SELECT count(*) >= 20 FROM blocks;
 DROP TABLE ndb$test;
 CREATE TABLE ndb$test (node_id int unsigned) ENGINE = ndbinfo;
 SELECT node_id != 0 FROM ndb$test LIMIT 1;
-
-## 2) Column does not exist in NDB -> error, with warning
 DROP TABLE ndb$test;
+
+## 2) Column does not exist in NDB -> allowed, with warning, non existing
+##    column(s) return NULL
+## 2a) Extra column at end
 CREATE TABLE ndb$test (node_id int, non_existing int) ENGINE = ndbinfo;
---error ER_GET_ERRMSG
-SELECT * FROM ndb$test;
-SHOW WARNINGS;
+SELECT DISTINCT node_id, non_existing FROM ndb$test;
+DROP TABLE ndb$test;
+
+## 2b) Extra column(s) in middle
+CREATE TABLE ndb$test (
+  node_id int unsigned,
+  non_existing int unsigned,
+  block_number int unsigned,
+  block_instance int unsigned,
+  counter int unsigned,
+  counter2 bigint unsigned
+) ENGINE = ndbinfo;
+SELECT DISTINCT node_id, non_existing, block_number FROM ndb$test;
+DROP TABLE ndb$test;
+
+## 2c) Extra column first
+CREATE TABLE ndb$test (non_existing int, node_id int) ENGINE = ndbinfo;
+SELECT DISTINCT node_id, non_existing FROM ndb$test;
+SELECT DISTINCT non_existing, node_id FROM ndb$test;
+DROP TABLE ndb$test;
 
 ## 3) Incompatible column type -> error, with warning
 ## 3a) int instead of bigint
-DROP TABLE ndb$test;
 CREATE TABLE ndb$test (counter2 int) ENGINE = ndbinfo;
 --error ER_GET_ERRMSG
 SELECT * FROM ndb$test;
@@ -126,6 +144,21 @@ CREATE TABLE ndb$test (node_id varchar(2
 SELECT * FROM ndb$test;
 SHOW WARNINGS;
 DROP TABLE ndb$test;
+## 3d) column which is NOT NULL
+CREATE TABLE ndb$test (node_id int unsigned NOT NULL) ENGINE = ndbinfo;
+--error ER_GET_ERRMSG
+SELECT * FROM ndb$test;
+SHOW WARNINGS;
+DROP TABLE ndb$test;
+## 3e) non existing column which is NOT NULL
+CREATE TABLE ndb$test (
+  block_number int unsigned,
+  non_existing int NOT NULL) ENGINE = ndbinfo;
+--error ER_GET_ERRMSG
+SELECT * FROM ndb$test;
+SHOW WARNINGS;
+DROP TABLE ndb$test;
+
 
 ## 4) Table with primary key/indexes not supported
 --error ER_TOO_MANY_KEYS
@@ -148,4 +181,52 @@ CREATE TABLE ndb$test (node_id int AUTO_
 select distinct node_id
 from ndbinfo.diskpagebuffer;
 
+
+#
+# BUG#11885602
+# - It was allowed to CREATE TABLE which was not in NDB, but
+#   creating a view on that table failed. Implement ndbinfo_offline
+#   mode which allows tables to be created and opened although they
+#   don't exists or have different table definition.
+#   This is exactly the same behaviour as when NDBCLUSTER
+#   is disabled
+#
+
+# Check ndbinfo_offline is GLOBAL variable
+--error ER_GLOBAL_VARIABLE
+set @@ndbinfo_offline=1;
+
+# Query used to check that open tables are closed
+# when offline mode is turned on and off
+let $q1 = SELECT DISTINCT(node_id) FROM ndbinfo.counters ORDER BY node_id;
+eval $q1;
+
+# Turn on ndbinfo_offline
+set @@global.ndbinfo_offline=TRUE;
+select @@ndbinfo_offline;
+
+CREATE TABLE ndb$does_not_exist_in_ndb(
+  node_id int,
+  message varchar(255)
+) ENGINE = ndbinfo;
+
+CREATE VIEW view_on_table_which_does_not_exist_in_ndb AS
+  SELECT node_id, message
+  FROM ndbinfo.ndb$does_not_exist_in_ndb;
+
+SHOW CREATE TABLE ndb$does_not_exist_in_ndb;
+
+# SELECTs return no rows in offline mode
+SELECT * FROM view_on_table_which_does_not_exist_in_ndb;
+SELECT * FROM ndb$does_not_exist_in_ndb;
+eval $q1;
+
+DROP VIEW view_on_table_which_does_not_exist_in_ndb;
+DROP TABLE ndb$does_not_exist_in_ndb;
+
+# Restore original value
+set @@global.ndbinfo_offline = FALSE;
+
+eval $q1;
+
 --source ndbinfo_drop.inc

=== modified file 'sql/ha_ndbinfo.cc'
--- a/sql/ha_ndbinfo.cc	2010-11-10 14:17:13 +0000
+++ b/sql/ha_ndbinfo.cc	2011-05-23 13:45:57 +0000
@@ -56,10 +56,10 @@ static MYSQL_THDVAR_BOOL(
   FALSE                              /* default */
 );
 
-static char* ndbinfo_dbname = (char*)"ndbinfo";
+static char* opt_ndbinfo_dbname = (char*)"ndbinfo";
 static MYSQL_SYSVAR_STR(
   database,                         /* name */
-  ndbinfo_dbname,                   /* var */
+  opt_ndbinfo_dbname,               /* var */
   PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
   "Name of the database used by ndbinfo",
   NULL,                             /* check func. */
@@ -67,10 +67,10 @@ static MYSQL_SYSVAR_STR(
   NULL                              /* default */
 );
 
-static char* table_prefix = (char*)"ndb$";
+static char* opt_ndbinfo_table_prefix = (char*)"ndb$";
 static MYSQL_SYSVAR_STR(
   table_prefix,                     /* name */
-  table_prefix,                     /* var */
+  opt_ndbinfo_table_prefix,         /* var */
   PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
   "Prefix to use for all virtual tables loaded from NDB",
   NULL,                             /* check func. */
@@ -78,10 +78,10 @@ static MYSQL_SYSVAR_STR(
   NULL                              /* default */
 );
 
-static Uint32 version = NDB_VERSION_D;
+static Uint32 opt_ndbinfo_version = NDB_VERSION_D;
 static MYSQL_SYSVAR_UINT(
   version,                          /* name */
-  version,                          /* var */
+  opt_ndbinfo_version,              /* var */
   PLUGIN_VAR_NOCMDOPT | PLUGIN_VAR_READONLY,
   "Compile version for ndbinfo",
   NULL,                             /* check func. */
@@ -92,6 +92,45 @@ static MYSQL_SYSVAR_UINT(
   0                                 /* block */
 );
 
+static my_bool opt_ndbinfo_offline;
+
+static
+void
+offline_update(THD* thd, struct st_mysql_sys_var* var,
+               void* var_ptr, const void* save)
+{
+  DBUG_ENTER("offline_update");
+
+  const my_bool new_offline =
+    (*(static_cast<const my_bool*>(save)) != 0);
+  if (new_offline == opt_ndbinfo_offline)
+  {
+    // No change
+    DBUG_VOID_RETURN;
+  }
+
+  // Set offline mode, any tables opened from here on will
+  // be opened in the new mode
+  opt_ndbinfo_offline = new_offline;
+
+  // Close any open tables which may be in the old mode
+  (void)close_cached_tables(thd, NULL, false, true, false);
+
+  DBUG_VOID_RETURN;
+}
+
+static MYSQL_SYSVAR_BOOL(
+  offline,                          /* name */
+  opt_ndbinfo_offline,              /* var */
+  PLUGIN_VAR_NOCMDOPT,
+  "Set ndbinfo in offline mode, tables and views can "
+  "be opened even if they don't exist or have different "
+  "definition in NDB. No rows will be returned.",
+  NULL,                             /* check func. */
+  offline_update,                   /* update func. */
+  0                                 /* default */
+);
+
 
 static NdbInfo* g_ndbinfo;
 
@@ -124,10 +163,15 @@ struct ha_ndbinfo_impl
   Vector<const NdbInfoRecAttr *> m_columns;
   bool m_first_use;
 
+  // Indicates if table has been opened in offline mode
+  // can only be reset by closing the table
+  bool m_offline;
+
   ha_ndbinfo_impl() :
     m_table(NULL),
     m_scan_op(NULL),
-    m_first_use(true)
+    m_first_use(true),
+    m_offline(false)
   {
   }
 };
@@ -211,7 +255,7 @@ static void
 generate_sql(const NdbInfo::Table* ndb_tab, BaseString& sql)
 {
   sql.appfmt("'CREATE TABLE `%s`.`%s%s` (",
-             ndbinfo_dbname, table_prefix, ndb_tab->getName());
+             opt_ndbinfo_dbname, opt_ndbinfo_table_prefix, ndb_tab->getName());
 
   const char* separator = "";
   for (unsigned i = 0; i < ndb_tab->columns(); i++)
@@ -265,7 +309,7 @@ warn_incompatible(const NdbInfo::Table* 
 
   msg.assfmt("Table '%s%s' is defined differently in NDB, %s. The "
              "SQL to regenerate is: ",
-             table_prefix, ndb_tab->getName(), explanation);
+             opt_ndbinfo_table_prefix, ndb_tab->getName(), explanation);
   generate_sql(ndb_tab, msg);
 
   const MYSQL_ERROR::enum_warning_level level =
@@ -289,12 +333,18 @@ bool ha_ndbinfo::is_open(void) const
   return m_impl.m_table != NULL;
 }
 
+bool ha_ndbinfo::is_offline(void) const
+{
+  return m_impl.m_offline;
+}
+
 int ha_ndbinfo::open(const char *name, int mode, uint test_if_locked)
 {
   DBUG_ENTER("ha_ndbinfo::open");
   DBUG_PRINT("enter", ("name: %s, mode: %d", name, mode));
 
   assert(is_closed());
+  assert(!is_offline()); // Closed table can not be offline
 
   if (mode == O_RDWR)
   {
@@ -307,9 +357,11 @@ int ha_ndbinfo::open(const char *name, i
     DBUG_ASSERT(false);
   }
 
-  if (ndbcluster_is_disabled())
+  if (opt_ndbinfo_offline ||
+      ndbcluster_is_disabled())
   {
-    // Allow table to be opened with ndbcluster disabled
+    // Mark table as being offline and allow it to be opened
+    m_impl.m_offline = true;
     DBUG_RETURN(0);
   }
 
@@ -321,21 +373,36 @@ int ha_ndbinfo::open(const char *name, i
     DBUG_RETURN(err2mysql(err));
   }
 
+  /*
+    Check table def. to detect incompatible differences which should
+    return an error. Differences which only generate a warning
+    is checked on first use
+  */
   DBUG_PRINT("info", ("Comparing MySQL's table def against NDB"));
   const NdbInfo::Table* ndb_tab = m_impl.m_table;
   for (uint i = 0; i < table->s->fields; i++)
   {
     const Field* field = table->field[i];
-    const NdbInfo::Column* col = ndb_tab->getColumn(field->field_name);
-    if (!col)
+
+    // Check if field is NULLable
+    if (const_cast<Field*>(field)->real_maybe_null() == false)
     {
-      // The column didn't exist
+      // Only NULLable fields supported
       warn_incompatible(ndb_tab, true,
-                        "column '%s' does not exist",
+                        "column '%s' is NOT NULL",
                         field->field_name);
       DBUG_RETURN(ERR_INCOMPAT_TABLE_DEF);
     }
 
+    // Check if column exist in NDB
+    const NdbInfo::Column* col = ndb_tab->getColumn(field->field_name);
+    if (!col)
+    {
+      // The column didn't exist
+      continue;
+    }
+
+    // Check compatible field and column type
     bool compatible = false;
     switch(col->m_type)
     {
@@ -378,7 +445,7 @@ int ha_ndbinfo::close(void)
 {
   DBUG_ENTER("ha_ndbinfo::close");
 
-  if (ndbcluster_is_disabled())
+  if (is_offline())
     DBUG_RETURN(0);
 
   assert(is_open());
@@ -395,12 +462,13 @@ int ha_ndbinfo::rnd_init(bool scan)
   DBUG_ENTER("ha_ndbinfo::rnd_init");
   DBUG_PRINT("info", ("scan: %d", scan));
 
-  if (ndbcluster_is_disabled())
+  if (is_offline())
   {
     push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_NOTE, 1,
-                 "'NDBINFO' has been started "
-                 "in limited mode since the 'NDBCLUSTER' "
-                 "engine is disabled - no rows can be returned");
+                 "'NDBINFO' has been started in offline mode "
+                 "since the 'NDBCLUSTER' engine is disabled "
+                 "or @@global.ndbinfo_offline is turned on "
+                 "- no rows can be returned");
     DBUG_RETURN(0);
   }
 
@@ -412,13 +480,30 @@ int ha_ndbinfo::rnd_init(bool scan)
     m_impl.m_first_use = false;
 
     /*
-      Due to different code paths in MySQL Server
-      for prepared statement protocol, some warnings
-      from 'handler::open' are lost and need to be
-      deffered to first use instead
+      Check table def. and generate warnings for incompatibilites
+      which is allowed but should generate a warning.
+      (Done this late due to different code paths in MySQL Server for
+      prepared statement protocol, where warnings from 'handler::open'
+      are lost).
     */
+    uint fields_found_in_ndb = 0;
     const NdbInfo::Table* ndb_tab = m_impl.m_table;
-    if (table->s->fields < ndb_tab->columns())
+    for (uint i = 0; i < table->s->fields; i++)
+    {
+      const Field* field = table->field[i];
+      const NdbInfo::Column* col = ndb_tab->getColumn(field->field_name);
+      if (!col)
+      {
+        // The column didn't exist
+        warn_incompatible(ndb_tab, true,
+                          "column '%s' does not exist",
+                          field->field_name);
+        continue;
+      }
+      fields_found_in_ndb++;
+    }
+
+    if (fields_found_in_ndb < ndb_tab->columns())
     {
       // There are more columns available in NDB
       warn_incompatible(ndb_tab, false,
@@ -466,7 +551,7 @@ int ha_ndbinfo::rnd_end()
 {
   DBUG_ENTER("ha_ndbinfo::rnd_end");
 
-  if (ndbcluster_is_disabled())
+  if (is_offline())
     DBUG_RETURN(0);
 
   assert(is_open());
@@ -486,7 +571,7 @@ int ha_ndbinfo::rnd_next(uchar *buf)
   int err;
   DBUG_ENTER("ha_ndbinfo::rnd_next");
 
-  if (ndbcluster_is_disabled())
+  if (is_offline())
     DBUG_RETURN(HA_ERR_END_OF_FILE);
 
   assert(is_open());
@@ -546,7 +631,7 @@ ha_ndbinfo::unpack_record(uchar *dst_row
   {
     Field *field = table->field[i];
     const NdbInfoRecAttr* record = m_impl.m_columns[i];
-    if (m_impl.m_columns[i])
+    if (record && !record->isNULL())
     {
       field->set_notnull();
       field->move_field_offset(dst_offset);
@@ -617,7 +702,7 @@ ndbinfo_find_files(handlerton *hton, THD
     List_iterator<LEX_STRING> it(*files);
     while ((dir_name=it++))
     {
-      if (strcmp(dir_name->str, ndbinfo_dbname))
+      if (strcmp(dir_name->str, opt_ndbinfo_dbname))
         continue;
 
       DBUG_PRINT("info", ("Hiding own databse '%s'", dir_name->str));
@@ -628,7 +713,7 @@ ndbinfo_find_files(handlerton *hton, THD
   }
 
   DBUG_ASSERT(db);
-  if (strcmp(db, ndbinfo_dbname))
+  if (strcmp(db, opt_ndbinfo_dbname))
     DBUG_RETURN(0); // Only hide files in "our" db
 
   /* Hide all files that start with "our" prefix */
@@ -636,7 +721,7 @@ ndbinfo_find_files(handlerton *hton, THD
   List_iterator<LEX_STRING> it(*files);
   while ((file_name=it++))
   {
-    if (is_prefix(file_name->str, table_prefix))
+    if (is_prefix(file_name->str, opt_ndbinfo_table_prefix))
     {
       DBUG_PRINT("info", ("Hiding '%s'", file_name->str));
       it.remove();
@@ -668,11 +753,11 @@ int ndbinfo_init(void *plugin)
 
   char prefix[FN_REFLEN];
   build_table_filename(prefix, sizeof(prefix) - 1,
-                       ndbinfo_dbname, table_prefix, "", 0);
+                       opt_ndbinfo_dbname, opt_ndbinfo_table_prefix, "", 0);
   DBUG_PRINT("info", ("prefix: '%s'", prefix));
   assert(g_ndb_cluster_connection);
   g_ndbinfo = new NdbInfo(g_ndb_cluster_connection, prefix,
-                          ndbinfo_dbname, table_prefix);
+                          opt_ndbinfo_dbname, opt_ndbinfo_table_prefix);
   if (!g_ndbinfo)
   {
     sql_print_error("Failed to create NdbInfo");
@@ -712,6 +797,7 @@ struct st_mysql_sys_var* ndbinfo_system_
   MYSQL_SYSVAR(database),
   MYSQL_SYSVAR(table_prefix),
   MYSQL_SYSVAR(version),
+  MYSQL_SYSVAR(offline),
 
   NULL
 };

=== modified file 'sql/ha_ndbinfo.h'
--- a/sql/ha_ndbinfo.h	2011-02-01 14:58:21 +0000
+++ b/sql/ha_ndbinfo.h	2011-05-23 11:57:55 +0000
@@ -83,6 +83,8 @@ private:
   bool is_open(void) const;
   bool is_closed(void) const { return ! is_open(); };
 
+  bool is_offline(void) const;
+
   struct ha_ndbinfo_impl& m_impl;
 
 };

=== added directory 'storage/ndb/cmake/os'
=== added file 'storage/ndb/cmake/os/Windows.cmake'
--- a/storage/ndb/cmake/os/Windows.cmake	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/cmake/os/Windows.cmake	2011-05-24 08:45:38 +0000
@@ -0,0 +1,23 @@
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+
+
+# avoid running system checks by using pre-cached check results
+# system checks are expensive on VS since every tiny program is to be compiled in
+# a VC solution.
+GET_FILENAME_COMPONENT(_SCRIPT_DIR ${CMAKE_CURRENT_LIST_FILE} PATH)
+INCLUDE(${_SCRIPT_DIR}/WindowsCache.cmake)
+

=== added file 'storage/ndb/cmake/os/WindowsCache.cmake'
--- a/storage/ndb/cmake/os/WindowsCache.cmake	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/cmake/os/WindowsCache.cmake	2011-05-24 08:45:38 +0000
@@ -0,0 +1,66 @@
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+
+# Avoid system checks on Windows by pre-caching results. Most of the system checks
+# are not relevant for Windows anyway and it takes a lot more time to run them,
+# since CMake creates a Visual Studio project for each tiny test.
+# Note that values are cached for VC++ only, MinGW would give slightly
+# different results.
+
+
+IF(MSVC)
+SET(HAVE_POSIX_MEMALIGN CACHE INTERNAL "")
+SET(HAVE_CLOCK_GETTIME CACHE INTERNAL "")
+SET(HAVE_PTHREAD_CONDATTR_SETCLOCK CACHE INTERNAL "")
+SET(HAVE_PTHREAD_SELF CACHE INTERNAL "")
+SET(HAVE_SCHED_GET_PRIORITY_MIN CACHE INTERNAL "")
+SET(HAVE_SCHED_GET_PRIORITY_MAX CACHE INTERNAL "")
+SET(HAVE_SCHED_SETAFFINITY CACHE INTERNAL "")
+SET(HAVE_SCHED_SETSCHEDULER CACHE INTERNAL "")
+SET(HAVE_PROCESSOR_BIND CACHE INTERNAL "")
+SET(HAVE_EPOLL_CREATE CACHE INTERNAL "")
+SET(HAVE_MEMALIGN CACHE INTERNAL "")
+SET(HAVE_SYSCONF CACHE INTERNAL "")
+SET(HAVE_DIRECTIO CACHE INTERNAL "")
+SET(HAVE_ATOMIC_SWAP32 CACHE INTERNAL "")
+SET(HAVE_MLOCK CACHE INTERNAL "")
+SET(HAVE_FFS CACHE INTERNAL "")
+SET(HAVE_PTHREAD_MUTEXATTR_INIT CACHE INTERNAL "")
+SET(HAVE_PTHREAD_MUTEXATTR_SETTYPE CACHE INTERNAL "")
+SET(HAVE_PTHREAD_SETSCHEDPARAM CACHE INTERNAL "")
+SET(HAVE_SUN_PREFETCH_H CACHE INTERNAL "")
+SET(HAVE___BUILTIN_FFS CACHE INTERNAL "")
+SET(HAVE__BITSCANFORWARD 1 CACHE INTERNAL "")
+SET(HAVE_LINUX_SCHEDULING CACHE INTERNAL "")
+SET(HAVE_SOLARIS_AFFINITY CACHE INTERNAL "")
+SET(HAVE_LINUX_FUTEX CACHE INTERNAL "")
+SET(HAVE_ATOMIC_H CACHE INTERNAL "")
+
+SET(NDB_SIZEOF_CHAR 1 CACHE INTERNAL "")
+SET(HAVE_NDB_SIZEOF_CHAR TRUE CACHE INTERNAL "")
+SET(NDB_SIZEOF_CHARP ${CMAKE_SIZEOF_VOID_P} CACHE INTERNAL "")
+SET(HAVE_NDB_SIZEOF_CHARP TRUE CACHE INTERNAL "")
+SET(NDB_SIZEOF_INT 4 CACHE INTERNAL "")
+SET(HAVE_NDB_SIZEOF_INT TRUE CACHE INTERNAL "")
+SET(NDB_SIZEOF_LONG 4 CACHE INTERNAL "")
+SET(HAVE_NDB_SIZEOF_LONG TRUE CACHE INTERNAL "")
+SET(NDB_SIZEOF_LONG_LONG 8 CACHE INTERNAL "")
+SET(HAVE_NDB_SIZEOF_LONG_LONG TRUE CACHE INTERNAL "")
+SET(NDB_SIZEOF_SHORT 2 CACHE INTERNAL "")
+SET(HAVE_NDB_SIZEOF_SHORT TRUE CACHE INTERNAL "")
+
+SET(NDB_BUILD_NDBMTD 1 CACHE INTERNAL "")
+ENDIF()

=== modified file 'storage/ndb/include/kernel/kernel_types.h'
--- a/storage/ndb/include/kernel/kernel_types.h	2011-04-19 09:01:07 +0000
+++ b/storage/ndb/include/kernel/kernel_types.h	2011-05-25 13:19:02 +0000
@@ -36,9 +36,7 @@ enum Operation_t {
   ,ZDELETE  = 3
   ,ZWRITE   = 4
   ,ZREAD_EX = 5
-#if 0
-  ,ZREAD_CONSISTENT = 6
-#endif
+  ,ZREFRESH = 6
   ,ZUNLOCK  = 7
 };
 

=== modified file 'storage/ndb/include/kernel/ndb_limits.h'
--- a/storage/ndb/include/kernel/ndb_limits.h	2011-04-17 18:25:41 +0000
+++ b/storage/ndb/include/kernel/ndb_limits.h	2011-05-26 15:04:45 +0000
@@ -197,6 +197,7 @@
 
 #define MAX_NDBMT_LQH_WORKERS 4
 #define MAX_NDBMT_LQH_THREADS 4
+#define MAX_NDBMT_TC_THREADS  2
 
 #define NDB_FILE_BUFFER_SIZE (256*1024)
 

=== modified file 'storage/ndb/include/kernel/signaldata/DiGetNodes.hpp'
--- a/storage/ndb/include/kernel/signaldata/DiGetNodes.hpp	2011-02-08 14:29:52 +0000
+++ b/storage/ndb/include/kernel/signaldata/DiGetNodes.hpp	2011-05-26 15:04:45 +0000
@@ -58,10 +58,13 @@ class DiGetNodesReq {
    */
   friend class Dbdih;
 public:
-  STATIC_CONST( SignalLength = 3 );
+  STATIC_CONST( SignalLength = 4 + (sizeof(void*) / sizeof(Uint32)) );
 private:
   Uint32 tableId;
   Uint32 hashValue;
   Uint32 distr_key_indicator;
+  Uint32 unused;
+  Uint32 jamBuffer[2];
 };
+
 #endif

=== modified file 'storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp'
--- a/storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp	2011-05-25 14:31:47 +0000
@@ -57,7 +57,7 @@ class FireTrigOrd {
 public:
   STATIC_CONST( SignalLength = 11 );
   STATIC_CONST( SignalWithGCILength = 9 );
-  STATIC_CONST( SignalLengthSuma = 12 );
+  STATIC_CONST( SignalLengthSuma = 14 );
 
 private:
   Uint32 m_connectionPtr;
@@ -72,15 +72,11 @@ private:
     Uint32 m_gci_hi;
     Uint32 m_triggerType;
   };
-  union {
-    Uint32 m_hashValue;
-    Uint32 m_transId1;
-  };
-  union {
-    Uint32 m_any_value;
-    Uint32 m_transId2;
-  };
+  Uint32 m_transId1;
+  Uint32 m_transId2;
   Uint32 m_gci_lo;
+  Uint32 m_hashValue;
+  Uint32 m_any_value;
   // Public methods
 public:
   Uint32 getConnectionPtr() const;

=== modified file 'storage/ndb/include/kernel/signaldata/SumaImpl.hpp'
--- a/storage/ndb/include/kernel/signaldata/SumaImpl.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/include/kernel/signaldata/SumaImpl.hpp	2011-05-25 14:31:47 +0000
@@ -306,6 +306,7 @@ struct SubSyncConf {
 struct SubTableData {
   friend bool printSUB_TABLE_DATA(FILE *, const Uint32 *, Uint32, Uint16);
   STATIC_CONST( SignalLength = 8 );
+  STATIC_CONST( SignalLengthWithTransId = 10 );
   SECTION( DICT_TAB_INFO = 0 );
   SECTION( ATTR_INFO = 0 );
   SECTION( AFTER_VALUES = 1 );
@@ -329,6 +330,8 @@ struct SubTableData {
   };
   Uint32 totalLen;
   Uint32 gci_lo;
+  Uint32 transId1;
+  Uint32 transId2;
 
   static void setOperation(Uint32& ri, Uint32 val) { 
     ri = (ri & 0xFFFFFF00) | val;

=== modified file 'storage/ndb/include/kernel/signaldata/TupCommit.hpp'
--- a/storage/ndb/include/kernel/signaldata/TupCommit.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/include/kernel/signaldata/TupCommit.hpp	2011-05-25 14:31:47 +0000
@@ -38,7 +38,7 @@ class TupCommitReq {
   friend bool printTUPCOMMITREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo);
 
 public:
-  STATIC_CONST( SignalLength = 5 );
+  STATIC_CONST( SignalLength = 7 );
 
 private:
 
@@ -50,6 +50,8 @@ private:
   Uint32 hashValue;
   Uint32 diskpage;
   Uint32 gci_lo;
+  Uint32 transId1;
+  Uint32 transId2;
 };
 
 #endif

=== modified file 'storage/ndb/include/ndb_version.h.in'
--- a/storage/ndb/include/ndb_version.h.in	2011-05-20 05:54:20 +0000
+++ b/storage/ndb/include/ndb_version.h.in	2011-05-26 15:04:45 +0000
@@ -652,4 +652,28 @@ ndb_tup_extrabits(Uint32 x)
   }
 }
 
+#define NDBD_REFRESH_TUPLE_70 NDB_MAKE_VERSION(7,0,26)
+#define NDBD_REFRESH_TUPLE_71 NDB_MAKE_VERSION(7,1,15)
+#define NDBD_REFRESH_TUPLE_72 NDB_MAKE_VERSION(7,2,1)
+
+static
+inline
+int
+ndb_refresh_tuple(Uint32 x)
+{
+  {
+    const Uint32 major = (x >> 16) & 0xFF;
+    const Uint32 minor = (x >>  8) & 0xFF;
+
+    if (major == 7 && minor < 2)
+    {
+      if (minor == 0)
+        return x >= NDBD_REFRESH_TUPLE_70;
+      else if (minor == 1)
+        return x >= NDBD_REFRESH_TUPLE_71;
+    }
+    return x >= NDBD_REFRESH_TUPLE_72;
+  }
+}
+
 #endif

=== modified file 'storage/ndb/include/ndbapi/NdbEventOperation.hpp'
--- a/storage/ndb/include/ndbapi/NdbEventOperation.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/include/ndbapi/NdbEventOperation.hpp	2011-05-25 14:31:47 +0000
@@ -221,6 +221,17 @@ public:
   Uint64 getLatestGCI() const;
 
   /**
+   * Retrieve the TransId of the latest retrieved event
+   *
+   * Only valid for data events.  If the kernel does not
+   * support transaction ids with events, the max Uint64
+   * value is returned.
+   *
+   * @return TransId
+   */
+  Uint64 getTransId() const;
+
+  /**
    * Get the latest error
    *
    * @return   Error object.

=== modified file 'storage/ndb/include/ndbapi/NdbOperation.hpp'
--- a/storage/ndb/include/ndbapi/NdbOperation.hpp	2011-04-29 09:23:56 +0000
+++ b/storage/ndb/include/ndbapi/NdbOperation.hpp	2011-05-26 15:04:45 +0000
@@ -914,6 +914,7 @@ public:
     DeleteRequest = 3,            ///< Delete Operation
     WriteRequest = 4,             ///< Write Operation
     ReadExclusive = 5,            ///< Read exclusive
+    RefreshRequest = 6,           ///<
     UnlockRequest = 7,            ///< Unlock operation
     OpenScanRequest,              ///< Scan Operation
     OpenRangeScanRequest,         ///< Range scan operation

=== modified file 'storage/ndb/include/ndbapi/NdbTransaction.hpp'
--- a/storage/ndb/include/ndbapi/NdbTransaction.hpp	2011-04-27 11:50:17 +0000
+++ b/storage/ndb/include/ndbapi/NdbTransaction.hpp	2011-05-26 15:04:45 +0000
@@ -752,6 +752,12 @@ public:
                                   const NdbOperation::OperationOptions *opts = 0,
                                   Uint32 sizeOfOptions = 0);
 
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+  const NdbOperation *refreshTuple(const NdbRecord *key_rec, const char *key_row,
+                                   const NdbOperation::OperationOptions *opts = 0,
+                                   Uint32 sizeOfOptions = 0);
+#endif
+
   /**
    * Scan a table, using NdbRecord to read out column data.
    *

=== modified file 'storage/ndb/include/ndbapi/ndb_cluster_connection.hpp'
--- a/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp	2011-02-04 17:52:38 +0000
+++ b/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp	2011-05-23 14:05:08 +0000
@@ -192,6 +192,7 @@ public:
   unsigned max_nodegroup();
   unsigned node_id();
   unsigned get_connect_count() const;
+  unsigned get_min_db_version() const;
 
   void init_get_next_node(Ndb_cluster_connection_node_iter &iter);
   unsigned int get_next_node(Ndb_cluster_connection_node_iter &iter);

=== modified file 'storage/ndb/ndb_configure.cmake'
--- a/storage/ndb/ndb_configure.cmake	2011-03-15 15:50:34 +0000
+++ b/storage/ndb/ndb_configure.cmake	2011-05-24 08:45:38 +0000
@@ -18,6 +18,26 @@
 #
 # Run platform checks and create ndb_config.h
 #
+
+
+# Include the platform-specific file. To allow exceptions, this code
+# looks for files in order of how specific they are. If there is, for
+# example, a generic Linux.cmake and a version-specific
+# Linux-2.6.28-11-generic, it will pick Linux-2.6.28-11-generic and
+# include it. It is then up to the file writer to include the generic
+# version if necessary.
+FOREACH(_base
+        ${CMAKE_SYSTEM_NAME}-${CMAKE_SYSTEM_VERSION}-${CMAKE_SYSTEM_PROCESSOR}
+        ${CMAKE_SYSTEM_NAME}-${CMAKE_SYSTEM_VERSION}
+        ${CMAKE_SYSTEM_NAME})
+  SET(_file ${CMAKE_CURRENT_SOURCE_DIR}/cmake/os/${_base}.cmake)
+  IF(EXISTS ${_file})
+    INCLUDE(${_file})
+    BREAK()
+  ENDIF()
+ENDFOREACH()
+
+
 INCLUDE(CheckFunctionExists)
 INCLUDE(CheckIncludeFiles)
 INCLUDE(CheckCSourceCompiles)
@@ -31,7 +51,7 @@ CHECK_FUNCTION_EXISTS(pthread_condattr_s
 CHECK_FUNCTION_EXISTS(pthread_self HAVE_PTHREAD_SELF)
 CHECK_FUNCTION_EXISTS(sched_get_priority_min HAVE_SCHED_GET_PRIORITY_MIN)
 CHECK_FUNCTION_EXISTS(sched_get_priority_max HAVE_SCHED_GET_PRIORITY_MAX)
-CHECK_FUNCTION_EXISTS(sched_setaffinity HAVE_SCHED_SETAFFINTIY)
+CHECK_FUNCTION_EXISTS(sched_setaffinity HAVE_SCHED_SETAFFINITY)
 CHECK_FUNCTION_EXISTS(sched_setscheduler HAVE_SCHED_SETSCHEDULER)
 CHECK_FUNCTION_EXISTS(processor_bind HAVE_PROCESSOR_BIND)
 CHECK_FUNCTION_EXISTS(epoll_create HAVE_EPOLL_CREATE)
@@ -153,7 +173,8 @@ IF(WITH_NDBMTD)
     return a;
   }"
   NDB_BUILD_NDBMTD)
-
+ELSE()
+  SET(NDB_BUILD_NDBMTD CACHE INTERNAL "")
 ENDIF()
 
 SET(WITH_NDB_PORT "" CACHE INTEGER

=== modified file 'storage/ndb/ndb_configure.m4'
--- a/storage/ndb/ndb_configure.m4	2011-05-20 05:54:20 +0000
+++ b/storage/ndb/ndb_configure.m4	2011-05-26 15:04:45 +0000
@@ -2,7 +2,7 @@
 # Should be updated when creating a new NDB version
 NDB_VERSION_MAJOR=7
 NDB_VERSION_MINOR=0
-NDB_VERSION_BUILD=25
+NDB_VERSION_BUILD=26
 NDB_VERSION_STATUS=""
 
 dnl for build ndb docs

=== modified file 'storage/ndb/src/common/debugger/signaldata/SumaImpl.cpp'
--- a/storage/ndb/src/common/debugger/signaldata/SumaImpl.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/common/debugger/signaldata/SumaImpl.cpp	2011-05-25 14:31:47 +0000
@@ -182,6 +182,11 @@ printSUB_TABLE_DATA(FILE * output, const
   fprintf(output, " tableId: %x\n", sig->tableId);
   fprintf(output, " operation: %x\n", 
 	  SubTableData::getOperation(sig->requestInfo));
+  if (len == SubTableData::SignalLengthWithTransId)
+  {
+    fprintf(output, " TransId : %x %x\n",
+            sig->transId1, sig->transId2);
+  }
   return false;
 }
 

=== modified file 'storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp'
--- a/storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp	2011-04-29 09:23:56 +0000
+++ b/storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp	2011-05-26 15:04:45 +0000
@@ -36,6 +36,7 @@ printTCKEYREQ(FILE * output, const Uint3
 	  sig->getOperationType(requestInfo) == ZDELETE  ? "Delete" :
 	  sig->getOperationType(requestInfo) == ZWRITE   ? "Write"  :
           sig->getOperationType(requestInfo) == ZUNLOCK  ? "Unlock" :
+          sig->getOperationType(requestInfo) == ZREFRESH ? "Refresh" :
 	  "Unknown");
   {
     if(sig->getDirtyFlag(requestInfo)){

=== modified file 'storage/ndb/src/kernel/blocks/LocalProxy.cpp'
--- a/storage/ndb/src/kernel/blocks/LocalProxy.cpp	2011-04-27 10:48:16 +0000
+++ b/storage/ndb/src/kernel/blocks/LocalProxy.cpp	2011-05-26 11:52:38 +0000
@@ -661,6 +661,19 @@ LocalProxy::sendNF_COMPLETEREP(Signal* s
 
     sendSignal(DBDIH_REF, GSN_NF_COMPLETEREP,
                signal, NFCompleteRep::SignalLength, JBB);
+
+    if (number() == DBTC)
+    {
+      /**
+       * DBTC sends NF_COMPLETEREP "early" to QMGR
+       *   so that it can allow the api to handle node-failure of
+       *   transactions earlier...
+       * See Qmgr::execNF_COMPLETEREP
+       */
+      jam();
+      sendSignal(QMGR_REF, GSN_NF_COMPLETEREP, signal,
+                 NFCompleteRep::SignalLength, JBB);
+    }
   }
 }
 

=== modified file 'storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp'
--- a/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp	2011-04-19 09:01:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp	2011-05-25 13:19:02 +0000
@@ -140,7 +140,7 @@ ndbout << "Ptr: " << ptr.p->word32 << " 
 /**
  * Check kernel_types for other operation types
  */
-#define ZSCAN_OP 6
+#define ZSCAN_OP 8
 #define ZSCAN_REC_SIZE 256
 #define ZSTAND_BY 2
 #define ZTABLESIZE 16
@@ -642,6 +642,7 @@ public:
   class Dblqh* c_lqh;
 
   void execACCMINUPDATE(Signal* signal);
+  void removerow(Uint32 op, const Local_key*);
 
 private:
   BLOCK_DEFINES(Dbacc);

=== modified file 'storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp	2011-04-20 11:58:16 +0000
+++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp	2011-05-25 13:19:02 +0000
@@ -971,9 +971,12 @@ void Dbacc::initOpRec(Signal* signal) 
   Uint32 readFlag = (((Treqinfo >> 4) & 0x3) == 0);      // Only 1 if Read
   Uint32 dirtyFlag = (((Treqinfo >> 6) & 0x1) == 1);     // Only 1 if Dirty
   Uint32 dirtyReadFlag = readFlag & dirtyFlag;
+  Uint32 operation = Treqinfo & 0xf;
+  if (operation == ZREFRESH)
+    operation = ZWRITE; /* Insert if !exist, otherwise lock */
 
   Uint32 opbits = 0;
-  opbits |= Treqinfo & 0x7;
+  opbits |= operation;
   opbits |= ((Treqinfo >> 4) & 0x3) ? (Uint32) Operationrec::OP_LOCK_MODE : 0;
   opbits |= ((Treqinfo >> 4) & 0x3) ? (Uint32) Operationrec::OP_ACC_LOCK_MODE : 0;
   opbits |= (dirtyReadFlag) ? (Uint32) Operationrec::OP_DIRTY_READ : 0;
@@ -2323,6 +2326,27 @@ void Dbacc::execACCMINUPDATE(Signal* sig
   ndbrequire(false);
 }//Dbacc::execACCMINUPDATE()
 
+void
+Dbacc::removerow(Uint32 opPtrI, const Local_key* key)
+{
+  jamEntry();
+  operationRecPtr.i = opPtrI;
+  ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+  Uint32 opbits = operationRecPtr.p->m_op_bits;
+  fragrecptr.i = operationRecPtr.p->fragptr;
+
+  /* Mark element disappeared */
+  opbits |= Operationrec::OP_ELEMENT_DISAPPEARED;
+  opbits &= ~Uint32(Operationrec::OP_COMMIT_DELETE_CHECK);
+  operationRecPtr.p->m_op_bits = opbits;
+
+#ifdef VM_TRACE
+  ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+  ndbrequire(operationRecPtr.p->localdata[0] == key->m_page_no);
+  ndbrequire(operationRecPtr.p->localdata[1] == key->m_page_idx);
+#endif
+}//Dbacc::removerow()
+
 /* ******************--------------------------------------------------------------- */
 /* ACC_COMMITREQ                                        COMMIT  TRANSACTION          */
 /*                                                     SENDER: LQH,    LEVEL B       */
@@ -2371,6 +2395,16 @@ void Dbacc::execACC_COMMITREQ(Signal* si
       }//if
     } else {
       jam();                                                /* EXPAND PROCESS HANDLING */
+      if (unlikely(opbits & Operationrec::OP_ELEMENT_DISAPPEARED))
+      {
+        jam();
+        /* Commit of refresh of a non-existing tuple.
+         *   ZREFRESH->ZWRITE->ZINSERT
+         * Do not affect element count
+         */
+        ndbrequire((opbits & Operationrec::OP_MASK) == ZINSERT);
+        return;
+      }
       fragrecptr.p->noOfElements++;
       fragrecptr.p->slack -= fragrecptr.p->elementLength;
       if (fragrecptr.p->slack >= (1u << 31)) { 

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp	2011-05-17 23:29:55 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp	2011-05-24 05:27:15 +0000
@@ -222,10 +222,6 @@ public:
       }
       return false;
     }
-
-    /** Singly linked in internal (attributeId) order */
-    // TODO use DL template when possible to have more than 1
-    Uint32 nextAttributeIdPtrI;
   };
   typedef Ptr<AttributeRecord> AttributeRecordPtr;
   ArrayPool<AttributeRecord> c_attributeRecordPool;

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp	2011-05-18 09:07:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp	2011-05-26 11:49:47 +0000
@@ -122,7 +122,7 @@ public:
    * ONGOING */
   struct ApiConnectRecord {
     Uint64 apiGci;
-    Uint32 nextApi;
+    Uint32 senderData;
   };
   typedef Ptr<ApiConnectRecord> ApiConnectRecordPtr;
 
@@ -947,7 +947,6 @@ private:
   bool isMaster();
   bool isActiveMaster();
 
-  void emptyverificbuffer(Signal *, bool aContintueB);
   void handleGcpStateInMaster(Signal *, NodeRecordPtr failedNodeptr);
   void initRestartInfo(Signal*);
   void initRestorableGciFiles();
@@ -1315,21 +1314,25 @@ private:
   struct DIVERIFY_queue
   {
     DIVERIFY_queue() {
-      cfirstVerifyQueue = clastVerifyQueue = RNIL;
-      cverifyQueueCounter = 0;
+      m_ref = 0;
+      cfirstVerifyQueue = clastVerifyQueue = 0;
       apiConnectRecord = 0;
+      m_empty_done = 1;
     }
+    ApiConnectRecord *apiConnectRecord;
     Uint32 cfirstVerifyQueue;
     Uint32 clastVerifyQueue;
-    Uint32 cverifyQueueCounter;
-    ApiConnectRecord *apiConnectRecord;
+    Uint32 m_empty_done;
+    Uint32 m_ref;
   };
 
   bool isEmpty(const DIVERIFY_queue&);
-  void enqueue(DIVERIFY_queue&, Ptr<ApiConnectRecord>);
-  void dequeue(DIVERIFY_queue&, Ptr<ApiConnectRecord> &);
+  void enqueue(DIVERIFY_queue&, Uint32 senderData, Uint64 gci);
+  void dequeue(DIVERIFY_queue&, ApiConnectRecord &);
+  void emptyverificbuffer(Signal *, Uint32 q, bool aContintueB);
+  void emptyverificbuffer_check(Signal*, Uint32, Uint32);
 
-  DIVERIFY_queue c_diverify_queue[1];
+  DIVERIFY_queue c_diverify_queue[MAX_NDBMT_LQH_THREADS];
   Uint32 c_diverify_queue_cnt;
 
   /*------------------------------------------------------------------------*/
@@ -1372,8 +1375,15 @@ private:
    */
   struct MicroGcp
   {
+    MicroGcp() { }
     bool m_enabled;
     Uint32 m_master_ref;
+
+    /**
+     * rw-lock that protects multiple parallel DIVERIFY (readers) from
+     *   updates to gcp-state (e.g GCP_PREPARE, GCP_COMMIT)
+     */
+    NdbSeqLock m_lock;
     Uint64 m_old_gci;
     Uint64 m_current_gci; // Currently active
     Uint64 m_new_gci;     // Currently being prepared...

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-05-20 05:54:20 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-05-26 15:04:45 +0000
@@ -542,7 +542,7 @@ void Dbdih::execCONTINUEB(Signal* signal
     break;
   case DihContinueB::ZEMPTY_VERIFY_QUEUE:
     jam();
-    emptyverificbuffer(signal, true);
+    emptyverificbuffer(signal, signal->theData[1], true);
     return;
     break;
   case DihContinueB::ZCHECK_GCP_STOP:
@@ -1298,6 +1298,8 @@ void Dbdih::execREAD_CONFIG_REQ(Signal* 
   ndbrequireErr(!ndb_mgm_get_int_parameter(p, CFG_DIH_API_CONNECT, 
 					   &capiConnectFileSize),
 		NDBD_EXIT_INVALID_CONFIG);
+  capiConnectFileSize++; // Increase by 1...so that srsw queue never gets full
+
   ndbrequireErr(!ndb_mgm_get_int_parameter(p, CFG_DIH_FRAG_CONNECT, 
 					   &cfragstoreFileSize),
 		NDBD_EXIT_INVALID_CONFIG);
@@ -3469,6 +3471,12 @@ void Dbdih::execEND_TOREQ(Signal* signal
              EndToConf::SignalLength, JBB);
 }//Dbdih::execEND_TOREQ()
 
+#define DIH_TAB_WRITE_LOCK(tabPtrP) \
+  do { assertOwnThread(); tabPtrP->m_lock.write_lock(); } while (0)
+
+#define DIH_TAB_WRITE_UNLOCK(tabPtrP) \
+  do { assertOwnThread(); tabPtrP->m_lock.write_unlock(); } while (0)
+
 /* --------------------------------------------------------------------------*/
 /*       AN ORDER TO START OR COMMIT THE REPLICA CREATION ARRIVED FROM THE   */
 /*       MASTER.                                                             */
@@ -3509,7 +3517,8 @@ void Dbdih::execCREATE_FRAGREQ(Signal* s
     dump_replica_info(fragPtr.p);
   }
   ndbrequire(frReplicaPtr.i != RNIL);
-  
+
+  DIH_TAB_WRITE_LOCK(tabPtr.p);
   switch (replicaType) {
   case CreateFragReq::STORED:
     jam();
@@ -3544,6 +3553,7 @@ void Dbdih::execCREATE_FRAGREQ(Signal* s
     ndbrequire(false);
     break;
   }//switch
+  DIH_TAB_WRITE_UNLOCK(tabPtr.p);
 
   /* ------------------------------------------------------------------------*/
   /*       THE NEW NODE OF THIS REPLICA IS THE STARTING NODE.                */
@@ -8033,7 +8043,9 @@ Dbdih::sendAddFragreq(Signal* signal, Co
     if (AlterTableReq::getReorgFragFlag(connectPtr.p->m_alter.m_changeMask))
     {
       jam();
+      DIH_TAB_WRITE_LOCK(tabPtr.p);
       tabPtr.p->m_new_map_ptr_i = connectPtr.p->m_alter.m_new_map_ptr_i;
+      DIH_TAB_WRITE_UNLOCK(tabPtr.p);
     }
 
     if (AlterTableReq::getAddFragFlag(connectPtr.p->m_alter.m_changeMask))
@@ -8521,6 +8533,7 @@ void Dbdih::execALTER_TAB_REQ(Signal * s
     if (AlterTableReq::getReorgFragFlag(connectPtr.p->m_alter.m_changeMask))
     {
       jam();
+      DIH_TAB_WRITE_LOCK(tabPtr.p);
       Uint32 save = tabPtr.p->m_map_ptr_i;
       tabPtr.p->m_map_ptr_i = tabPtr.p->m_new_map_ptr_i;
       tabPtr.p->m_new_map_ptr_i = save;
@@ -8532,6 +8545,7 @@ void Dbdih::execALTER_TAB_REQ(Signal * s
         getFragstore(tabPtr.p, i, fragPtr);
         fragPtr.p->distributionKey = (fragPtr.p->distributionKey + 1) & 0xFF;
       }
+      DIH_TAB_WRITE_UNLOCK(tabPtr.p);
 
       ndbassert(tabPtr.p->m_scan_count[1] == 0);
       tabPtr.p->m_scan_count[1] = tabPtr.p->m_scan_count[0];
@@ -8556,8 +8570,10 @@ void Dbdih::execALTER_TAB_REQ(Signal * s
 
     send_alter_tab_conf(signal, connectPtr);
 
+    DIH_TAB_WRITE_LOCK(tabPtr.p);
     tabPtr.p->m_new_map_ptr_i = RNIL;
     tabPtr.p->m_scan_reorg_flag = 0;
+    DIH_TAB_WRITE_UNLOCK(tabPtr.p);
 
     ndbrequire(tabPtr.p->connectrec == connectPtr.i);
     tabPtr.p->connectrec = RNIL;
@@ -9004,7 +9020,7 @@ void Dbdih::execDIGETNODESREQ(Signal* si
   Uint32 fragId, newFragId = RNIL;
   DiGetNodesConf * const conf = (DiGetNodesConf *)&signal->theData[0];
   TabRecord* regTabDesc = tabRecord;
-  EmulatedJamBuffer * jambuf = jamBuffer();
+  EmulatedJamBuffer * jambuf = * (EmulatedJamBuffer**)(req->jamBuffer);
   thrjamEntry(jambuf);
   ptrCheckGuard(tabPtr, ttabFileSize, regTabDesc);
 
@@ -9015,17 +9031,18 @@ void Dbdih::execDIGETNODESREQ(Signal* si
     ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
   }
 
+loop:
+  Uint32 val = tabPtr.p->m_lock.read_lock();
   Uint32 map_ptr_i = tabPtr.p->m_map_ptr_i;
   Uint32 new_map_ptr_i = tabPtr.p->m_new_map_ptr_i;
 
   /* When distr key indicator is set, regardless
-   * of distribution algorithm in use, hashValue 
+   * of distribution algorithm in use, hashValue
    * IS fragment id.
    */
   if (req->distr_key_indicator)
   {
     fragId = hashValue;
-    
     if (unlikely(fragId >= tabPtr.p->totalfragments))
     {
       thrjam(jambuf);
@@ -9097,6 +9114,9 @@ void Dbdih::execDIGETNODESREQ(Signal* si
       (fragPtr.p->distributionKey << 16) +
       (dihGetInstanceKey(fragPtr) << 24);
   }
+
+  if (unlikely(!tabPtr.p->m_lock.read_unlock(val)))
+    goto loop;
 }//Dbdih::execDIGETNODESREQ()
 
 Uint32 Dbdih::extractNodeInfo(const Fragmentstore * fragPtr, Uint32 nodes[]) 
@@ -9202,62 +9222,71 @@ void Dbdih::initialiseFragstore()
   }//for    
 }//Dbdih::initialiseFragstore()
 
+#ifndef NDB_HAVE_RMB
+#define rmb() do { } while (0)
+#endif
+
+#ifndef NDB_HAVE_WMB
+#define wmb() do { } while (0)
+#endif
+
 inline
 bool
 Dbdih::isEmpty(const DIVERIFY_queue & q)
 {
-  return q.cverifyQueueCounter == 0;
+  return q.cfirstVerifyQueue == q.clastVerifyQueue;
 }
 
 inline
 void
-Dbdih::enqueue(DIVERIFY_queue & q, Ptr<ApiConnectRecord> conRecord)
+Dbdih::enqueue(DIVERIFY_queue & q, Uint32 senderData, Uint64 gci)
 {
+#ifndef NDEBUG
+  /**
+   * - assert only
+   * - we must read "first" *before* publishing "last",
+   *   or else the DIH-thread could already have consumed the entry
+   *   when we call assert
+   */
   Uint32 first = q.cfirstVerifyQueue;
+#endif
+
   Uint32 last = q.clastVerifyQueue;
-  Uint32 count = q.cverifyQueueCounter;
   ApiConnectRecord * apiConnectRecord = q.apiConnectRecord;
 
-  Ptr<ApiConnectRecord> tmp;
-  tmp.i = last;
-  if (last != RNIL)
-  {
-    tmp.i = last;
-    ptrCheckGuard(tmp, capiConnectFileSize, apiConnectRecord);
-    tmp.p->nextApi = conRecord.i;
+  apiConnectRecord[last].senderData = senderData;
+  apiConnectRecord[last].apiGci = gci;
+  wmb();
+  if (last + 1 == capiConnectFileSize)
+  {
+    q.clastVerifyQueue = 0;
   }
   else
   {
-    ndbassert(count == 0);
-    first = conRecord.i;
+    q.clastVerifyQueue = last + 1;
   }
-  q.cfirstVerifyQueue = first;
-  q.clastVerifyQueue = conRecord.i;
-  q.cverifyQueueCounter = count + 1;
+  assert(q.clastVerifyQueue != first);
 }
 
 inline
 void
-Dbdih::dequeue(DIVERIFY_queue & q, Ptr<ApiConnectRecord> & conRecord)
+Dbdih::dequeue(DIVERIFY_queue & q, ApiConnectRecord & conRecord)
 {
   Uint32 first = q.cfirstVerifyQueue;
-  Uint32 last = q.clastVerifyQueue;
-  Uint32 count = q.cverifyQueueCounter;
   ApiConnectRecord * apiConnectRecord = q.apiConnectRecord;
 
-  conRecord.i = first;
-  ptrCheckGuard(conRecord, capiConnectFileSize, apiConnectRecord);
-  Uint32 next = conRecord.p->nextApi;
-  if (first == last)
-  {
-    ndbrequire(next == RNIL);
-    ndbassert(count == 1);
-    last = RNIL;
-  }
-  ndbrequire(count > 0);
-  q.cfirstVerifyQueue = next;
-  q.clastVerifyQueue = last;
-  q.cverifyQueueCounter = count - 1;
+  rmb();
+  conRecord.senderData = apiConnectRecord[first].senderData;
+  conRecord.apiGci = apiConnectRecord[first].apiGci;
+
+  if (first + 1 == capiConnectFileSize)
+  {
+    q.cfirstVerifyQueue = 0;
+  }
+  else
+  {
+    q.cfirstVerifyQueue = first + 1;
+  }
 }
 
 /*
@@ -9273,10 +9302,15 @@ Dbdih::dequeue(DIVERIFY_queue & q, Ptr<A
   */
 void Dbdih::execDIVERIFYREQ(Signal* signal)
 {
-  EmulatedJamBuffer * jambuf = jamBuffer();
+  EmulatedJamBuffer * jambuf = * (EmulatedJamBuffer**)(signal->theData+2);
   thrjamEntry(jambuf);
-  if ((getBlockCommit() == false) &&
-      isEmpty(c_diverify_queue[0]))
+  Uint32 qno = signal->theData[1];
+  ndbassert(qno < NDB_ARRAY_SIZE(c_diverify_queue));
+  DIVERIFY_queue & q = c_diverify_queue[qno];
+loop:
+  Uint32 val = m_micro_gcp.m_lock.read_lock();
+  Uint32 blocked = getBlockCommit() == true ? 1 : 0;
+  if (blocked == 0 && isEmpty(q))
   {
     thrjam(jambuf);
     /*-----------------------------------------------------------------------*/
@@ -9289,23 +9323,20 @@ void Dbdih::execDIVERIFYREQ(Signal* sign
     signal->theData[1] = (Uint32)(m_micro_gcp.m_current_gci >> 32);
     signal->theData[2] = (Uint32)(m_micro_gcp.m_current_gci & 0xFFFFFFFF);
     signal->theData[3] = 0;
+    if (unlikely(! m_micro_gcp.m_lock.read_unlock(val)))
+      goto loop;
     return;
   }//if
   /*-------------------------------------------------------------------------*/
   // Since we are blocked we need to put this operation last in the verify
   // queue to ensure that operation starts up in the correct order.
   /*-------------------------------------------------------------------------*/
-  ApiConnectRecordPtr localApiConnectptr;
-  DIVERIFY_queue & q = c_diverify_queue[0];
-
-  localApiConnectptr.i = signal->theData[0];
-  ptrCheckGuard(localApiConnectptr, capiConnectFileSize, q.apiConnectRecord);
-  localApiConnectptr.p->apiGci = m_micro_gcp.m_new_gci;
-  localApiConnectptr.p->nextApi = RNIL;
-
-  enqueue(q, localApiConnectptr);
-  emptyverificbuffer(signal, false);
-  signal->theData[3] = 1; // Indicate no immediate return
+  enqueue(q, signal->theData[0], m_micro_gcp.m_new_gci);
+  if (blocked == 0 && jambuf == jamBuffer())
+  {
+    emptyverificbuffer(signal, 0, false);
+  }
+  signal->theData[3] = blocked + 1; // Indicate no immediate return
   return;
 }//Dbdih::execDIVERIFYREQ()
 
@@ -9489,15 +9520,18 @@ Dbdih::execUPGRADE_PROTOCOL_ORD(Signal* 
 }
 
 void
-Dbdih::startGcpLab(Signal* signal, Uint32 aWaitTime) 
+Dbdih::startGcpLab(Signal* signal, Uint32 aWaitTime)
 {
-  if (! isEmpty(c_diverify_queue[0]))
+  for (Uint32 i = 0; i < c_diverify_queue_cnt; i++)
   {
-    // Previous global checkpoint is not yet completed.
-    jam();
-    signal->theData[0] = DihContinueB::ZSTART_GCP;
-    sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 10, 1);
-    return;
+    if (c_diverify_queue[i].m_empty_done == 0)
+    {
+      // Previous global checkpoint is not yet completed.
+      jam();
+      signal->theData[0] = DihContinueB::ZSTART_GCP;
+      sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 10, 1);
+      return;
+    }
   }
 
   emptyWaitGCPMasterQueue(signal,
@@ -10057,10 +10091,12 @@ void Dbdih::execGCP_PREPARE(Signal* sign
   
   ndbrequire(m_micro_gcp.m_state == MicroGcp::M_GCP_IDLE);
 
+  m_micro_gcp.m_lock.write_lock();
   cgckptflag = true;
   m_micro_gcp.m_state = MicroGcp::M_GCP_PREPARE;
   m_micro_gcp.m_new_gci = gci;
   m_micro_gcp.m_master_ref = retRef;
+  m_micro_gcp.m_lock.write_unlock();
 
   if (ERROR_INSERTED(7031))
   {
@@ -10174,10 +10210,18 @@ void Dbdih::execGCP_COMMIT(Signal* signa
   m_micro_gcp.m_state = MicroGcp::M_GCP_COMMIT;
   m_micro_gcp.m_master_ref = calcDihBlockRef(masterNodeId);
   
+  m_micro_gcp.m_lock.write_lock();
   m_micro_gcp.m_old_gci = m_micro_gcp.m_current_gci;
   m_micro_gcp.m_current_gci = gci;
   cgckptflag = false;
-  emptyverificbuffer(signal, true);
+  m_micro_gcp.m_lock.write_unlock();
+
+  for (Uint32 i = 0; i < c_diverify_queue_cnt; i++)
+  {
+    jam();
+    c_diverify_queue[i].m_empty_done = 0;
+    emptyverificbuffer(signal, i, true);
+  }
 
   GCPNoMoreTrans* req2 = (GCPNoMoreTrans*)signal->getDataPtrSend();
   req2->senderRef = reference();
@@ -14723,45 +14767,79 @@ void Dbdih::createFileRw(Signal* signal,
   sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
 }//Dbdih::createFileRw()
 
-void Dbdih::emptyverificbuffer(Signal* signal, bool aContinueB)
+void
+Dbdih::emptyverificbuffer(Signal* signal, Uint32 q, bool aContinueB)
 {
-  if (isEmpty(c_diverify_queue[0]))
+  if(unlikely(getBlockCommit() == true))
   {
     jam();
     return;
-  }//if
-  ApiConnectRecordPtr localApiConnectptr;
-  if(getBlockCommit() == false){
+  }
+
+  if (!isEmpty(c_diverify_queue[q]))
+  {
     jam();
-    dequeue(c_diverify_queue[0], localApiConnectptr);
-    ndbrequire(localApiConnectptr.p->apiGci <= m_micro_gcp.m_current_gci);
-    signal->theData[0] = localApiConnectptr.i;
+
+    ApiConnectRecord localApiConnect;
+    dequeue(c_diverify_queue[q], localApiConnect);
+    ndbrequire(localApiConnect.apiGci <= m_micro_gcp.m_current_gci);
+    signal->theData[0] = localApiConnect.senderData;
     signal->theData[1] = (Uint32)(m_micro_gcp.m_current_gci >> 32);
     signal->theData[2] = (Uint32)(m_micro_gcp.m_current_gci & 0xFFFFFFFF);
     signal->theData[3] = 0;
-    sendSignal(clocaltcblockref, GSN_DIVERIFYCONF, signal, 4, JBB);
-    if (aContinueB == true) {
-      jam();
-      //-----------------------------------------------------------------------
-      // This emptying happened as part of a take-out process by continueb signals.
-      // This ensures that we will empty the queue eventually. We will also empty
-      // one item every time we insert one item to ensure that the list doesn't
-      // grow when it is not blocked.
-      //-----------------------------------------------------------------------
-      signal->theData[0] = DihContinueB::ZEMPTY_VERIFY_QUEUE;
-      sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
-    }//if
-  } else {
+    sendSignal(c_diverify_queue[q].m_ref, GSN_DIVERIFYCONF, signal, 4, JBB);
+  }
+  else if (aContinueB == true)
+  {
+    jam();
+    /**
+     * Make sure that we don't miss any pending transactions
+     *   (transactions that are added to list by other thread
+     *    while we execute this code)
+     */
+    Uint32 blocks[] = { DBTC, 0 };
+    Callback c = { safe_cast(&Dbdih::emptyverificbuffer_check), q };
+    synchronize_threads_for_blocks(signal, blocks, c);
+    return;
+  }
+
+  if (aContinueB == true)
+  {
     jam();
     //-----------------------------------------------------------------------
-    // We are blocked so it is no use in continuing the emptying of the
-    // verify buffer. Whenever the block is removed the emptying will
-    // restart.
+    // This emptying happened as part of a take-out process by continueb signals.
+    // This ensures that we will empty the queue eventually. We will also empty
+    // one item every time we insert one item to ensure that the list doesn't
+    // grow when it is not blocked.
     //-----------------------------------------------------------------------
-  }  
+    signal->theData[0] = DihContinueB::ZEMPTY_VERIFY_QUEUE;
+    signal->theData[1] = q;
+    sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+  }//if
+
   return;
 }//Dbdih::emptyverificbuffer()
 
+void
+Dbdih::emptyverificbuffer_check(Signal* signal, Uint32 q, Uint32 retVal)
+{
+  ndbrequire(retVal == 0);
+  if (!isEmpty(c_diverify_queue[q]))
+  {
+    jam();
+    signal->theData[0] = DihContinueB::ZEMPTY_VERIFY_QUEUE;
+    signal->theData[1] = q;
+    sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+  }
+  else
+  {
+    /**
+     * Done with emptyverificbuffer
+     */
+    c_diverify_queue[q].m_empty_done = 1;
+  }
+}
+
 /*************************************************************************/
 /*       FIND THE NODES FROM WHICH WE CAN EXECUTE THE LOG TO RESTORE THE */
 /*       DATA NODE IN A SYSTEM RESTART.                                  */
@@ -15456,15 +15534,21 @@ void Dbdih::initialiseRecordsLab(Signal*
   case 1:{
     ApiConnectRecordPtr apiConnectptr;
     jam();
+    c_diverify_queue[0].m_ref = calcTcBlockRef(getOwnNodeId());
     for (Uint32 i = 0; i < c_diverify_queue_cnt; i++)
     {
+      if (c_diverify_queue_cnt > 1)
+      {
+        c_diverify_queue[i].m_ref = numberToRef(DBTC, i + 1, 0);
+      }
       /******** INTIALIZING API CONNECT RECORDS ********/
       for (apiConnectptr.i = 0;
            apiConnectptr.i < capiConnectFileSize; apiConnectptr.i++)
       {
         refresh_watch_dog();
         ptrAss(apiConnectptr, c_diverify_queue[i].apiConnectRecord);
-        apiConnectptr.p->nextApi = RNIL;
+        apiConnectptr.p->senderData = RNIL;
+        apiConnectptr.p->apiGci = ~(Uint64)0;
       }//for
     }
     jam();
@@ -17255,10 +17339,11 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal
 	      c_nodeStartMaster.blockLcp, c_nodeStartMaster.blockGcp, c_nodeStartMaster.wait);
     for (Uint32 i = 0; i < c_diverify_queue_cnt; i++)
     {
-      infoEvent("[ %u : cfirstVerifyQueue = 0x%.8x, cverifyQueueCounter = %u ]",
+      infoEvent("[ %u : cfirstVerifyQueue = %u clastVerifyQueue = %u sz: %u]",
                 i,
                 c_diverify_queue[i].cfirstVerifyQueue,
-                c_diverify_queue[i].cverifyQueueCounter);
+                c_diverify_queue[i].clastVerifyQueue,
+                capiConnectFileSize);
     }
     infoEvent("cgcpOrderBlocked = %d",
               cgcpOrderBlocked);
@@ -17942,7 +18027,11 @@ void Dbdih::execUNBLOCK_COMMIT_ORD(Signa
     jam();
     
     c_blockCommit = false;
-    emptyverificbuffer(signal, true);
+    for (Uint32 i = 0; i<c_diverify_queue_cnt; i++)
+    {
+      c_diverify_queue[i].m_empty_done = 0;
+      emptyverificbuffer(signal, i, true);
+    }
   }
 }
 
@@ -18089,11 +18178,15 @@ void Dbdih::execDIH_SWITCH_REPLICA_REQ(S
     sendSignal(senderRef, GSN_DIH_SWITCH_REPLICA_REF, signal,
                DihSwitchReplicaRef::SignalLength, JBB);
   }//if
+
+  DIH_TAB_WRITE_LOCK(tabPtr.p);
   for (Uint32 i = 0; i < noOfReplicas; i++) {
     jam();
     ndbrequire(i < MAX_REPLICAS);
     fragPtr.p->activeNodes[i] = req->newNodeOrder[i];
   }//for
+  DIH_TAB_WRITE_UNLOCK(tabPtr.p);
+
   /**
    * Reply
    */

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2011-04-29 09:23:56 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2011-05-26 15:04:45 +0000
@@ -3157,8 +3157,10 @@ private:
 
 public:
   bool is_same_trans(Uint32 opId, Uint32 trid1, Uint32 trid2);
-  void get_op_info(Uint32 opId, Uint32 *hash, Uint32* gci_hi, Uint32* gci_lo);
+  void get_op_info(Uint32 opId, Uint32 *hash, Uint32* gci_hi, Uint32* gci_lo,
+                   Uint32* transId1, Uint32* transId2);
   void accminupdate(Signal*, Uint32 opPtrI, const Local_key*);
+  void accremoverow(Signal*, Uint32 opPtrI, const Local_key*);
 
   /**
    *
@@ -3329,7 +3331,8 @@ Dblqh::is_same_trans(Uint32 opId, Uint32
 
 inline
 void
-Dblqh::get_op_info(Uint32 opId, Uint32 *hash, Uint32* gci_hi, Uint32* gci_lo)
+Dblqh::get_op_info(Uint32 opId, Uint32 *hash, Uint32* gci_hi, Uint32* gci_lo,
+                   Uint32* transId1, Uint32* transId2)
 {
   TcConnectionrecPtr regTcPtr;  
   regTcPtr.i= opId;
@@ -3337,6 +3340,8 @@ Dblqh::get_op_info(Uint32 opId, Uint32 *
   *hash = regTcPtr.p->hashValue;
   *gci_hi = regTcPtr.p->gci_hi;
   *gci_lo = regTcPtr.p->gci_lo;
+  *transId1 = regTcPtr.p->transid[0];
+  *transId2 = regTcPtr.p->transid[1];
 }
 
 #include "../dbacc/Dbacc.hpp"
@@ -3368,6 +3373,16 @@ Dblqh::accminupdate(Signal* signal, Uint
 }
 
 inline
+void
+Dblqh::accremoverow(Signal* signal, Uint32 opId, const Local_key* key)
+{
+  TcConnectionrecPtr regTcPtr;
+  regTcPtr.i= opId;
+  ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec);
+  c_acc->removerow(regTcPtr.p->accConnectrec, key);
+}
+
+inline
 bool
 Dblqh::TRACE_OP_CHECK(const TcConnectionrec* regTcPtr)
 {

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-05-20 05:54:20 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-05-26 15:04:45 +0000
@@ -148,6 +148,7 @@ operator<<(NdbOut& out, Operation_t op)
   case ZDELETE: out << "DELETE"; break;
   case ZWRITE: out << "WRITE"; break;
   case ZUNLOCK: out << "UNLOCK"; break;
+  case ZREFRESH: out << "REFRESH"; break;
   }
   return out;
 }
@@ -4533,6 +4534,7 @@ void Dblqh::execLQHKEYREQ(Signal* signal
     regTcPtr->lockType = 
       op == ZREAD_EX ? ZUPDATE : 
       (Operation_t) op == ZWRITE ? ZINSERT : 
+      (Operation_t) op == ZREFRESH ? ZINSERT :
       (Operation_t) op == ZUNLOCK ? ZREAD : // lockType not relevant for unlock req
       (Operation_t) op;
   }
@@ -5072,6 +5074,7 @@ void Dblqh::prepareContinueAfterBlockedL
     case ZINSERT: TRACENR("INSERT"); break;
     case ZDELETE: TRACENR("DELETE"); break;
     case ZUNLOCK: TRACENR("UNLOCK"); break;
+    case ZREFRESH: TRACENR("REFRESH"); break;
     default: TRACENR("<Unknown: " << regTcPtr->operation << ">"); break;
     }
     
@@ -5121,7 +5124,6 @@ Dblqh::exec_acckeyreq(Signal* signal, Tc
   Uint32 taccreq;
   regTcPtr.p->transactionState = TcConnectionrec::WAIT_ACC;
   taccreq = regTcPtr.p->operation;
-  taccreq = taccreq + (regTcPtr.p->opSimple << 3);
   taccreq = taccreq + (regTcPtr.p->lockType << 4);
   taccreq = taccreq + (regTcPtr.p->dirtyOp << 6);
   taccreq = taccreq + (regTcPtr.p->replicaType << 7);
@@ -5286,15 +5288,17 @@ Dblqh::handle_nr_copy(Signal* signal, Pt
     if (match)
     {
       jam();
-      if (op != ZDELETE)
+      if (op != ZDELETE && op != ZREFRESH)
       {
 	if (TRACENR_FLAG)
-	  TRACENR(" Changing from to ZWRITE" << endl);
+	  TRACENR(" Changing from INSERT/UPDATE to ZWRITE" << endl);
 	regTcPtr.p->operation = ZWRITE;
       }
       goto run;
     }
-    
+
+    ndbassert(!match && op == ZINSERT);
+
     /**
      * 1) Delete row at specified rowid (if len > 0)
      * 2) Delete specified row at different rowid (if exists)
@@ -6006,7 +6010,7 @@ Dblqh::acckeyconf_tupkeyreq(Signal* sign
   
   TRACE_OP(regTcPtr, "TUPKEYREQ");
   
-  regTcPtr->m_use_rowid |= (op == ZINSERT);
+  regTcPtr->m_use_rowid |= (op == ZINSERT || op == ZREFRESH);
   regTcPtr->m_row_id.m_page_no = page_no;
   regTcPtr->m_row_id.m_page_idx = page_idx;
   
@@ -8066,6 +8070,8 @@ void Dblqh::commitContinueAfterBlockedLa
       tupCommitReq->hashValue = regTcPtr.p->hashValue;
       tupCommitReq->diskpage = RNIL;
       tupCommitReq->gci_lo = regTcPtr.p->gci_lo;
+      tupCommitReq->transId1 = regTcPtr.p->transid[0];
+      tupCommitReq->transId2 = regTcPtr.p->transid[1];
       EXECUTE_DIRECT(tup, GSN_TUP_COMMITREQ, signal, 
 		     TupCommitReq::SignalLength);
 

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-05-13 08:38:01 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-05-25 09:31:27 +0000
@@ -3682,10 +3682,11 @@ Dbspj::getNodes(Signal* signal, BuildKey
   req->tableId = tableId;
   req->hashValue = dst.hashInfo[1];
   req->distr_key_indicator = 0; // userDefinedPartitioning not supported!
+  * (EmulatedJamBuffer**)req->jamBuffer = jamBuffer();
 
 #if 1
   EXECUTE_DIRECT(DBDIH, GSN_DIGETNODESREQ, signal,
-                 DiGetNodesReq::SignalLength);
+                 DiGetNodesReq::SignalLength, 0);
 #else
   sendSignal(DBDIH_REF, GSN_DIGETNODESREQ, signal,
              DiGetNodesReq::SignalLength, JBB);

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2011-04-29 09:23:56 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2011-05-26 15:04:45 +0000
@@ -144,6 +144,13 @@
 
 class Dbtc: public SimulatedBlock {
 public:
+
+  /**
+   * In case of mt-TC...only one instance will perform actual take-over
+   *   let this be TAKE_OVER_INSTANCE
+   */
+  STATIC_CONST( TAKE_OVER_INSTANCE = 1 );
+
   enum ConnectionState {
     CS_CONNECTED = 0,
     CS_DISCONNECTED = 1,
@@ -1694,6 +1701,7 @@ private:
   void checkNodeFailComplete(Signal* signal, Uint32 failedNodeId, Uint32 bit);
 
   void apiFailBlockCleanupCallback(Signal* signal, Uint32 failedNodeId, Uint32 ignoredRc);
+  bool isRefreshSupported() const;
   
   // Initialisation
   void initData();

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2011-05-04 05:33:14 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2011-05-26 15:04:45 +0000
@@ -3061,6 +3061,7 @@ void Dbtc::execTCKEYREQ(Signal* signal) 
     case ZINSERT:
     case ZDELETE:
     case ZWRITE:
+    case ZREFRESH:
       jam();
       break;
     default:
@@ -3142,6 +3143,34 @@ handle_reorg_trigger(DiGetNodesConf * co
   }
 }
 
+bool
+Dbtc::isRefreshSupported() const
+{
+  const NodeVersionInfo& nvi = getNodeVersionInfo();
+  const Uint32 minVer = nvi.m_type[NodeInfo::DB].m_min_version;
+  const Uint32 maxVer = nvi.m_type[NodeInfo::DB].m_max_version;
+
+  if (likely (minVer == maxVer))
+  {
+    /* Normal case, use function */
+    return ndb_refresh_tuple(minVer);
+  }
+
+  /* As refresh feature was introduced across three minor versions
+   * we check that all data nodes support it.  This slow path
+   * should only be hit during upgrades between versions
+   */
+  for (Uint32 i=1; i < MAX_NODES; i++)
+  {
+    const NodeInfo& nodeInfo = getNodeInfo(i);
+    if ((nodeInfo.m_type == NODE_TYPE_DB) &&
+        (nodeInfo.m_connected) &&
+        (! ndb_refresh_tuple(nodeInfo.m_version)))
+      return false;
+  }
+  return true;
+}
+
 /**
  * tckeyreq050Lab
  * This method is executed once all KeyInfo has been obtained for
@@ -3188,6 +3217,7 @@ void Dbtc::tckeyreq050Lab(Signal* signal
   req->tableId = Ttableref;
   req->hashValue = TdistrHashValue;
   req->distr_key_indicator = regCachePtr->distributionKeyIndicator;
+  * (EmulatedJamBuffer**)req->jamBuffer = jamBuffer();
 
   /*-------------------------------------------------------------*/
   /* FOR EFFICIENCY REASONS WE AVOID THE SIGNAL SENDING HERE AND */
@@ -3198,7 +3228,7 @@ void Dbtc::tckeyreq050Lab(Signal* signal
   /* IS SPENT IN DIH AND EVEN LESS IN REPLICATED NDB.            */
   /*-------------------------------------------------------------*/
   EXECUTE_DIRECT(DBDIH, GSN_DIGETNODESREQ, signal,
-                 DiGetNodesReq::SignalLength);
+                 DiGetNodesReq::SignalLength, 0);
   DiGetNodesConf * conf = (DiGetNodesConf *)&signal->theData[0];
   UintR Tdata2 = conf->reqinfo;
   UintR TerrorIndicator = signal->theData[0];
@@ -3367,6 +3397,14 @@ void Dbtc::tckeyreq050Lab(Signal* signal
     TlastReplicaNo = tnoOfBackup + tnoOfStandby;
     regTcPtr->lastReplicaNo = (Uint8)TlastReplicaNo;
     regTcPtr->noOfNodes = (Uint8)(TlastReplicaNo + 1);
+
+    if (unlikely((Toperation == ZREFRESH) &&
+                 (! isRefreshSupported())))
+    {
+      /* Function not implemented yet */
+      TCKEY_abort(signal,63);
+      return;
+    }
   }//if
 
   if (regCachePtr->isLongTcKeyReq || 
@@ -4849,6 +4887,7 @@ void Dbtc::diverify010Lab(Signal* signal
   UintR TfirstfreeApiConnectCopy = cfirstfreeApiConnectCopy;
   ApiConnectRecord * const regApiPtr = apiConnectptr.p;
   signal->theData[0] = apiConnectptr.i;
+  signal->theData[1] = instance() ? instance() - 1 : 0;
   if (ERROR_INSERTED(8022)) {
     jam();
     systemErrorLab(signal, __LINE__);
@@ -4878,7 +4917,9 @@ void Dbtc::diverify010Lab(Signal* signal
        * CONNECTIONS AND THEN WHEN ALL DIVERIFYCONF HAVE BEEN RECEIVED THE 
        * COMMIT MESSAGE CAN BE SENT TO ALL INVOLVED PARTS.
        *---------------------------------------------------------------------*/
-      EXECUTE_DIRECT(DBDIH, GSN_DIVERIFYREQ, signal, 1);
+      * (EmulatedJamBuffer**)(signal->theData+2) = jamBuffer();
+      EXECUTE_DIRECT(DBDIH, GSN_DIVERIFYREQ, signal,
+                     2 + sizeof(void*)/sizeof(Uint32), 0);
       if (signal->theData[3] == 0) {
         execDIVERIFYCONF(signal);
       }
@@ -8438,11 +8479,23 @@ Dbtc::checkNodeFailComplete(Signal* sign
     nfRep->blockNo      = DBTC;
     nfRep->nodeId       = cownNodeid;
     nfRep->failedNodeId = hostptr.i;
-    sendSignal(cdihblockref, GSN_NF_COMPLETEREP, signal, 
-	       NFCompleteRep::SignalLength, JBB);
 
-    sendSignal(QMGR_REF, GSN_NF_COMPLETEREP, signal, 
-	       NFCompleteRep::SignalLength, JBB);
+    if (instance() == 0)
+    {
+      jam();
+      sendSignal(cdihblockref, GSN_NF_COMPLETEREP, signal,
+                 NFCompleteRep::SignalLength, JBB);
+      sendSignal(QMGR_REF, GSN_NF_COMPLETEREP, signal,
+                 NFCompleteRep::SignalLength, JBB);
+    }
+    else
+    {
+      /**
+       * Send to proxy
+       */
+      sendSignal(DBTC_REF, GSN_NF_COMPLETEREP, signal,
+                 NFCompleteRep::SignalLength, JBB);
+    }
   }
 
   CRASH_INSERTION(8058);
@@ -8592,7 +8645,7 @@ Dbtc::checkScanFragList(Signal* signal,
   DEBUG("checkScanActiveInFailedLqh: scanFragError");
 }
 
-void Dbtc::execTAKE_OVERTCCONF(Signal* signal) 
+void Dbtc::execTAKE_OVERTCCONF(Signal* signal)
 {
   jamEntry();
 
@@ -8672,7 +8725,10 @@ void Dbtc::execTAKE_OVERTCREQ(Signal* si
   tcNodeFailptr.i = 0;
   ptrAss(tcNodeFailptr, tcFailRecord);
   if (tcNodeFailptr.p->failStatus != FS_IDLE ||
-      cmasterNodeId != getOwnNodeId())
+      cmasterNodeId != getOwnNodeId() ||
+      (! (instance() == 0 /* single TC */ ||
+          instance() == TAKE_OVER_INSTANCE))) /* in mt-TC case let 1 instance
+                                                 do take-over */
   {
     jam();
     /*------------------------------------------------------------*/
@@ -8687,6 +8743,7 @@ void Dbtc::execTAKE_OVERTCREQ(Signal* si
     tcNodeFailptr.p->queueIndex = tcNodeFailptr.p->queueIndex + 1;
     return;
   }//if
+  ndbrequire(instance() == 0 || instance() == TAKE_OVER_INSTANCE);
   startTakeOverLab(signal);
 }//Dbtc::execTAKE_OVERTCREQ()
 
@@ -10871,9 +10928,9 @@ void Dbtc::execDIH_SCAN_TAB_CONF(Signal*
     req->tableId = tabPtr.i;
     req->hashValue = cachePtr.p->distributionKey;
     req->distr_key_indicator = tabPtr.p->get_user_defined_partitioning();
-
+    * (EmulatedJamBuffer**)req->jamBuffer = jamBuffer();
     EXECUTE_DIRECT(DBDIH, GSN_DIGETNODESREQ, signal,
-                   DiGetNodesReq::SignalLength);
+                   DiGetNodesReq::SignalLength, 0);
     UintR TerrorIndicator = signal->theData[0];
     jamEntry();
     if (TerrorIndicator != 0)

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcProxy.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcProxy.cpp	2011-04-27 10:48:16 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcProxy.cpp	2011-05-26 11:54:16 +0000
@@ -79,6 +79,9 @@ DbtcProxy::DbtcProxy(Block_context& ctx)
   addRecSignal(GSN_DROP_INDX_IMPL_CONF,&DbtcProxy::execDROP_INDX_IMPL_CONF);
   addRecSignal(GSN_DROP_INDX_IMPL_REF, &DbtcProxy::execDROP_INDX_IMPL_REF);
 
+  // GSN_TAKE_OVERTCCONF
+  addRecSignal(GSN_TAKE_OVERTCCONF,&DbtcProxy::execTAKE_OVERTCCONF);
+
   m_tc_seize_req_instance = 0;
 }
 
@@ -500,7 +503,7 @@ DbtcProxy::execTCSEIZEREQ(Signal* signal
     return;
   }
 
-  signal->theData[2] = m_tc_seize_req_instance;
+  signal->theData[2] = 1 + m_tc_seize_req_instance;
   sendSignal(workerRef(m_tc_seize_req_instance), GSN_TCSEIZEREQ, signal,
              signal->getLength(), JBB);
   m_tc_seize_req_instance = (m_tc_seize_req_instance + 1) % c_workers;
@@ -922,4 +925,25 @@ DbtcProxy::sendDROP_INDX_IMPL_CONF(Signa
   ssRelease<Ss_DROP_INDX_IMPL_REQ>(ssId);
 }
 
+void
+DbtcProxy::execTAKE_OVERTCCONF(Signal* signal)
+{
+  jamEntry();
+
+  if (!checkNodeFailSequence(signal))
+  {
+    jam();
+    return;
+  }
+
+  for (Uint32 i = 0; i < c_workers; i++)
+  {
+    jam();
+    Uint32 ref = numberToRef(number(), workerInstance(i), getOwnNodeId());
+    sendSignal(ref, GSN_TAKE_OVERTCCONF, signal,
+               signal->getLength(),
+               JBB);
+  }
+}
+
 BLOCK_FUNCTIONS(DbtcProxy)

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcProxy.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcProxy.hpp	2011-04-27 10:48:16 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcProxy.hpp	2011-05-26 11:52:38 +0000
@@ -304,6 +304,9 @@ protected:
   void execDROP_INDX_IMPL_CONF(Signal*);
   void execDROP_INDX_IMPL_REF(Signal*);
   void sendDROP_INDX_IMPL_CONF(Signal*, Uint32 ssId);
+
+  // GSN_TAKE_OVERTCCONF
+  void execTAKE_OVERTCCONF(Signal*);
 };
 
 #endif

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-05-20 05:54:20 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-05-26 15:04:45 +0000
@@ -268,6 +268,7 @@ inline const Uint32* ALIGN_WORD(const vo
 #define ZMUST_BE_ABORTED_ERROR 898
 #define ZTUPLE_DELETED_ERROR 626
 #define ZINSERT_ERROR 630
+#define ZOP_AFTER_REFRESH_ERROR 920
 
 #define ZINVALID_CHAR_FORMAT 744
 #define ZROWID_ALLOCATED 899
@@ -843,6 +844,19 @@ struct Operationrec {
    * version even if in the same transaction.
    */
   Uint16 tupVersion;
+
+  /*
+   * When refreshing a row, there are four scenarios
+   * The actual scenario is encoded in the 'copy tuple location'
+   * to enable special handling at commit time
+   */
+  enum RefreshScenario
+  {
+    RF_SINGLE_NOT_EXIST = 1,    /* Refresh op first in trans, no row */
+    RF_SINGLE_EXIST     = 2,    /* Refresh op first in trans, row exists */
+    RF_MULTI_NOT_EXIST  = 3,    /* Refresh op !first in trans, row deleted */
+    RF_MULTI_EXIST      = 4     /* Refresh op !first in trans, row exists */
+  };
 };
 typedef Ptr<Operationrec> OperationrecPtr;
 
@@ -2080,6 +2094,13 @@ private:
                       KeyReqStruct* req_struct,
 		      bool disk);
 
+  int handleRefreshReq(Signal* signal,
+                       Ptr<Operationrec>,
+                       Ptr<Fragrecord>,
+                       Tablerec*,
+                       KeyReqStruct*,
+                       bool disk);
+
 //------------------------------------------------------------------
 //------------------------------------------------------------------
   int  updateStartLab(Signal* signal,
@@ -3406,6 +3427,8 @@ private:
                            const Dbtup::ScanOp& op);
   void commit_operation(Signal*, Uint32, Uint32, Tuple_header*, PagePtr,
 			Operationrec*, Fragrecord*, Tablerec*);
+  void commit_refresh(Signal*, Uint32, Uint32, Tuple_header*, PagePtr,
+                      KeyReqStruct*, Operationrec*, Fragrecord*, Tablerec*);
   int retrieve_data_page(Signal*,
                          Page_cache_client::Request,
                          OperationrecPtr);

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp	2011-05-17 23:29:55 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp	2011-05-25 14:31:47 +0000
@@ -485,12 +485,14 @@ Dbtup::disk_page_commit_callback(Signal*
 {
   Uint32 hash_value;
   Uint32 gci_hi, gci_lo;
+  Uint32 transId1, transId2;
   OperationrecPtr regOperPtr;
 
   jamEntry();
   
   c_operation_pool.getPtr(regOperPtr, opPtrI);
-  c_lqh->get_op_info(regOperPtr.p->userpointer, &hash_value, &gci_hi, &gci_lo);
+  c_lqh->get_op_info(regOperPtr.p->userpointer, &hash_value, &gci_hi, &gci_lo,
+                     &transId1, &transId2);
 
   TupCommitReq * const tupCommitReq= (TupCommitReq *)signal->getDataPtr();
   
@@ -499,6 +501,8 @@ Dbtup::disk_page_commit_callback(Signal*
   tupCommitReq->gci_hi= gci_hi;
   tupCommitReq->gci_lo= gci_lo;
   tupCommitReq->diskpage = page_id;
+  tupCommitReq->transId1 = transId1;
+  tupCommitReq->transId2 = transId2;
 
   regOperPtr.p->op_struct.m_load_diskpage_on_commit= 0;
   regOperPtr.p->m_commit_disk_callback_page= page_id;
@@ -526,12 +530,14 @@ Dbtup::disk_page_log_buffer_callback(Sig
 {
   Uint32 hash_value;
   Uint32 gci_hi, gci_lo;
+  Uint32 transId1, transId2;
   OperationrecPtr regOperPtr;
 
   jamEntry();
   
   c_operation_pool.getPtr(regOperPtr, opPtrI);
-  c_lqh->get_op_info(regOperPtr.p->userpointer, &hash_value, &gci_hi, &gci_lo);
+  c_lqh->get_op_info(regOperPtr.p->userpointer, &hash_value, &gci_hi, &gci_lo,
+                     &transId1, &transId2);
   Uint32 page= regOperPtr.p->m_commit_disk_callback_page;
 
   TupCommitReq * const tupCommitReq= (TupCommitReq *)signal->getDataPtr();
@@ -541,6 +547,8 @@ Dbtup::disk_page_log_buffer_callback(Sig
   tupCommitReq->gci_hi= gci_hi;
   tupCommitReq->gci_lo= gci_lo;
   tupCommitReq->diskpage = page;
+  tupCommitReq->transId1 = transId1;
+  tupCommitReq->transId2 = transId2;
 
   ndbassert(regOperPtr.p->op_struct.m_load_diskpage_on_commit == 0);
   regOperPtr.p->op_struct.m_wait_log_buffer= 0;
@@ -667,6 +675,8 @@ void Dbtup::execTUP_COMMITREQ(Signal* si
   Uint32 hash_value= tupCommitReq->hashValue;
   Uint32 gci_hi = tupCommitReq->gci_hi;
   Uint32 gci_lo = tupCommitReq->gci_lo;
+  Uint32 transId1 = tupCommitReq->transId1;
+  Uint32 transId2 = tupCommitReq->transId2;
 
   jamEntry();
 
@@ -687,6 +697,9 @@ void Dbtup::execTUP_COMMITREQ(Signal* si
   req_struct.hash_value= hash_value;
   req_struct.gci_hi = gci_hi;
   req_struct.gci_lo = gci_lo;
+  /* Put transid in req_struct, so detached triggers can access it */
+  req_struct.trans_id1 = transId1;
+  req_struct.trans_id2 = transId2;
   regOperPtr.p->m_commit_disk_callback_page = tupCommitReq->diskpage;
 
 #ifdef VM_TRACE
@@ -849,7 +862,15 @@ skip_disk:
     
     tuple_ptr->m_operation_ptr_i = RNIL;
     
-    if(regOperPtr.p->op_struct.op_type != ZDELETE)
+    if (regOperPtr.p->op_struct.op_type == ZDELETE)
+    {
+      jam();
+      if (get_page)
+        ndbassert(tuple_ptr->m_header_bits & Tuple_header::DISK_PART);
+      dealloc_tuple(signal, gci_hi, gci_lo, page.p, tuple_ptr,
+                    &req_struct, regOperPtr.p, regFragPtr.p, regTabPtr.p);
+    }
+    else if(regOperPtr.p->op_struct.op_type != ZREFRESH)
     {
       jam();
       commit_operation(signal, gci_hi, gci_lo, tuple_ptr, page,
@@ -858,14 +879,10 @@ skip_disk:
     else
     {
       jam();
-      if (get_page)
-      {
-	ndbassert(tuple_ptr->m_header_bits & Tuple_header::DISK_PART);
-      }
-      dealloc_tuple(signal, gci_hi, gci_lo, page.p, tuple_ptr,
-		    &req_struct, regOperPtr.p, regFragPtr.p, regTabPtr.p);
+      commit_refresh(signal, gci_hi, gci_lo, tuple_ptr, page,
+                     &req_struct, regOperPtr.p, regFragPtr.p, regTabPtr.p);
     }
-  } 
+  }
 
   if (nextOp != RNIL)
   {
@@ -917,3 +934,48 @@ Dbtup::set_commit_change_mask_info(const
     }
   }
 }
+
+void
+Dbtup::commit_refresh(Signal* signal,
+                      Uint32 gci_hi,
+                      Uint32 gci_lo,
+                      Tuple_header* tuple_ptr,
+                      PagePtr pagePtr,
+                      KeyReqStruct * req_struct,
+                      Operationrec* regOperPtr,
+                      Fragrecord* regFragPtr,
+                      Tablerec* regTabPtr)
+{
+  /* Committing a refresh operation.
+   * Refresh of an existing row looks like an update
+   * and can commit normally.
+   * Refresh of a non-existing row looks like an Insert which
+   * is 'undone' at commit time.
+   * This is achieved by making special calls to ACC to get
+   * it to forget, before deallocating the tuple locally.
+   */
+  switch(regOperPtr->m_copy_tuple_location.m_file_no){
+  case Operationrec::RF_SINGLE_NOT_EXIST:
+  case Operationrec::RF_MULTI_NOT_EXIST:
+    break;
+  case Operationrec::RF_SINGLE_EXIST:
+  case Operationrec::RF_MULTI_EXIST:
+    // "Normal" update
+    commit_operation(signal, gci_hi, gci_lo, tuple_ptr, pagePtr,
+                     regOperPtr, regFragPtr, regTabPtr);
+    return;
+
+  default:
+    ndbrequire(false);
+  }
+
+  Local_key key = regOperPtr->m_tuple_location;
+  key.m_page_no = pagePtr.p->frag_page_id;
+
+  /**
+   * Tell ACC to delete
+   */
+  c_lqh->accremoverow(signal, regOperPtr->userpointer, &key);
+  dealloc_tuple(signal, gci_hi, gci_lo, pagePtr.p, tuple_ptr,
+                req_struct, regOperPtr, regFragPtr, regTabPtr);
+}

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2011-05-20 05:54:20 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2011-05-26 15:04:45 +0000
@@ -214,7 +214,14 @@ Dbtup::insertActiveOpList(OperationrecPt
 	  prevOpPtr.p->op_struct.delete_insert_flag= true;
 	  regOperPtr.p->op_struct.delete_insert_flag= true;
 	  return true;
-	} else {
+	}
+        else if (op == ZREFRESH)
+        {
+          /* ZREFRESH after Delete - ok */
+          return true;
+        }
+        else
+        {
 	  terrorCode= ZTUPLE_DELETED_ERROR;
 	  return false;
 	}
@@ -224,6 +231,12 @@ Dbtup::insertActiveOpList(OperationrecPt
 	terrorCode= ZINSERT_ERROR;
 	return false;
       }
+      else if (prevOp == ZREFRESH)
+      {
+        /* No operation after a ZREFRESH */
+        terrorCode= ZOP_AFTER_REFRESH_ERROR;
+        return false;
+      }
       return true;
     }
     else
@@ -283,21 +296,39 @@ Dbtup::setup_read(KeyReqStruct *req_stru
       dirty= false;
     }
 
+    /* found == true indicates that savepoint is some state
+     * within tuple's current transaction's uncommitted operations
+     */
     bool found= find_savepoint(currOpPtr, savepointId);
     
     Uint32 currOp= currOpPtr.p->op_struct.op_type;
     
+    /* is_insert==true if tuple did not exist before its current
+     * transaction
+     */
     bool is_insert = (bits & Tuple_header::ALLOC);
+
+    /* If savepoint is in transaction, and post-delete-op
+     *   OR
+     * Tuple didn't exist before
+     *      AND
+     *   Read is dirty
+     *           OR
+     *   Savepoint is before-transaction
+     *
+     * Tuple does not exist in read's view
+     */
     if((found && currOp == ZDELETE) || 
        ((dirty || !found) && is_insert))
     {
+      /* Tuple not visible to this read operation */
       terrorCode= ZTUPLE_DELETED_ERROR;
       break;
     }
     
     if(dirty || !found)
     {
-      
+      /* Read existing committed tuple */
     }
     else
     {
@@ -351,6 +382,17 @@ Dbtup::load_diskpage(Signal* signal,
     jam();
     regOperPtr->op_struct.m_wait_log_buffer= 1;
     regOperPtr->op_struct.m_load_diskpage_on_commit= 1;
+    if (unlikely((flags & 7) == ZREFRESH))
+    {
+      jam();
+      /* Refresh of previously nonexistent DD tuple.
+       * No diskpage to load at commit time
+       */
+      regOperPtr->op_struct.m_wait_log_buffer= 0;
+      regOperPtr->op_struct.m_load_diskpage_on_commit= 0;
+    }
+
+    /* In either case return 1 for 'proceed' */
     return 1;
   }
   
@@ -410,6 +452,7 @@ Dbtup::load_diskpage(Signal* signal,
   case ZUPDATE:
   case ZINSERT:
   case ZWRITE:
+  case ZREFRESH:
     regOperPtr->op_struct.m_wait_log_buffer= 1;
     regOperPtr->op_struct.m_load_diskpage_on_commit= 1;
   }
@@ -556,7 +599,7 @@ void Dbtup::execTUPKEYREQ(Signal* signal
    Uint32 Rstoredid= tupKeyReq->storedProcedure;
 
    regOperPtr->fragmentPtr= Rfragptr;
-   regOperPtr->op_struct.op_type= (TrequestInfo >> 6) & 0xf;
+   regOperPtr->op_struct.op_type= (TrequestInfo >> 6) & 0x7;
    regOperPtr->op_struct.delete_insert_flag = false;
    regOperPtr->op_struct.m_reorg = (TrequestInfo >> 12) & 3;
 
@@ -635,10 +678,16 @@ void Dbtup::execTUPKEYREQ(Signal* signal
 
    if (Roptype == ZINSERT && Local_key::isInvalid(pageid, pageidx))
    {
-     // No tuple allocatated yet
+     // No tuple allocated yet
      goto do_insert;
    }
 
+   if (Roptype == ZREFRESH && Local_key::isInvalid(pageid, pageidx))
+   {
+     // No tuple allocated yet
+     goto do_refresh;
+   }
+
    if (unlikely(isCopyTuple(pageid, pageidx)))
    {
      /**
@@ -832,6 +881,23 @@ void Dbtup::execTUPKEYREQ(Signal* signal
        sendTUPKEYCONF(signal, &req_struct, regOperPtr);
        return;
      }
+     else if (Roptype == ZREFRESH)
+     {
+       /**
+        * No TUX or immediate triggers, just detached triggers
+        */
+   do_refresh:
+       if (unlikely(handleRefreshReq(signal, operPtr,
+                                     fragptr, regTabPtr,
+                                     &req_struct, disk_page != RNIL) == -1))
+       {
+         return;
+       }
+
+       sendTUPKEYCONF(signal, &req_struct, regOperPtr);
+       return;
+
+     }
      else
      {
        ndbrequire(false); // Invalid op type
@@ -2055,6 +2121,197 @@ error:
   return -1;
 }
 
+int
+Dbtup::handleRefreshReq(Signal* signal,
+                        Ptr<Operationrec> regOperPtr,
+                        Ptr<Fragrecord>  regFragPtr,
+                        Tablerec* regTabPtr,
+                        KeyReqStruct *req_struct,
+                        bool disk)
+{
+  /* Here we setup the tuple so that a transition to its current
+   * state can be observed by SUMA's detached triggers.
+   *
+   * If the tuple does not exist then we fabricate a tuple
+   * so that it can appear to be 'deleted'.
+   *   The fabricated tuple may have invalid NULL values etc.
+   * If the tuple does exist then we fabricate a null-change
+   * update to the tuple.
+   *
+   * The logic differs depending on whether there are already
+   * other operations on the tuple in this transaction.
+   * No other operations (including Refresh) are allowed after
+   * a refresh.
+   */
+  Uint32 refresh_case;
+  if (regOperPtr.p->is_first_operation())
+  {
+    jam();
+    if (Local_key::isInvalid(req_struct->frag_page_id,
+                             regOperPtr.p->m_tuple_location.m_page_idx))
+    {
+      jam();
+      refresh_case = Operationrec::RF_SINGLE_NOT_EXIST;
+      //ndbout_c("case 1");
+      /**
+       * This is refresh of non-existing tuple...
+       *   i.e "delete", reuse initial insert
+       */
+       Local_key accminupdate;
+       Local_key * accminupdateptr = &accminupdate;
+
+       /**
+        * We don't need ...in this scenario
+        * - disk
+        * - default values
+        */
+       Uint32 save_disk = regTabPtr->m_no_of_disk_attributes;
+       Local_key save_defaults = regTabPtr->m_default_value_location;
+       Bitmask<MAXNROFATTRIBUTESINWORDS> save_mask =
+         regTabPtr->notNullAttributeMask;
+
+       regTabPtr->m_no_of_disk_attributes = 0;
+       regTabPtr->m_default_value_location.setNull();
+       regOperPtr.p->op_struct.op_type = ZINSERT;
+
+       /**
+        * Update notNullAttributeMask  to only include primary keys
+        */
+       regTabPtr->notNullAttributeMask.clear();
+       const Uint32 * primarykeys =
+         (Uint32*)&tableDescriptor[regTabPtr->readKeyArray].tabDescr;
+       for (Uint32 i = 0; i<regTabPtr->noOfKeyAttr; i++)
+         regTabPtr->notNullAttributeMask.set(primarykeys[i] >> 16);
+
+       int res = handleInsertReq(signal, regOperPtr,
+                                 regFragPtr, regTabPtr, req_struct,
+                                 &accminupdateptr);
+
+       regTabPtr->m_no_of_disk_attributes = save_disk;
+       regTabPtr->m_default_value_location = save_defaults;
+       regTabPtr->notNullAttributeMask = save_mask;
+
+       if (unlikely(res == -1))
+       {
+         return -1;
+       }
+
+       regOperPtr.p->op_struct.op_type = ZREFRESH;
+
+       if (accminupdateptr)
+       {
+         /**
+          * Update ACC local-key, once *everything* has completed successfully
+          */
+         c_lqh->accminupdate(signal,
+                             regOperPtr.p->userpointer,
+                             accminupdateptr);
+       }
+    }
+    else
+    {
+      refresh_case = Operationrec::RF_SINGLE_EXIST;
+      //ndbout_c("case 2");
+      jam();
+
+      Uint32 tup_version_save = req_struct->m_tuple_ptr->get_tuple_version();
+      Uint32 new_tup_version = decr_tup_version(tup_version_save);
+      Tuple_header* origTuple = req_struct->m_tuple_ptr;
+      origTuple->set_tuple_version(new_tup_version);
+      int res = handleUpdateReq(signal, regOperPtr.p, regFragPtr.p,
+                                regTabPtr, req_struct, disk);
+      /* Now we must reset the original tuple header back
+       * to the original version.
+       * The copy tuple will have the correct version due to
+       * the update incrementing it.
+       * On commit, the tuple becomes the copy tuple.
+       * On abort, the original tuple remains.  If we don't
+       * reset it here, then aborts cause the version to
+       * decrease
+       */
+      origTuple->set_tuple_version(tup_version_save);
+      if (res == -1)
+        return -1;
+    }
+  }
+  else
+  {
+    /* Not first operation on tuple in transaction */
+    jam();
+
+    Uint32 tup_version_save = req_struct->prevOpPtr.p->tupVersion;
+    Uint32 new_tup_version = decr_tup_version(tup_version_save);
+    req_struct->prevOpPtr.p->tupVersion = new_tup_version;
+
+    int res;
+    if (req_struct->prevOpPtr.p->op_struct.op_type == ZDELETE)
+    {
+      refresh_case = Operationrec::RF_MULTI_NOT_EXIST;
+      //ndbout_c("case 3");
+
+      jam();
+      /**
+       * We don't need ...in this scenario
+       * - default values
+       *
+       * We keep disk attributes to avoid issues with 'insert'
+       */
+      Local_key save_defaults = regTabPtr->m_default_value_location;
+      Bitmask<MAXNROFATTRIBUTESINWORDS> save_mask =
+        regTabPtr->notNullAttributeMask;
+
+      regTabPtr->m_default_value_location.setNull();
+      regOperPtr.p->op_struct.op_type = ZINSERT;
+
+      /**
+       * Update notNullAttributeMask  to only include primary keys
+       */
+      regTabPtr->notNullAttributeMask.clear();
+      const Uint32 * primarykeys =
+        (Uint32*)&tableDescriptor[regTabPtr->readKeyArray].tabDescr;
+      for (Uint32 i = 0; i<regTabPtr->noOfKeyAttr; i++)
+        regTabPtr->notNullAttributeMask.set(primarykeys[i] >> 16);
+
+      /**
+       * This is multi-update + DELETE + REFRESH
+       */
+      Local_key * accminupdateptr = 0;
+      res = handleInsertReq(signal, regOperPtr,
+                            regFragPtr, regTabPtr, req_struct,
+                            &accminupdateptr);
+
+      regTabPtr->m_default_value_location = save_defaults;
+      regTabPtr->notNullAttributeMask = save_mask;
+
+      if (unlikely(res == -1))
+      {
+        return -1;
+      }
+
+      regOperPtr.p->op_struct.op_type = ZREFRESH;
+    }
+    else
+    {
+      jam();
+      refresh_case = Operationrec::RF_MULTI_EXIST;
+      //ndbout_c("case 4");
+      /**
+       * This is multi-update + INSERT/UPDATE + REFRESH
+       */
+      res = handleUpdateReq(signal, regOperPtr.p, regFragPtr.p,
+                            regTabPtr, req_struct, disk);
+    }
+    req_struct->prevOpPtr.p->tupVersion = tup_version_save;
+    if (res == -1)
+      return -1;
+  }
+
+  /* Store the refresh scenario in the copy tuple location */
+  // TODO : Verify this is never used as a copy tuple location!
+  regOperPtr.p->m_copy_tuple_location.m_file_no = refresh_case;
+  return 0;
+}
+
 bool
 Dbtup::checkNullAttributes(KeyReqStruct * req_struct,
                            Tablerec* regTabPtr)

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp	2011-05-25 14:31:47 +0000
@@ -863,6 +863,7 @@ void Dbtup::checkDetachedTriggers(KeyReq
   switch (save_type) {
   case ZUPDATE:
   case ZINSERT:
+  case ZREFRESH:
     req_struct->m_tuple_ptr =get_copy_tuple(&regOperPtr->m_copy_tuple_location);
     break;
   }
@@ -878,7 +879,10 @@ void Dbtup::checkDetachedTriggers(KeyReq
       return;
       goto end;
     }
-    regOperPtr->op_struct.op_type = ZINSERT;
+    else if (save_type != ZREFRESH)
+    {
+      regOperPtr->op_struct.op_type = ZINSERT;
+    }
   }
   else if (save_type == ZINSERT) {
     /**
@@ -930,6 +934,29 @@ void Dbtup::checkDetachedTriggers(KeyReq
                          regTablePtr->subscriptionUpdateTriggers, 
                          regOperPtr, disk);
     break;
+  case ZREFRESH:
+    jam();
+    /* Depending on the Refresh scenario, fire Delete or Insert
+     * triggers to simulate the effect of arriving at the tuple's
+     * current state.
+     */
+    switch(regOperPtr->m_copy_tuple_location.m_file_no){
+    case Operationrec::RF_SINGLE_NOT_EXIST:
+    case Operationrec::RF_MULTI_NOT_EXIST:
+      fireDetachedTriggers(req_struct,
+                           regTablePtr->subscriptionDeleteTriggers,
+                           regOperPtr, disk);
+      break;
+    case Operationrec::RF_SINGLE_EXIST:
+    case Operationrec::RF_MULTI_EXIST:
+      fireDetachedTriggers(req_struct,
+                           regTablePtr->subscriptionInsertTriggers,
+                           regOperPtr, disk);
+      break;
+    default:
+      ndbrequire(false);
+    }
+    break;
   default:
     ndbrequire(false);
     break;
@@ -1375,12 +1402,14 @@ out:
 
     switch(regOperPtr->op_struct.op_type) {
     case(ZINSERT):
+    is_insert:
       jam();
       // Send AttrInfo signals with new attribute values
       trigAttrInfo->setAttrInfoType(TrigAttrInfo::AFTER_VALUES);
       sendTrigAttrInfo(signal, afterBuffer, noAfterWords, executeDirect, ref);
       break;
     case(ZDELETE):
+    is_delete:
       if (trigPtr->sendBeforeValues) {
         jam();
         trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES);
@@ -1397,6 +1426,23 @@ out:
       trigAttrInfo->setAttrInfoType(TrigAttrInfo::AFTER_VALUES);
       sendTrigAttrInfo(signal, afterBuffer, noAfterWords, executeDirect, ref);
       break;
+    case ZREFRESH:
+      jam();
+      /* Reuse Insert/Delete trigger firing code as necessary */
+      switch(regOperPtr->m_copy_tuple_location.m_file_no){
+      case Operationrec::RF_SINGLE_NOT_EXIST:
+        jam();
+      case Operationrec::RF_MULTI_NOT_EXIST:
+        jam();
+        goto is_delete;
+      case Operationrec::RF_SINGLE_EXIST:
+        jam();
+      case Operationrec::RF_MULTI_EXIST:
+        jam();
+        goto is_insert;
+      default:
+        ndbrequire(false);
+      }
     default:
       ndbrequire(false);
     }
@@ -1424,6 +1470,25 @@ out:
     jam();
     fireTrigOrd->m_triggerEvent = TriggerEvent::TE_DELETE;
     break;
+  case ZREFRESH:
+    jam();
+    switch(regOperPtr->m_copy_tuple_location.m_file_no){
+    case Operationrec::RF_SINGLE_NOT_EXIST:
+      jam();
+    case Operationrec::RF_MULTI_NOT_EXIST:
+      jam();
+      fireTrigOrd->m_triggerEvent = TriggerEvent::TE_DELETE;
+      break;
+    case Operationrec::RF_SINGLE_EXIST:
+      jam();
+    case Operationrec::RF_MULTI_EXIST:
+      jam();
+      fireTrigOrd->m_triggerEvent = TriggerEvent::TE_INSERT;
+      break;
+    default:
+      ndbrequire(false);
+    }
+    break;
   default:
     ndbrequire(false);
     break;
@@ -1445,6 +1510,8 @@ out:
     break;
   case (TriggerType::SUBSCRIPTION_BEFORE): // Only Suma
     jam();
+    fireTrigOrd->m_transId1 = req_struct->trans_id1;
+    fireTrigOrd->m_transId2 = req_struct->trans_id2;
     fireTrigOrd->setGCI(req_struct->gci_hi);
     fireTrigOrd->setHashValue(req_struct->hash_value);
     fireTrigOrd->m_any_value = regOperPtr->m_any_value;
@@ -1615,7 +1682,7 @@ bool Dbtup::readTriggerInfo(TupTriggerDa
 // Delete without sending before values only read Primary Key
 //--------------------------------------------------------------------
     return true;
-  } else {
+  } else if (regOperPtr->op_struct.op_type != ZREFRESH){
     jam();
 //--------------------------------------------------------------------
 // All others send all attributes that are monitored, except:
@@ -1632,6 +1699,27 @@ bool Dbtup::readTriggerInfo(TupTriggerDa
     numAttrsToRead = setAttrIds(attributeMask, regTabPtr->m_no_of_attributes,
                                 &readBuffer[0]);
   }
+  else
+  {
+    jam();
+    ndbassert(regOperPtr->op_struct.op_type == ZREFRESH);
+    /* Refresh specific before/after value hacks */
+    switch(regOperPtr->m_copy_tuple_location.m_file_no){
+    case Operationrec::RF_SINGLE_NOT_EXIST:
+    case Operationrec::RF_MULTI_NOT_EXIST:
+      return true; // generate ZDELETE...no before values
+    case Operationrec::RF_SINGLE_EXIST:
+    case Operationrec::RF_MULTI_EXIST:
+      // generate ZINSERT...all after values
+      numAttrsToRead = setAttrIds(trigPtr->attributeMask,
+                                  regTabPtr->m_no_of_attributes,
+                                  &readBuffer[0]);
+      break;
+    default:
+      ndbrequire(false);
+    }
+  }
+
   ndbrequire(numAttrsToRead <= MAX_ATTRIBUTES_IN_TABLE);
 //--------------------------------------------------------------------
 // Read Main tuple values
@@ -1875,6 +1963,9 @@ Dbtup::executeTuxCommitTriggers(Signal* 
       return;
     jam();
     tupVersion= regOperPtr->tupVersion;
+  } else if (regOperPtr->op_struct.op_type == ZREFRESH) {
+    /* Refresh should not affect TUX */
+    return;
   } else {
     ndbrequire(false);
     tupVersion= 0; // remove warning
@@ -1907,6 +1998,10 @@ Dbtup::executeTuxAbortTriggers(Signal* s
   } else if (regOperPtr->op_struct.op_type == ZDELETE) {
     jam();
     return;
+  } else if (regOperPtr->op_struct.op_type == ZREFRESH) {
+    jam();
+    /* Refresh should not affect TUX */
+    return;
   } else {
     ndbrequire(false);
     tupVersion= 0; // remove warning

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.cpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.cpp	2011-05-23 10:38:41 +0000
@@ -30,17 +30,17 @@
 #include <EventLogger.hpp>
 extern EventLogger * g_eventLogger;
 
-AsyncIoThread::AsyncIoThread(class Ndbfs& fs, AsyncFile* file)
+AsyncIoThread::AsyncIoThread(class Ndbfs& fs, bool bound)
   : m_fs(fs)
 {
-  m_current_file = file;
-  if (file)
+  m_current_file = 0;
+  if (bound)
   {
-    theMemoryChannelPtr = &theMemoryChannel;
+    theMemoryChannelPtr = &m_fs.theToBoundThreads;
   }
   else
   {
-    theMemoryChannelPtr = &m_fs.theToThreads;
+    theMemoryChannelPtr = &m_fs.theToUnboundThreads;
   }
   theReportTo = &m_fs.theFromThreads;
 }
@@ -149,13 +149,17 @@ AsyncIoThread::run()
     switch (request->action) {
     case Request::open:
       file->openReq(request);
+      if (request->error == 0 && request->m_do_bind)
+        attach(file);
       break;
     case Request::close:
       file->closeReq(request);
+      detach(file);
       break;
     case Request::closeRemove:
       file->closeReq(request);
       file->removeReq(request);
+      detach(file);
       break;
     case Request::readPartial:
     case Request::read:
@@ -265,3 +269,32 @@ AsyncIoThread::buildIndxReq(Request* req
   req.buffer_size = request->file->m_page_cnt * sizeof(GlobalPage);
   request->error = (* req.func_ptr)(&req);
 }
+
+void
+AsyncIoThread::attach(AsyncFile* file)
+{
+  assert(m_current_file == 0);
+  assert(theMemoryChannelPtr == &m_fs.theToBoundThreads);
+  m_current_file = file;
+  theMemoryChannelPtr = &theMemoryChannel;
+  file->attach(this);
+  m_fs.cnt_active_bound(1);
+}
+
+void
+AsyncIoThread::detach(AsyncFile* file)
+{
+  if (m_current_file == 0)
+  {
+    assert(file->getThread() == 0);
+  }
+  else
+  {
+    assert(m_current_file == file);
+    assert(theMemoryChannelPtr == &theMemoryChannel);
+    m_current_file = 0;
+    theMemoryChannelPtr = &m_fs.theToBoundThreads;
+    file->detach(this);
+    m_fs.cnt_active_bound(-1);
+  }
+}

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.hpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.hpp	2011-04-21 09:21:18 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.hpp	2011-05-23 10:38:41 +0000
@@ -43,6 +43,8 @@ class Request
 public:
   Request() {}
 
+  void atGet() { m_do_bind = false; }
+
   enum Action {
     open,
     close,
@@ -113,6 +115,7 @@ public:
    // Information for open, needed if the first open action fails.
   AsyncFile* file;
   Uint32 theTrace;
+  bool m_do_bind;
 
   MemoryChannel<Request>::ListMember m_mem_channel;
 };
@@ -134,7 +137,7 @@ class AsyncIoThread
   friend class Ndbfs;
   friend class AsyncFile;
 public:
-  AsyncIoThread(class Ndbfs&, AsyncFile* file);
+  AsyncIoThread(class Ndbfs&, bool bound);
   virtual ~AsyncIoThread() {};
 
   struct NdbThread* doStart();
@@ -174,6 +177,8 @@ private:
    */
   void buildIndxReq(Request*);
 
+  void attach(AsyncFile*);
+  void detach(AsyncFile*);
 };
 
 #endif

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp	2011-04-21 09:21:18 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp	2011-05-23 10:38:41 +0000
@@ -45,6 +45,8 @@
 #include <EventLogger.hpp>
 extern EventLogger * g_eventLogger;
 
+NdbMutex g_active_bound_threads_mutex;
+
 inline
 int pageSize( const NewVARIABLE* baseAddrRef )
 {
@@ -62,10 +64,15 @@ Ndbfs::Ndbfs(Block_context& ctx) :
   scanningInProgress(false),
   theLastId(0),
   theRequestPool(0),
-  m_maxOpenedFiles(0)
+  m_maxOpenedFiles(0),
+  m_bound_threads_cnt(0),
+  m_unbounds_threads_cnt(0),
+  m_active_bound_threads_cnt(0)
 {
   BLOCK_CONSTRUCTOR(Ndbfs);
 
+  NdbMutex_Init(&g_active_bound_threads_mutex);
+
   // Set received signals
   addRecSignal(GSN_READ_CONFIG_REQ, &Ndbfs::execREAD_CONFIG_REQ);
   addRecSignal(GSN_DUMP_STATE_ORD,  &Ndbfs::execDUMP_STATE_ORD);
@@ -100,7 +107,8 @@ Ndbfs::~Ndbfs()
   request.action = Request::end;
   for (unsigned i = 0; i < theThreads.size(); i++)
   {
-    theToThreads.writeChannel(&request);
+    theToBoundThreads.writeChannel(&request);
+    theToUnboundThreads.writeChannel(&request);
   }
 
   for (unsigned i = 0; i < theThreads.size(); i++)
@@ -274,7 +282,12 @@ Ndbfs::execREAD_CONFIG_REQ(Signal* signa
   // Create idle AsyncFiles
   for (Uint32 i = 0; i < noIdleFiles; i++)
   {
-    theIdleBoundFiles.push_back(createAsyncFile(true /* bound */));
+    theIdleFiles.push_back(createAsyncFile());
+    AsyncIoThread * thr = createIoThread(/* bound */ true);
+    if (thr)
+    {
+      theThreads.push_back(thr);
+    }
   }
 
   Uint32 threadpool = 2;
@@ -283,7 +296,7 @@ Ndbfs::execREAD_CONFIG_REQ(Signal* signa
   // Create IoThreads
   for (Uint32 i = 0; i < threadpool; i++)
   {
-    AsyncIoThread * thr = createIoThread(0);
+    AsyncIoThread * thr = createIoThread(/* bound */ false);
     if (thr)
     {
       jam();
@@ -339,7 +352,7 @@ Ndbfs::execSTTOR(Signal* signal)
   ndbrequire(0);
 }
 
-int 
+int
 Ndbfs::forward( AsyncFile * file, Request* request)
 {
   jam();
@@ -348,9 +361,13 @@ Ndbfs::forward( AsyncFile * file, Reques
   {
     thr->dispatch(request);
   }
+  else if (request->m_do_bind)
+  {
+    theToBoundThreads.writeChannel(request);
+  }
   else
   {
-    theToThreads.writeChannel(request);
+    theToUnboundThreads.writeChannel(request);
   }
   return 1;
 }
@@ -444,7 +461,8 @@ Ndbfs::execFSOPENREQ(Signal* signal)
   request->par.open.file_size <<= 32;
   request->par.open.file_size |= fsOpenReq->file_size_lo;
   request->par.open.auto_sync_size = fsOpenReq->auto_sync_size;
-  
+  request->m_do_bind = bound;
+
   ndbrequire(forward(file, request));
 }
 
@@ -454,7 +472,8 @@ Ndbfs::execFSREMOVEREQ(Signal* signal)
   jamEntry();
   const FsRemoveReq * const req = (FsRemoveReq *)signal->getDataPtr();
   const BlockReference userRef = req->userReference;
-  AsyncFile* file = getIdleFile(true);
+  bool bound = true;
+  AsyncFile* file = getIdleFile(bound);
   ndbrequire(file != NULL);
 
   SectionHandle handle(this, signal);
@@ -479,7 +498,8 @@ Ndbfs::execFSREMOVEREQ(Signal* signal)
   request->set(userRef, req->userPointer, newId() );
   request->file = file;
   request->theTrace = signal->getTrace();
-  
+  request->m_do_bind = bound;
+
   if (version == 6)
   {
     ndbrequire(bp < NDB_ARRAY_SIZE(m_base_path));
@@ -541,6 +561,7 @@ Ndbfs::execFSCLOSEREQ(Signal * signal)
   request->file = openFile;
   request->error = 0;
   request->theTrace = signal->getTrace();
+  request->m_do_bind = false;
 
   ndbrequire(forward(openFile, request));
 }
@@ -584,6 +605,7 @@ Ndbfs::readWriteRequest(int action, Sign
   request->file = openFile;
   request->action = (Request::Action) action;
   request->theTrace = signal->getTrace();
+  request->m_do_bind = false;
 
   Uint32 format = fsRWReq->getFormatFlag(fsRWReq->operationFlag);
 
@@ -804,7 +826,8 @@ Ndbfs::execFSSYNCREQ(Signal * signal)
   request->set(userRef, userPointer, filePointer);
   request->file = openFile;
   request->theTrace = signal->getTrace();
-  
+  request->m_do_bind = false;
+
   ndbrequire(forward(openFile,request));
 }
 
@@ -832,6 +855,7 @@ Ndbfs::execFSSUSPENDORD(Signal * signal)
   request->file = openFile;
   request->theTrace = signal->getTrace();
   request->par.suspend.milliseconds = millis;
+  request->m_do_bind = false;
 
   ndbrequire(forward(openFile,request));
 }
@@ -895,6 +919,7 @@ Ndbfs::execFSAPPENDREQ(Signal * signal)
     request->action = Request::append;
   else
     request->action = Request::append_synch;
+  request->m_do_bind = false;
   ndbrequire(forward(openFile, request));
   return;
   
@@ -918,7 +943,8 @@ Ndbfs::execALLOC_MEM_REQ(Signal* signal)
 
   AllocMemReq* req = (AllocMemReq*)signal->getDataPtr();
 
-  AsyncFile* file = getIdleFile(true);
+  bool bound = true;
+  AsyncFile* file = getIdleFile(bound);
   ndbrequire(file != NULL);
 
   Request *request = theRequestPool->get();
@@ -932,6 +958,7 @@ Ndbfs::execALLOC_MEM_REQ(Signal* signal)
   request->par.alloc.requestInfo = req->requestInfo;
   request->par.alloc.bytes = (Uint64(req->bytes_hi) << 32) + req->bytes_lo;
   request->action = Request::allocmem;
+  request->m_do_bind = bound;
   ndbrequire(forward(file, request));
 }
 
@@ -943,7 +970,8 @@ Ndbfs::execBUILD_INDX_IMPL_REQ(Signal* s
   jamEntry();
   mt_BuildIndxReq * req = (mt_BuildIndxReq*)signal->getDataPtr();
 
-  AsyncFile* file = getIdleFile(true);
+  bool bound = true;
+  AsyncFile* file = getIdleFile(bound);
   ndbrequire(file != NULL);
 
   Request *request = theRequestPool->get();
@@ -972,6 +1000,7 @@ Ndbfs::execBUILD_INDX_IMPL_REQ(Signal* s
 
   memcpy(&request->par.build.m_req, req, sizeof(* req));
   request->action = Request::buildindx;
+  request->m_do_bind = bound;
   ndbrequire(forward(file, request));
 }
 
@@ -1000,8 +1029,8 @@ Ndbfs::newId()
 }
 
 AsyncFile*
-Ndbfs::createAsyncFile(bool bound){
-
+Ndbfs::createAsyncFile()
+{
   // Check limit of open files
   if (m_maxFiles !=0 && theFiles.size() ==  m_maxFiles)
   {
@@ -1024,42 +1053,35 @@ Ndbfs::createAsyncFile(bool bound){
     ERROR_SET(fatal, NDBD_EXIT_AFS_MAXOPEN,""," Ndbfs::createAsyncFile");
   }
 
-  if (bound)
-  {
-    AsyncIoThread * thr = createIoThread(file);
-    theThreads.push_back(thr);
-    file->attach(thr);
-
-#ifdef VM_TRACE
-    ndbout_c("NDBFS: Created new file thread %d", theFiles.size());
-#endif
-  }
-
   theFiles.push_back(file);
-  
   return file;
 }
 
 void
 Ndbfs::pushIdleFile(AsyncFile* file)
 {
-  if (file->getThread())
-  {
-    theIdleBoundFiles.push_back(file);
-  }
-  else
-  {
-    theIdleUnboundFiles.push_back(file);
-  }
+  assert(file->getThread() == 0);
+  theIdleFiles.push_back(file);
 }
 
 AsyncIoThread*
-Ndbfs::createIoThread(AsyncFile* file)
+Ndbfs::createIoThread(bool bound)
 {
-  AsyncIoThread* thr = new AsyncIoThread(*this, file);
+  AsyncIoThread* thr = new AsyncIoThread(*this, bound);
+  if (thr)
+  {
+#ifdef VM_TRACE
+    ndbout_c("NDBFS: Created new file thread %d", theThreads.size());
+#endif
 
-  struct NdbThread* thrptr = thr->doStart();
-  globalEmulatorData.theConfiguration->addThread(thrptr, NdbfsThread);
+    struct NdbThread* thrptr = thr->doStart();
+    globalEmulatorData.theConfiguration->addThread(thrptr, NdbfsThread);
+
+    if (bound)
+      m_bound_threads_cnt++;
+    else
+      m_unbounds_threads_cnt++;
+  }
 
   return thr;
 }
@@ -1067,31 +1089,50 @@ Ndbfs::createIoThread(AsyncFile* file)
 AsyncFile*
 Ndbfs::getIdleFile(bool bound)
 {
-  if (bound)
+  AsyncFile* file = 0;
+  Uint32 sz = theIdleFiles.size();
+  if (sz)
   {
-    Uint32 sz = theIdleBoundFiles.size();
-    if (sz)
-    {
-      AsyncFile* file = theIdleBoundFiles[sz - 1];
-      theIdleBoundFiles.erase(sz - 1);
-      return file;
-    }
+    file = theIdleFiles[sz - 1];
+    theIdleFiles.erase(sz - 1);
   }
   else
   {
-    Uint32 sz = theIdleUnboundFiles.size();
-    if (sz)
+    file = createAsyncFile();
+  }
+
+  if (bound)
+  {
+    /**
+     * Check if we should create thread
+     */
+    if (m_active_bound_threads_cnt == m_bound_threads_cnt)
     {
-      AsyncFile* file = theIdleUnboundFiles[sz - 1];
-      theIdleUnboundFiles.erase(sz - 1);
-      return file;
+      AsyncIoThread * thr = createIoThread(true);
+      if (thr)
+      {
+        theThreads.push_back(thr);
+      }
     }
   }
-
-  return createAsyncFile(bound);
+  return file;
 }
 
-
+void
+Ndbfs::cnt_active_bound(int val)
+{
+  Guard g(&g_active_bound_threads_mutex);
+  if (val < 0)
+  {
+    val = -val;
+    assert(m_active_bound_threads_cnt >= (Uint32)val);
+    m_active_bound_threads_cnt -= val;
+  }
+  else
+  {
+    m_active_bound_threads_cnt += val;
+  }
+}
 
 void
 Ndbfs::report(Request * request, Signal* signal)
@@ -1506,10 +1547,13 @@ Ndbfs::execDUMP_STATE_ORD(Signal* signal
     infoEvent("NDBFS: Files: %d Open files: %d",
 	      theFiles.size(),
 	      theOpenFiles.size());
-    infoEvent(" Idle files: (bound: %u unbound: %u) Max opened files: %d",
-              theIdleBoundFiles.size(),
-              theIdleUnboundFiles.size(),
+    infoEvent(" Idle files: %u Max opened files: %d",
+              theIdleFiles.size(),
               m_maxOpenedFiles);
+    infoEvent(" Bound Threads: %u (active %u) Unbound threads: %u",
+              m_bound_threads_cnt,
+              m_active_bound_threads_cnt,
+              m_unbounds_threads_cnt);
     infoEvent(" Max files: %d",
 	      m_maxFiles);
     infoEvent(" Requests: %d",
@@ -1522,7 +1566,10 @@ Ndbfs::execDUMP_STATE_ORD(Signal* signal
     
     for (unsigned i = 0; i < theOpenFiles.size(); i++){
       AsyncFile* file = theOpenFiles.getFile(i);
-      infoEvent("%2d (0x%lx): %s", i, (long)file, file->theFileName.c_str());
+      infoEvent("%2d (0x%lx): %s thr: %lx", i,
+                (long)file,
+                file->theFileName.c_str(),
+                (long)file->getThread());
     }
     return;
   }
@@ -1536,18 +1583,14 @@ Ndbfs::execDUMP_STATE_ORD(Signal* signal
     return;
   }
   if(signal->theData[0] == DumpStateOrd::NdbfsDumpIdleFiles){
-    infoEvent("NDBFS: Dump idle files: %d %u",
-              theIdleBoundFiles.size(), theIdleUnboundFiles.size());
-    
-    for (unsigned i = 0; i < theIdleBoundFiles.size(); i++){
-      AsyncFile* file = theIdleBoundFiles[i];
-      infoEvent("%2d (0x%lx): %s", i, (long)file, file->isOpen()?"OPEN":"CLOSED");
-    }
+    infoEvent("NDBFS: Dump idle files: %u",
+              theIdleFiles.size());
 
-    for (unsigned i = 0; i < theIdleUnboundFiles.size(); i++){
-      AsyncFile* file = theIdleUnboundFiles[i];
+    for (unsigned i = 0; i < theIdleFiles.size(); i++){
+      AsyncFile* file = theIdleFiles[i];
       infoEvent("%2d (0x%lx): %s", i, (long)file, file->isOpen()?"OPEN":"CLOSED");
     }
+
     return;
   }
 

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp	2011-05-23 10:38:41 +0000
@@ -79,19 +79,19 @@ private:
 
   // Communication from/to files
   MemoryChannel<Request> theFromThreads;
-  MemoryChannel<Request> theToThreads;
+  MemoryChannel<Request> theToBoundThreads;
+  MemoryChannel<Request> theToUnboundThreads;
 
   Pool<Request>* theRequestPool;
 
-  AsyncIoThread* createIoThread(AsyncFile* file);
-  AsyncFile* createAsyncFile(bool bound);
+  AsyncIoThread* createIoThread(bool bound);
+  AsyncFile* createAsyncFile();
   AsyncFile* getIdleFile(bool bound);
   void pushIdleFile(AsyncFile*);
 
   Vector<AsyncIoThread*> theThreads;// List of all created threads
   Vector<AsyncFile*> theFiles;      // List all created AsyncFiles
-  Vector<AsyncFile*> theIdleBoundFiles;   // List of idle AsyncFiles
-  Vector<AsyncFile*> theIdleUnboundFiles; // List of idle AsyncFiles
+  Vector<AsyncFile*> theIdleFiles;  // List of idle AsyncFiles
   OpenFiles theOpenFiles;           // List of open AsyncFiles
 
   BaseString m_base_path[FsOpenReq::BP_MAX];
@@ -105,6 +105,11 @@ private:
   void readWriteRequest(  int action, Signal * signal );
 
   static Uint32 translateErrno(int aErrno);
+
+  Uint32 m_bound_threads_cnt;
+  Uint32 m_unbounds_threads_cnt;
+  Uint32 m_active_bound_threads_cnt;
+  void cnt_active_bound(int val);
 public:
   const BaseString& get_base_path(Uint32 no) const;
 };

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp	2011-05-23 10:38:41 +0000
@@ -249,6 +249,7 @@ template <class T> inline T* Pool<T>::ge
    }
    --theTop;
    tmp = theList[theTop];
+   tmp->atGet();
    return tmp;
 }
 

=== modified file 'storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp'
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp	2011-04-09 15:48:21 +0000
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp	2011-05-24 15:06:09 +0000
@@ -73,6 +73,24 @@ void Qmgr::initData() 
 #ifdef ERROR_INSERT
   nodeFailCount = 0;
 #endif
+
+  cfailureNr = 1;
+  ccommitFailureNr = 1;
+  cprepareFailureNr = 1;
+  cnoFailedNodes = 0;
+  cnoPrepFailedNodes = 0;
+  creadyDistCom = ZFALSE;
+  cpresident = ZNIL;
+  c_start.m_president_candidate = ZNIL;
+  c_start.m_president_candidate_gci = 0;
+  cpdistref = 0;
+  cneighbourh = ZNIL;
+  cneighbourl = ZNIL;
+  cdelayRegreq = ZDELAY_REGREQ;
+  cactivateApiCheck = 0;
+  c_allow_api_connect = 0;
+  ctoStatus = Q_NOT_ACTIVE;
+  clatestTransactionCheck = 0;
 }//Qmgr::initData()
 
 void Qmgr::initRecords() 

=== modified file 'storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-05-04 05:33:14 +0000
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-05-26 15:04:45 +0000
@@ -2481,27 +2481,9 @@ void Qmgr::findNeighbours(Signal* signal
 /*---------------------------------------------------------------------------*/
 void Qmgr::initData(Signal* signal) 
 {
-  cfailureNr = 1;
-  ccommitFailureNr = 1;
-  cprepareFailureNr = 1;
-  cnoFailedNodes = 0;
-  cnoPrepFailedNodes = 0;
-  creadyDistCom = ZFALSE;
-  cpresident = ZNIL;
-  c_start.m_president_candidate = ZNIL;
-  c_start.m_president_candidate_gci = 0;
-  cpdistref = 0;
-  cneighbourh = ZNIL;
-  cneighbourl = ZNIL;
-  cdelayRegreq = ZDELAY_REGREQ;
-  cactivateApiCheck = 0;
-  c_allow_api_connect = 0;
-  ctoStatus = Q_NOT_ACTIVE;
-
   NDB_TICKS now = NdbTick_CurrentMillisecond();
   interface_check_timer.setDelay(1000);
   interface_check_timer.reset(now);
-  clatestTransactionCheck = 0;
 
   // catch-all for missing initializations
   memset(&arbitRec, 0, sizeof(arbitRec));

=== modified file 'storage/ndb/src/kernel/blocks/suma/Suma.cpp'
--- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp	2011-02-04 11:45:24 +0000
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp	2011-05-26 15:04:45 +0000
@@ -4292,6 +4292,8 @@ Suma::execFIRE_TRIG_ORD(Signal* signal)
   const Uint64 gci = gci_lo | (Uint64(gci_hi) << 32);
   const Uint32 event     = trg->getTriggerEvent();
   const Uint32 any_value = trg->getAnyValue();
+  const Uint32 transId1  = trg->m_transId1;
+  const Uint32 transId2  = trg->m_transId2;
 
   Ptr<Subscription> subPtr;
   c_subscriptionPool.getPtr(subPtr, trigId & 0xFFFF);
@@ -4364,6 +4366,8 @@ Suma::execFIRE_TRIG_ORD(Signal* signal)
     data->flags          = 0;
     data->anyValue       = any_value;
     data->totalLen       = ptrLen;
+    data->transId1       = transId1;
+    data->transId2       = transId2;
     
     {
       LocalDLList<Subscriber> list(c_subscriberPool, subPtr.p->m_subscribers);
@@ -4372,13 +4376,13 @@ Suma::execFIRE_TRIG_ORD(Signal* signal)
       {
 	data->senderData = subbPtr.p->m_senderData;
 	sendSignal(subbPtr.p->m_senderRef, GSN_SUB_TABLE_DATA, signal,
-		   SubTableData::SignalLength, JBB, ptr, nptr);
+		   SubTableData::SignalLengthWithTransId, JBB, ptr, nptr);
       }
     }
   }
   else 
   {
-    const uint buffer_header_sz = 4;
+    const uint buffer_header_sz = 6;
     Uint32* dst;
     Uint32 sz = f_trigBufferSize + b_trigBufferSize + buffer_header_sz;
     if((dst = get_buffer_ptr(signal, bucket, gci, sz)))
@@ -4387,6 +4391,8 @@ Suma::execFIRE_TRIG_ORD(Signal* signal)
       * dst++ = schemaVersion;
       * dst++ = (event << 16) | f_trigBufferSize;
       * dst++ = any_value;
+      * dst++ = transId1;
+      * dst++ = transId2;
       memcpy(dst, f_buffer, f_trigBufferSize << 2);
       dst += f_trigBufferSize;
       memcpy(dst, b_buffer, b_trigBufferSize << 2);
@@ -6362,13 +6368,15 @@ Suma::resend_bucket(Signal* signal, Uint
     } 
     else
     {
-      const uint buffer_header_sz = 4;
+      const uint buffer_header_sz = 6;
       g_cnt++;
       Uint32 subPtrI = * src++ ;
       Uint32 schemaVersion = * src++;
       Uint32 event = * src >> 16;
       Uint32 sz_1 = (* src ++) & 0xFFFF;
       Uint32 any_value = * src++;
+      Uint32 transId1 = * src++;
+      Uint32 transId2 = * src++;
 
       ndbassert(sz - buffer_header_sz >= sz_1);
       
@@ -6400,6 +6408,8 @@ Suma::resend_bucket(Signal* signal, Uint
 	data->flags          = 0;
 	data->anyValue       = any_value;
 	data->totalLen       = ptrLen;
+        data->transId1       = transId1;
+        data->transId2       = transId2;
 	
 	{
           LocalDLList<Subscriber> list(c_subscriberPool,
@@ -6409,7 +6419,7 @@ Suma::resend_bucket(Signal* signal, Uint
           {
             data->senderData = subbPtr.p->m_senderData;
             sendSignal(subbPtr.p->m_senderRef, GSN_SUB_TABLE_DATA, signal,
-                       SubTableData::SignalLength, JBB, ptr, nptr);
+                       SubTableData::SignalLengthWithTransId, JBB, ptr, nptr);
           }
         }
       }

=== modified file 'storage/ndb/src/mgmsrv/Config.hpp'
--- a/storage/ndb/src/mgmsrv/Config.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/mgmsrv/Config.hpp	2011-05-24 11:51:39 +0000
@@ -22,7 +22,7 @@
 #include <kernel/NodeBitmask.hpp>
 #include "ConfigInfo.hpp"
 #include <mgmapi.h>
-#include <mgmapi_configuration.hpp>
+#include "../mgmapi/mgmapi_configuration.hpp"
 
 
 /**

=== modified file 'storage/ndb/src/ndbapi/NdbEventOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbEventOperation.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/ndbapi/NdbEventOperation.cpp	2011-05-25 14:31:47 +0000
@@ -137,6 +137,12 @@ NdbEventOperation::getLatestGCI() const
   return m_impl.getLatestGCI();
 }
 
+Uint64
+NdbEventOperation::getTransId() const
+{
+  return m_impl.getTransId();
+}
+
 NdbDictionary::Event::TableEvent
 NdbEventOperation::getEventType() const
 {

=== modified file 'storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp	2011-02-23 12:15:04 +0000
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp	2011-05-25 14:31:47 +0000
@@ -791,6 +791,15 @@ NdbEventOperationImpl::getLatestGCI()
   return m_ndb->theEventBuffer->getLatestGCI();
 }
 
+Uint64
+NdbEventOperationImpl::getTransId() const
+{
+  /* Return 64 bit composite */
+  Uint32 transId1 = m_data_item->sdata->transId1;
+  Uint32 transId2 = m_data_item->sdata->transId2;
+  return Uint64(transId1) << 32 | transId2;
+}
+
 bool
 NdbEventOperationImpl::execSUB_TABLE_DATA(const NdbApiSignal * signal,
                                           const LinearSectionPtr ptr[3])
@@ -2763,6 +2772,12 @@ NdbEventBuffer::copy_data(const SubTable
   {
     data->sdata->gci_lo = 0;
   }
+  if (len < SubTableData::SignalLengthWithTransId)
+  {
+    /* No TransId, set to uninit value */
+    data->sdata->transId1 = ~Uint32(0);
+    data->sdata->transId2 = ~Uint32(0);
+  }
 
   int i;
   for (i = 0; i <= 2; i++)
@@ -2838,6 +2853,11 @@ NdbEventBuffer::merge_data(const SubTabl
 {
   DBUG_ENTER_EVENT("NdbEventBuffer::merge_data");
 
+  /* TODO : Consider how/if to merge multiple events/key with different
+   * transid
+   * Same consideration probably applies to AnyValue!
+   */
+
   Uint32 nkey = data->m_event_op->m_eventImpl->m_tableImpl->m_noOfKeys;
 
   int t1 = SubTableData::getOperation(data->sdata->requestInfo);

=== modified file 'storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp	2011-05-25 14:31:47 +0000
@@ -382,6 +382,7 @@ public:
   Uint64 getGCI();
   Uint32 getAnyValue() const;
   Uint64 getLatestGCI();
+  Uint64 getTransId() const;
   bool execSUB_TABLE_DATA(const NdbApiSignal * signal,
                           const LinearSectionPtr ptr[3]);
 

=== modified file 'storage/ndb/src/ndbapi/NdbInfo.cpp'
--- a/storage/ndb/src/ndbapi/NdbInfo.cpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/src/ndbapi/NdbInfo.cpp	2011-05-23 14:05:08 +0000
@@ -23,6 +23,7 @@ NdbInfo::NdbInfo(class Ndb_cluster_conne
                  const char* prefix, const char* dbname,
                  const char* table_prefix) :
   m_connect_count(connection->get_connect_count()),
+  m_min_db_version(0),
   m_connection(connection),
   m_tables_table(NULL), m_columns_table(NULL),
   m_prefix(prefix),
@@ -270,7 +271,9 @@ bool NdbInfo::load_tables()
   }
 
   // After sucessfull load of the tables, set connect count
+  // and the min db version of cluster
   m_connect_count = m_connection->get_connect_count();
+  m_min_db_version = m_connection->get_min_db_version();
   return true;
 }
 
@@ -328,12 +331,14 @@ void NdbInfo::flush_tables()
 bool
 NdbInfo::check_tables()
 {
-  if (m_connection->get_connect_count() != m_connect_count)
+  if (unlikely(m_connection->get_connect_count() != m_connect_count ||
+               m_connection->get_min_db_version() != m_min_db_version))
   {
-    // Connect count has changed -> flush the cached table definitions
+    // Connect count or min db version of cluster has changed
+    //  -> flush the cached table definitions
     flush_tables();
   }
-  if (m_tables.entries() <= NUM_HARDCODED_TABLES)
+  if (unlikely(m_tables.entries() <= NUM_HARDCODED_TABLES))
   {
     // Global table cache is not loaded yet or has been
     // flushed, try to load it

=== modified file 'storage/ndb/src/ndbapi/NdbInfo.hpp'
--- a/storage/ndb/src/ndbapi/NdbInfo.hpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/src/ndbapi/NdbInfo.hpp	2011-05-23 14:05:08 +0000
@@ -89,8 +89,6 @@ public:
   bool init(void);
   ~NdbInfo();
 
-  void flush_tables();
-
   int openTable(const char* table_name, const Table**);
   int openTable(Uint32 tableId, const Table**);
   void closeTable(const Table* table);
@@ -103,6 +101,7 @@ public:
 private:
   static const size_t NUM_HARDCODED_TABLES = 2;
   unsigned m_connect_count;
+  unsigned m_min_db_version;
   class Ndb_cluster_connection* m_connection;
   pthread_mutex_t m_mutex;
   HashMap<BaseString, Table, BaseString_get_key> m_tables;
@@ -119,6 +118,7 @@ private:
   bool load_hardcoded_tables(void);
   bool load_tables();
   bool check_tables();
+  void flush_tables();
 
   BaseString mysql_table_name(const char* table_name) const;
 

=== modified file 'storage/ndb/src/ndbapi/NdbInfoRecAttr.hpp'
--- a/storage/ndb/src/ndbapi/NdbInfoRecAttr.hpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/src/ndbapi/NdbInfoRecAttr.hpp	2011-05-23 13:45:57 +0000
@@ -46,13 +46,18 @@ public:
     return m_len;
   }
 
+  bool isNULL() const {
+    return !m_defined;
+  }
+
 protected:
   friend class NdbInfoScanOperation;
-  NdbInfoRecAttr() : m_data(NULL), m_len(0) {};
+  NdbInfoRecAttr() : m_data(NULL), m_len(0), m_defined(false) {};
   ~NdbInfoRecAttr() {};
 private:
   const char* m_data;
   Uint32 m_len;
+  bool m_defined;
 };
 
 #endif

=== modified file 'storage/ndb/src/ndbapi/NdbInfoScanOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbInfoScanOperation.cpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/src/ndbapi/NdbInfoScanOperation.cpp	2011-05-23 13:45:57 +0000
@@ -418,34 +418,35 @@ NdbInfoScanOperation::execDBINFO_TRANSID
   m_rows_received++;
   DBUG_PRINT("info", ("rows received: %d", m_rows_received));
 
-  const Uint32* start = signal->ptr[0].p;
-  const Uint32* end = start + signal->ptr[0].sz;
-
-  DBUG_PRINT("info", ("start: %p, end: %p", start, end));
-  for (unsigned col = 0; col < m_table->columns(); col++)
+  // Reset all recattr values before reading the new row
+  for (unsigned i = 0; i < m_recAttrs.size(); i++)
   {
+    if (m_recAttrs[i])
+      m_recAttrs[i]->m_defined = false;
+  }
 
-    // Read attribute header
-    const AttributeHeader ah(*start);
-    const Uint32 len = ah.getByteSize();
+  // Read attributes from long signal section
+  AttributeHeader* attr = (AttributeHeader*)signal->ptr[0].p;
+  AttributeHeader* last = (AttributeHeader*)(signal->ptr[0].p +
+                                            signal->ptr[0].sz);
+  while (attr < last)
+  {
+    const Uint32 col = attr->getAttributeId();
+    const Uint32 len = attr->getByteSize();
     DBUG_PRINT("info", ("col: %u, len: %u", col, len));
-
-    // Step past attribute header
-    start += ah.getHeaderSize();
-
-    NdbInfoRecAttr* attr = m_recAttrs[col];
-    if (attr)
+    if (col < m_recAttrs.size())
     {
-      // Update NdbInfoRecAttr pointer and length
-      attr->m_data = (const char*)start;
-      attr->m_len = len;
+      NdbInfoRecAttr* rec_attr = m_recAttrs[col];
+      if (rec_attr)
+      {
+        // Update NdbInfoRecAttr pointer, length and defined flag
+        rec_attr->m_data = (const char*)attr->getDataPtr();
+        rec_attr->m_len = len;
+        rec_attr->m_defined = true;
+      }
     }
 
-    // Step to next attribute header
-    start += ah.getDataSize();
-
-    // No reading beyond end of signal size
-    assert(start <= end);
+    attr = attr->getNext();
   }
 
   DBUG_RETURN(false); // Don't wait more, process this row

=== modified file 'storage/ndb/src/ndbapi/NdbOperationExec.cpp'
--- a/storage/ndb/src/ndbapi/NdbOperationExec.cpp	2011-05-11 13:37:37 +0000
+++ b/storage/ndb/src/ndbapi/NdbOperationExec.cpp	2011-05-26 15:04:45 +0000
@@ -1120,7 +1120,8 @@ NdbOperation::buildSignalsNdbRecord(Uint
   /* Final update signal words */
   if ((tOpType == InsertRequest) || 
       (tOpType == WriteRequest) ||
-      (tOpType == UpdateRequest))
+      (tOpType == UpdateRequest) ||
+      (tOpType == RefreshRequest))
   {
     updRow= m_attribute_row;
     NdbBlob *currentBlob= theBlobList;
@@ -1333,7 +1334,8 @@ NdbOperation::buildSignalsNdbRecord(Uint
 
   if ((tOpType == InsertRequest) ||
       (tOpType == WriteRequest) ||
-      (tOpType == UpdateRequest))
+      (tOpType == UpdateRequest) ||
+      (tOpType == RefreshRequest))
   {
     /* Handle setAnyValue() for all cases except delete */
     if ((m_flags & OF_USE_ANY_VALUE) != 0)

=== modified file 'storage/ndb/src/ndbapi/NdbQueryOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-05-11 13:37:37 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-05-26 15:04:45 +0000
@@ -3185,18 +3185,19 @@ NdbQueryImpl::OrderedFragSet::reorganize
     while(first<last)
     {
       assert(middle<m_activeFragCount);
-      switch(compare(*m_activeFrags[m_activeFragCount-1], 
-                     *m_activeFrags[middle]))
+      const int cmpRes = compare(*m_activeFrags[m_activeFragCount-1], 
+                                 *m_activeFrags[middle]);
+      if (cmpRes < 0)
       {
-      case -1:
         first = middle + 1;
-        break;
-      case 0:
+      }
+      else if (cmpRes == 0)
+      {
         last = first = middle;
-        break;
-      case 1:
+      }
+      else
+      {
         last = middle;
-        break;
       }
       middle = (first+last)/2;
     }
@@ -3245,7 +3246,7 @@ NdbQueryImpl::OrderedFragSet::add(NdbRoo
       int current = 0;
       // Insert the new frag such that the array remains sorted.
       while(current<m_activeFragCount && 
-            compare(frag, *m_activeFrags[current])==-1)
+            compare(frag, *m_activeFrags[current]) < 0)
       {
         current++;
       }
@@ -3289,7 +3290,7 @@ NdbQueryImpl::OrderedFragSet::verifySort
 {
   for(int i = 0; i<m_activeFragCount-2; i++)
   {
-    if(compare(*m_activeFrags[i], *m_activeFrags[i+1])==-1)
+    if(compare(*m_activeFrags[i], *m_activeFrags[i+1]) < 0)
     {
       assert(false);
       return false;
@@ -3302,7 +3303,7 @@ NdbQueryImpl::OrderedFragSet::verifySort
 /**
  * Compare frags such that f1<f2 if f1 is empty but f2 is not.
  * - Othewise compare record contents.
- * @return -1 if frag1<frag2, 0 if frag1 == frag2, otherwise 1.
+ * @return negative if frag1<frag2, 0 if frag1 == frag2, otherwise positive.
 */
 int
 NdbQueryImpl::OrderedFragSet::compare(const NdbRootFragment& frag1,

=== modified file 'storage/ndb/src/ndbapi/NdbTransaction.cpp'
--- a/storage/ndb/src/ndbapi/NdbTransaction.cpp	2011-04-27 11:50:17 +0000
+++ b/storage/ndb/src/ndbapi/NdbTransaction.cpp	2011-05-26 15:04:45 +0000
@@ -2209,6 +2209,7 @@ NdbTransaction::receiveTCKEY_FAILCONF(co
       case NdbOperation::DeleteRequest:
       case NdbOperation::WriteRequest:
       case NdbOperation::UnlockRequest:
+      case NdbOperation::RefreshRequest:
 	tOp = tOp->next();
 	break;
       case NdbOperation::ReadRequest:
@@ -2713,6 +2714,52 @@ NdbTransaction::writeTuple(const NdbReco
   return op;
 }
 
+const NdbOperation *
+NdbTransaction::refreshTuple(const NdbRecord *key_rec, const char *key_row,
+                             const NdbOperation::OperationOptions *opts,
+                             Uint32 sizeOfOptions)
+{
+  /* Check TC node version lockless */
+  {
+    Uint32 tcVer = theNdb->theImpl->getNodeInfo(theDBnode).m_info.m_version;
+    if (unlikely(! ndb_refresh_tuple(tcVer)))
+    {
+      /* Function not implemented yet */
+      setOperationErrorCodeAbort(4003);
+      return NULL;
+    }
+  }
+
+  /* Check that the NdbRecord specifies the full primary key. */
+  if (!(key_rec->flags & NdbRecord::RecHasAllKeys))
+  {
+    setOperationErrorCodeAbort(4292);
+    return NULL;
+  }
+
+  Uint8 keymask[NDB_MAX_ATTRIBUTES_IN_TABLE/8];
+  bzero(keymask, sizeof(keymask));
+  for (Uint32 i = 0; i<key_rec->key_index_length; i++)
+  {
+    Uint32 id = key_rec->columns[key_rec->key_indexes[i]].attrId;
+    keymask[(id / 8)] |= (1 << (id & 7));
+  }
+
+  NdbOperation *op= setupRecordOp(NdbOperation::RefreshRequest,
+                                  NdbOperation::LM_Exclusive,
+                                  NdbOperation::AbortOnError,
+                                  key_rec, key_row,
+                                  key_rec, key_row,
+                                  keymask /* mask */,
+                                  opts,
+                                  sizeOfOptions);
+  if(!op)
+    return op;
+
+  theSimpleState= 0;
+
+  return op;
+}
 
 NdbScanOperation *
 NdbTransaction::scanTable(const NdbRecord *result_record,

=== modified file 'storage/ndb/src/ndbapi/TransporterFacade.hpp'
--- a/storage/ndb/src/ndbapi/TransporterFacade.hpp	2011-02-28 12:25:52 +0000
+++ b/storage/ndb/src/ndbapi/TransporterFacade.hpp	2011-05-26 15:04:45 +0000
@@ -303,6 +303,12 @@ unsigned Ndb_cluster_connection_impl::ge
 }
 
 inline
+unsigned Ndb_cluster_connection_impl::get_min_db_version() const
+{
+  return m_transporter_facade->getMinDbNodeVersion();
+}
+
+inline
 bool
 TransporterFacade::get_node_alive(NodeId n) const {
   if (theClusterMgr)

=== modified file 'storage/ndb/src/ndbapi/ndb_cluster_connection.cpp'
--- a/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp	2011-04-15 06:29:59 +0000
+++ b/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp	2011-05-23 14:05:08 +0000
@@ -332,6 +332,11 @@ unsigned Ndb_cluster_connection::get_con
   return m_impl.get_connect_count();
 }
 
+unsigned Ndb_cluster_connection::get_min_db_version() const
+{
+  return m_impl.get_min_db_version();
+}
+
 int Ndb_cluster_connection::get_latest_error() const
 {
   return m_impl.m_latest_error;

=== modified file 'storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp'
--- a/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp	2011-02-04 17:52:38 +0000
+++ b/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp	2011-05-23 14:05:08 +0000
@@ -67,6 +67,7 @@ class Ndb_cluster_connection_impl : publ
   Uint32 get_next_alive_node(Ndb_cluster_connection_node_iter &iter);
 
   inline unsigned get_connect_count() const;
+  inline unsigned get_min_db_version() const;
 public:
   inline Uint64 *get_latest_trans_gci() { return &m_latest_trans_gci; }
 

=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2011-05-11 13:37:37 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2011-05-26 15:04:45 +0000
@@ -750,6 +750,7 @@ ErrorBundle ErrorCodes[] = {
   { 2810, DMEC, TR, "No space left on the device" },
   { 2811, DMEC, TR, "Error with file permissions, please check file system" },
   { 2815, DMEC, TR, "Error in reading files, please check file system" },
+  {  920, DMEC, AE, "Row operation defined after refreshTuple()" },
 
   /**
    * NdbQueryBuilder API errors

=== modified file 'storage/ndb/test/include/HugoOperations.hpp'
--- a/storage/ndb/test/include/HugoOperations.hpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/test/include/HugoOperations.hpp	2011-05-25 14:31:47 +0000
@@ -87,6 +87,11 @@ public:  
 		     int recordNo,
 		     int numRecords = 1);
   
+  int pkRefreshRecord(Ndb*,
+                      int recordNo,
+                      int numRecords = 1,
+                      int anyValueInfo = 0); /* 0 - none, 1+ Val | record */
+
   int execute_Commit(Ndb*, 
 		     AbortOption ao = AbortOnError);
   int execute_NoCommit(Ndb*,
@@ -147,6 +152,10 @@ public:  
   const NdbError& getNdbError() const;
   void setQuiet() { m_quiet = true; }
 
+  typedef Uint32 (*AnyValueCallback)(Ndb*, NdbTransaction*, int rowid, int updVal);
+
+  void setAnyValueCallback(AnyValueCallback);
+
 protected:
   void allocRows(int rows);
   void deallocRows();
@@ -165,10 +174,13 @@ protected:
   int m_async_return;
   friend void HugoOperations_async_callback(int, NdbTransaction*, void*);
   void callback(int res, NdbTransaction*);
+  Uint32 getAnyValueForRowUpd(int row, int update);
+
 
   void setNdbError(const NdbError& error);
   NdbError m_error;
   bool m_quiet;
+  AnyValueCallback avCallback;
 };
 
 #endif

=== modified file 'storage/ndb/test/include/HugoTransactions.hpp'
--- a/storage/ndb/test/include/HugoTransactions.hpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/test/include/HugoTransactions.hpp	2011-05-25 13:19:02 +0000
@@ -110,6 +110,9 @@ public:
 		   int batch = 1,
 		   bool allowConstraintViolation = true,
 		   int doSleep = 0);
+
+  int pkRefreshRecords(Ndb*, int startFrom, int count = 1, int batch = 1);
+
   int lockRecords(Ndb*,
 		  int records,
 		  int percentToLock = 1,

=== modified file 'storage/ndb/test/ndbapi/testBasic.cpp'
--- a/storage/ndb/test/ndbapi/testBasic.cpp	2011-05-07 06:17:02 +0000
+++ b/storage/ndb/test/ndbapi/testBasic.cpp	2011-05-25 13:19:02 +0000
@@ -2415,6 +2415,811 @@ runEnd899(NDBT_Context* ctx, NDBT_Step* 
 }
 
 
+int initSubscription(NDBT_Context* ctx, NDBT_Step* step){
+  /* Subscribe to events on the table, and put access
+   * to the subscription somewhere handy
+   */
+  Ndb* pNdb = GETNDB(step);
+  const NdbDictionary::Table& tab = *ctx->getTab();
+  bool merge_events = false;
+  bool report = false;
+
+  char eventName[1024];
+  sprintf(eventName,"%s_EVENT",tab.getName());
+
+  NdbDictionary::Dictionary *myDict = pNdb->getDictionary();
+
+  if (!myDict) {
+    g_err << "Dictionary not found "
+	  << pNdb->getNdbError().code << " "
+	  << pNdb->getNdbError().message << endl;
+    return NDBT_FAILED;
+  }
+
+  myDict->dropEvent(eventName);
+
+  NdbDictionary::Event myEvent(eventName);
+  myEvent.setTable(tab.getName());
+  myEvent.addTableEvent(NdbDictionary::Event::TE_ALL);
+  for(int a = 0; a < tab.getNoOfColumns(); a++){
+    myEvent.addEventColumn(a);
+  }
+  myEvent.mergeEvents(merge_events);
+
+  if (report)
+    myEvent.setReport(NdbDictionary::Event::ER_SUBSCRIBE);
+
+  int res = myDict->createEvent(myEvent); // Add event to database
+
+  if (res == 0)
+    myEvent.print();
+  else if (myDict->getNdbError().classification ==
+	   NdbError::SchemaObjectExists)
+  {
+    g_info << "Event creation failed event exists\n";
+    res = myDict->dropEvent(eventName);
+    if (res) {
+      g_err << "Failed to drop event: "
+	    << myDict->getNdbError().code << " : "
+	    << myDict->getNdbError().message << endl;
+      return NDBT_FAILED;
+    }
+    // try again
+    res = myDict->createEvent(myEvent); // Add event to database
+    if (res) {
+      g_err << "Failed to create event (1): "
+	    << myDict->getNdbError().code << " : "
+	    << myDict->getNdbError().message << endl;
+      return NDBT_FAILED;
+    }
+  }
+  else
+  {
+    g_err << "Failed to create event (2): "
+	  << myDict->getNdbError().code << " : "
+	  << myDict->getNdbError().message << endl;
+    return NDBT_FAILED;
+  }
+
+  return NDBT_OK;
+}
+
+int removeSubscription(NDBT_Context* ctx, NDBT_Step* step){
+  /* Remove subscription created above */
+  Ndb* pNdb = GETNDB(step);
+  const NdbDictionary::Table& tab = *ctx->getTab();
+
+  char eventName[1024];
+  sprintf(eventName,"%s_EVENT",tab.getName());
+
+  NdbDictionary::Dictionary *myDict = pNdb->getDictionary();
+
+  if (!myDict) {
+    g_err << "Dictionary not found "
+	  << pNdb->getNdbError().code << " "
+	  << pNdb->getNdbError().message << endl;
+    return NDBT_FAILED;
+  }
+
+  myDict->dropEvent(eventName);
+
+  return NDBT_OK;
+}
+
+int runVerifyRowCount(NDBT_Context* ctx, NDBT_Step* step)
+{
+  Ndb* ndb = GETNDB(step);
+
+  /* Check that number of results returned by a normal scan
+   * and per-fragment rowcount sum are equal
+   */
+  Uint32 rowCountSum = 0;
+  Uint32 rowScanCount = 0;
+
+  int result = NDBT_OK;
+  do
+  {
+    NdbTransaction* trans = ndb->startTransaction();
+    CHECK(trans != NULL);
+
+    NdbScanOperation* scan = trans->getNdbScanOperation(ctx->getTab());
+    CHECK(scan != NULL);
+
+    CHECK(scan->readTuples(NdbScanOperation::LM_CommittedRead) == 0);
+
+    NdbInterpretedCode code;
+
+    CHECK(code.interpret_exit_last_row() == 0);
+    CHECK(code.finalise() == 0);
+
+    NdbRecAttr* rowCountRA = scan->getValue(NdbDictionary::Column::ROW_COUNT);
+    CHECK(rowCountRA != NULL);
+    CHECK(scan->setInterpretedCode(&code) == 0);
+
+    CHECK(trans->execute(NoCommit) == 0);
+
+    while (scan->nextResult() == 0)
+      rowCountSum+= rowCountRA->u_32_value();
+
+    trans->close();
+
+    trans = ndb->startTransaction();
+    CHECK(trans != NULL);
+
+    scan = trans->getNdbScanOperation(ctx->getTab());
+    CHECK(scan != NULL);
+
+    CHECK(scan->readTuples(NdbScanOperation::LM_CommittedRead) == 0);
+
+    rowCountRA = scan->getValue(NdbDictionary::Column::ROW_COUNT);
+    CHECK(rowCountRA != NULL);
+
+    CHECK(trans->execute(NoCommit) == 0);
+
+    while (scan->nextResult() == 0)
+      rowScanCount++;
+
+    trans->close();
+  }
+  while(0);
+
+  if (result == NDBT_OK)
+  {
+    ndbout_c("Sum of fragment row counts : %u  Number rows scanned : %u",
+             rowCountSum,
+             rowScanCount);
+
+    if (rowCountSum != rowScanCount)
+    {
+      ndbout_c("MISMATCH");
+      result = NDBT_FAILED;
+    }
+  }
+
+  return result;
+}
+
+enum ApiEventType { Insert, Update, Delete };
+
+template class Vector<ApiEventType>;
+
+struct EventInfo
+{
+  ApiEventType type;
+  int id;
+  Uint64 gci;
+};
+template class Vector<EventInfo>;
+
+int collectEvents(Ndb* ndb,
+                  HugoCalculator& calc,
+                  const NdbDictionary::Table& tab,
+                  Vector<EventInfo>& receivedEvents,
+                  int idCol,
+                  int updateCol,
+                  Vector<NdbRecAttr*>* beforeAttrs,
+                  Vector<NdbRecAttr*>* afterAttrs)
+{
+  int MaxTimeouts = 5;
+  while (true)
+  {
+    int res = ndb->pollEvents(1000);
+
+    if (res > 0)
+    {
+      NdbEventOperation* pOp;
+      while ((pOp = ndb->nextEvent()))
+      {
+        bool isDelete = (pOp->getEventType() == NdbDictionary::Event::TE_DELETE);
+        Vector<NdbRecAttr*>* whichVersion =
+          isDelete?
+          beforeAttrs :
+          afterAttrs;
+        int id = (*whichVersion)[idCol]->u_32_value();
+        Uint64 gci = pOp->getGCI();
+        Uint32 anyValue = pOp->getAnyValue();
+        Uint32 scenario = ((anyValue >> 24) & 0xff) -1;
+        Uint32 optype = ((anyValue >> 16) & 0xff);
+        Uint32 recNum = (anyValue & 0xffff);
+
+        g_err << "# " << receivedEvents.size()
+              << " GCI : " << (gci >> 32)
+              << "/"
+              << (gci & 0xffffffff)
+              << " id : "
+              << id
+              << " scenario : " << scenario
+              << " optype : " << optype
+              << " record : " << recNum
+              << "  ";
+
+        /* Check event has self-consistent data */
+        int updatesValue = (*whichVersion)[updateCol]->u_32_value();
+
+        if ((*whichVersion)[updateCol]->isNULL() ||
+            (*whichVersion)[idCol]->isNULL())
+        {
+          g_err << "Null update/id cols : REFRESH of !EXISTS  ";
+        }
+
+        g_err << "(Updates val = " << updatesValue << ")";
+
+        for (int i=0; i < (int) whichVersion->size(); i++)
+        {
+          /* Check PK columns and also other columns for non-delete */
+          if (!isDelete ||
+              tab.getColumn(i)->getPrimaryKey())
+          {
+            NdbRecAttr* ra = (*whichVersion)[i];
+            if (calc.verifyRecAttr(recNum, updatesValue, ra) != 0)
+            {
+              g_err << "Verify failed on recNum : " << recNum << " with updates value "
+                    << updatesValue << " for column " << ra->getColumn()->getAttrId()
+                    << endl;
+              return NDBT_FAILED;
+            }
+          }
+        }
+
+        EventInfo ei;
+
+        switch (pOp->getEventType())
+        {
+        case NdbDictionary::Event::TE_INSERT:
+          g_err << " Insert event" << endl;
+          ei.type = Insert;
+          break;
+        case NdbDictionary::Event::TE_DELETE:
+          ei.type = Delete;
+          g_err << " Delete event" << endl;
+          break;
+        case NdbDictionary::Event::TE_UPDATE:
+          ei.type = Update;
+          g_err << " Update event" << endl;
+          break;
+        default:
+          g_err << " Event type : " << pOp->getEventType() << endl;
+          abort();
+          break;
+        }
+
+        ei.id = recNum;
+        ei.gci = gci;
+
+        receivedEvents.push_back(ei);
+      }
+    }
+    else
+    {
+      if (--MaxTimeouts == 0)
+      {
+        break;
+      }
+    }
+  }
+
+  return NDBT_OK;
+}
+
+int verifyEvents(const Vector<EventInfo>& receivedEvents,
+                 const Vector<ApiEventType>& expectedEvents,
+                 int records)
+{
+  /* Now verify received events against expected
+   * This is messy as events occurring in the same epoch are unordered
+   * except via id, so we use id-duplicates to determine which event
+   * sequence we're looking at.
+   */
+  g_err << "Received total of " << receivedEvents.size() << " events" << endl;
+  Vector<Uint32> keys;
+  Vector<Uint64> gcis;
+  Uint32 z = 0;
+  Uint64 z2 = 0;
+  keys.fill(records, z);
+  gcis.fill(records, z2);
+  Uint64 currGci = 0;
+
+  for (Uint32 e=0; e < receivedEvents.size(); e++)
+  {
+    EventInfo ei = receivedEvents[e];
+
+    if (ei.gci != currGci)
+    {
+      if (ei.gci < currGci)
+        abort();
+
+      /* Epoch boundary */
+      /* At this point, all id counts must be equal */
+      for (int i=0; i < records; i++)
+      {
+        if (keys[i] != keys[0])
+        {
+          g_err << "Count for id " << i
+                << " is " << keys[i]
+                << " but should be " << keys[0] << endl;
+          return NDBT_FAILED;
+        }
+      }
+
+      currGci = ei.gci;
+    }
+
+    Uint32 eventIndex = keys[ei.id];
+    keys[ei.id]++;
+
+    ApiEventType et = expectedEvents[eventIndex];
+
+    if (ei.type != et)
+    {
+      g_err << "Expected event of type " << et
+            << " but found " << ei.type
+            << " at expectedEvent " << eventIndex
+            << " and event num " << e << endl;
+      return NDBT_FAILED;
+    }
+  }
+
+  return NDBT_OK;
+}
+
+int runRefreshTuple(NDBT_Context* ctx, NDBT_Step* step){
+  int records = ctx->getNumRecords();
+  Ndb* ndb = GETNDB(step);
+
+  /* Now attempt to create EventOperation */
+  NdbEventOperation* pOp;
+  const NdbDictionary::Table& tab = *ctx->getTab();
+
+  char eventName[1024];
+  sprintf(eventName,"%s_EVENT",tab.getName());
+
+  pOp = ndb->createEventOperation(eventName);
+  if (pOp == NULL)
+  {
+    g_err << "Failed to create event operation\n";
+    return NDBT_FAILED;
+  }
+
+  HugoCalculator calc(tab);
+  Vector<NdbRecAttr*> eventAfterRecAttr;
+  Vector<NdbRecAttr*> eventBeforeRecAttr;
+  int updateCol = -1;
+  int idCol = -1;
+
+  /* Now request all attributes */
+  for (int a = 0; a < tab.getNoOfColumns(); a++)
+  {
+    eventAfterRecAttr.push_back(pOp->getValue(tab.getColumn(a)->getName()));
+    eventBeforeRecAttr.push_back(pOp->getPreValue(tab.getColumn(a)->getName()));
+    if (calc.isIdCol(a))
+      idCol = a;
+    if (calc.isUpdateCol(a))
+      updateCol = a;
+  }
+
+  /* Now execute the event */
+  if (pOp->execute())
+  {
+    g_err << "Event operation execution failed : " << pOp->getNdbError() << endl;
+    return NDBT_FAILED;
+  }
+
+  HugoOperations hugoOps(*ctx->getTab());
+  int scenario = 0;
+
+  Vector<ApiEventType> expectedEvents;
+
+  for (scenario = 0; scenario < 2; scenario++)
+  {
+    g_err << "Scenario = " << scenario
+          << " ( Refresh "
+          << ((scenario == 0)? "before":"after")
+          << " operations )" << endl;
+    int optype = 0;
+    bool done = false;
+    int expectedError = 0;
+    do
+    {
+      check(hugoOps.startTransaction(ndb) == 0, hugoOps);
+
+      if (scenario == 0)
+      {
+        g_err << "Refresh before operations" << endl;
+        int anyValue =
+          ((1) << 8) |
+          optype;
+        check(hugoOps.pkRefreshRecord(ndb, 0, records, anyValue) == 0, hugoOps);
+      }
+
+      switch(optype)
+      {
+      case 0:
+      {
+        /* Refresh with no data present */
+        g_err << "  Do nothing" << endl;
+        expectedError = 0; /* Single refresh should always be fine */
+        expectedEvents.push_back(Delete);
+        break;
+      }
+      case 1:
+      {
+        /* [Refresh] Insert [Refresh] */
+        g_err << "  Insert" << endl;
+        check(hugoOps.pkInsertRecord(ndb, 0, records, 1) == 0, hugoOps);
+        if (scenario == 0)
+        {
+          /* Tuple already existed error when we insert after refresh */
+          expectedError = 630;
+          expectedEvents.push_back(Delete);
+        }
+        else
+        {
+          expectedError = 0;
+          expectedEvents.push_back(Insert);
+        }
+        /* Tuple already existed error when we insert after refresh */
+        break;
+      }
+      case 2:
+      {
+        /* Refresh */
+        g_err << "  Refresh" << endl;
+        if (scenario == 0)
+        {
+          expectedEvents.push_back(Delete);
+        }
+        else
+        {
+          expectedEvents.push_back(Insert);
+        }
+        expectedError = 0;
+        break;
+      }
+      case 3:
+      {
+        /* [Refresh] Update [Refresh] */
+        g_err << "  Update" << endl;
+        check(hugoOps.pkUpdateRecord(ndb, 0, records, 3) == 0, hugoOps);
+        if (scenario == 0)
+        {
+          expectedError = 920;
+          expectedEvents.push_back(Delete);
+        }
+        else
+        {
+          expectedError = 0;
+          expectedEvents.push_back(Insert);
+        }
+        break;
+      }
+      case 4:
+      {
+        /* [Refresh] Delete [Refresh] */
+        g_err << "  [Refresh] Delete [Refresh]" << endl;
+        if (scenario == 0)
+        {
+          expectedError = 920;
+          expectedEvents.push_back(Delete);
+        }
+        else
+        {
+          expectedError = 0;
+          expectedEvents.push_back(Delete);
+        }
+        check(hugoOps.pkDeleteRecord(ndb, 0, records) == 0, hugoOps);
+        break;
+      }
+      case 5:
+      {
+        g_err << "  Refresh" << endl;
+        expectedError = 0;
+        expectedEvents.push_back(Delete);
+        /* Refresh with no data present */
+        break;
+      }
+      case 6:
+      {
+        g_err << "  Double refresh" << endl;
+        int anyValue =
+          ((2) << 8) |
+          optype;
+        check(hugoOps.pkRefreshRecord(ndb, 0, records, anyValue) == 0, hugoOps);
+        expectedError = 920; /* Row operation defined after refreshTuple() */
+        expectedEvents.push_back(Delete);
+      }
+      default:
+        done = true;
+        break;
+      }
+
+      if (scenario == 1)
+      {
+        g_err << "Refresh after operations" << endl;
+        int anyValue =
+          ((4) << 8) |
+          optype;
+        check(hugoOps.pkRefreshRecord(ndb, 0, records, anyValue) == 0, hugoOps);
+      }
+
+      int rc = hugoOps.execute_Commit(ndb, AO_IgnoreError);
+      check(rc == expectedError, hugoOps);
+
+      check(hugoOps.closeTransaction(ndb) == 0, hugoOps);
+
+      optype++;
+
+
+      /* Now check fragment counts vs findable row counts */
+      if (runVerifyRowCount(ctx, step) != NDBT_OK)
+        return NDBT_FAILED;
+
+    } while (!done);
+  } // for scenario...
+
+  /* Now check fragment counts vs findable row counts */
+  if (runVerifyRowCount(ctx, step) != NDBT_OK)
+    return NDBT_FAILED;
+
+  /* Now let's dump and check the events */
+  g_err << "Expecting the following sequence..." << endl;
+  for (Uint32 i=0; i < expectedEvents.size(); i++)
+  {
+    g_err << i << ".  ";
+    switch(expectedEvents[i])
+    {
+    case Insert:
+      g_err << "Insert" << endl;
+      break;
+    case Update:
+      g_err << "Update" << endl;
+      break;
+    case Delete:
+      g_err << "Delete" << endl;
+      break;
+    default:
+      abort();
+    }
+  }
+
+  Vector<EventInfo> receivedEvents;
+
+  int rc = collectEvents(ndb, calc, tab, receivedEvents, idCol, updateCol,
+                         &eventBeforeRecAttr,
+                         &eventAfterRecAttr);
+  if (rc == NDBT_OK)
+  {
+    rc = verifyEvents(receivedEvents,
+                      expectedEvents,
+                      records);
+  }
+
+  if (ndb->dropEventOperation(pOp) != 0)
+  {
+    g_err << "Drop Event Operation failed : " << ndb->getNdbError() << endl;
+    return NDBT_FAILED;
+  }
+
+  return rc;
+};
+
+enum PreRefreshOps  /* operations issued in trans1 before refreshTuple() */
+{
+  PR_NONE,          /* refresh only */
+  PR_INSERT,        /* insert row, then refresh */
+  PR_INSERTDELETE,  /* insert then delete row, then refresh */
+  PR_DELETE         /* delete (pre-existing) row, then refresh */
+};
+
+struct RefreshScenario
+{
+  const char*   name;           /* human-readable scenario description */
+  bool          preExist;       /* row is committed before trans1 starts */
+  PreRefreshOps preRefreshOps;  /* ops run in trans1 ahead of the refresh */
+};
+
+static RefreshScenario refreshTests[] = {  /* scenarios driven by runRefreshLocking() */
+  { "No row, No pre-ops",        false, PR_NONE         },
+  { "No row, Insert pre-op",     false, PR_INSERT       },
+  { "No row, Insert-Del pre-op", false, PR_INSERTDELETE },
+  { "Row exists, No pre-ops",    true,  PR_NONE         },
+  { "Row exists, Delete pre-op", true,  PR_DELETE       }
+};
+
+enum OpTypes  /* conflicting operation attempted from trans2 */
+{
+  READ_C,  /* read with LM_CommittedRead */
+  READ_S,  /* read with LM_Read (shared lock) */
+  READ_E,  /* read with LM_Exclusive */
+  INSERT,
+  UPDATE,
+  WRITE,
+  DELETE,
+  LAST     /* sentinel — keep last, never executed */
+};
+
+const char* opTypeNames[] =  /* must stay in sync with enum OpTypes above */
+{
+  "READ_C",
+  "READ_S",
+  "READ_E",
+  "INSERT",
+  "UPDATE",
+  "WRITE",
+  "DELETE"
+};
+
+
+int
+runRefreshLocking(NDBT_Context* ctx, NDBT_Step* step)
+{
+  /* Check that refresh in various situations has the
+   * locks we expect it to
+   * Scenario combinations :
+   *   No row pre-existing | Row pre-existing
+   *   Trans1 : Refresh | Insert-Refresh | Insert-Delete-Refresh
+   *            Delete-Refresh
+   *   Trans2 : Read [Committed|Shared|Exclusive] | Insert | Update
+   *            Write | Delete
+   *
+   * Expectations : Read committed  always non-blocking
+   *                Read committed sees pre-existing row
+   *                All other trans2 operations deadlock
+   */
+
+  Ndb* ndb = GETNDB(step);
+  Uint32 numScenarios = sizeof(refreshTests) / sizeof(refreshTests[0]);
+  HugoTransactions hugoTrans(*ctx->getTab());
+
+  for (Uint32 s = 0; s < numScenarios; s++)
+  {
+    RefreshScenario& scenario = refreshTests[s];
+
+    if (scenario.preExist)
+    {
+      /* Create pre-existing tuple */
+      if (hugoTrans.loadTable(ndb, 1) != 0)
+      {
+        g_err << "Pre-exist failed : " << hugoTrans.getNdbError() << endl;
+        return NDBT_FAILED;
+      }
+    }
+
+    if (hugoTrans.startTransaction(ndb) != 0)
+    {
+      g_err << "Start trans failed : " << hugoTrans.getNdbError() << endl;
+      return NDBT_FAILED;
+    }
+
+    g_err << "Scenario : " << scenario.name << endl;
+
+    /* Do pre-refresh ops (all within trans1, uncommitted) */
+    switch (scenario.preRefreshOps)
+    {
+    case PR_NONE:
+      break;
+    case PR_INSERT:
+    case PR_INSERTDELETE:
+      if (hugoTrans.pkInsertRecord(ndb, 0) != 0)
+      {
+        g_err << "Pre insert failed : " << hugoTrans.getNdbError() << endl;
+        return NDBT_FAILED;
+      }
+
+      if (scenario.preRefreshOps == PR_INSERT)
+        break;
+    case PR_DELETE:  /* fall through from PR_INSERTDELETE is intentional */
+      if (hugoTrans.pkDeleteRecord(ndb, 0) != 0)
+      {
+        g_err << "Pre delete failed : " << hugoTrans.getNdbError() << endl;
+        return NDBT_FAILED;
+      }
+      break;
+    }
+
+    /* Then refresh */
+    if (hugoTrans.pkRefreshRecord(ndb, 0) != 0)
+    {
+      g_err << "Refresh failed : " << hugoTrans.getNdbError() << endl;
+      return NDBT_FAILED;
+    }
+
+    /* Now execute — trans1 stays open, holding its row lock */
+    if (hugoTrans.execute_NoCommit(ndb) != 0)
+    {
+      g_err << "Execute failed : " << hugoTrans.getNdbError() << endl;
+      return NDBT_FAILED;
+    }
+
+    {
+      /* Now try ops from another transaction */
+      HugoOperations hugoOps(*ctx->getTab());
+      Uint32 ot = READ_C;
+
+      while (ot < LAST)
+      {
+        if (hugoOps.startTransaction(ndb) != 0)
+        {
+          g_err << "Start trans2 failed : " << hugoOps.getNdbError() << endl;
+          return NDBT_FAILED;
+        }
+
+        g_err << "Operation type : " << opTypeNames[ot] << endl;
+        int res = 0;  /* NOTE(review): res is never checked; failures surface via getNdbError() below */
+        switch (ot)
+        {
+        case READ_C:
+          res = hugoOps.pkReadRecord(ndb,0,1,NdbOperation::LM_CommittedRead);
+          break;
+        case READ_S:
+          res = hugoOps.pkReadRecord(ndb,0,1,NdbOperation::LM_Read);
+          break;
+        case READ_E:
+          res = hugoOps.pkReadRecord(ndb,0,1,NdbOperation::LM_Exclusive);
+          break;
+        case INSERT:
+          res = hugoOps.pkInsertRecord(ndb, 0);
+          break;
+        case UPDATE:
+          res = hugoOps.pkUpdateRecord(ndb, 0);
+          break;
+        case WRITE:
+          res = hugoOps.pkWriteRecord(ndb, 0);
+          break;
+        case DELETE:
+          res = hugoOps.pkDeleteRecord(ndb, 0);
+          break;
+        case LAST:
+          abort();  /* sentinel value, unreachable */
+        }
+
+        hugoOps.execute_Commit(ndb);  /* outcome inspected via getNdbError() */
+
+        if ((ot == READ_C) && (scenario.preExist))
+        {
+          if (hugoOps.getNdbError().code == 0)
+          {
+            g_err << "Read committed succeeded" << endl;
+          }
+          else
+          {
+            g_err << "UNEXPECTED : Read committed failed. " << hugoOps.getNdbError() << endl;
+            return NDBT_FAILED;
+          }
+        }
+        else
+        {
+          /* Everything except committed-read must fail against trans1's lock */
+          if (hugoOps.getNdbError().code == 0)
+          {
+            g_err << opTypeNames[ot] << " succeeded, should not have" << endl;
+            return NDBT_FAILED;
+          }
+        }
+
+        hugoOps.closeTransaction(ndb);
+
+        ot = ot + 1;
+      }
+
+    }
+
+    /* Close refresh transaction */
+    hugoTrans.closeTransaction(ndb);
+
+    if (scenario.preExist)
+    {
+      /* Cleanup pre-existing before next iteration */
+      if (hugoTrans.pkDelRecords(ndb, 0) != 0)  /* NOTE(review): confirm '0' here deletes the loaded row rather than zero rows */
+      {
+        g_err << "Delete pre existing failed : " << hugoTrans.getNdbError() << endl;
+        return NDBT_FAILED;
+      }
+    }
+  }
+
+  return NDBT_OK;
+}
+
+
 NDBT_TESTSUITE(testBasic);
 TESTCASE("PkInsert", 
 	 "Verify that we can insert and delete from this table using PK"
@@ -2746,6 +3551,12 @@ TESTCASE("UnlockUpdateBatch",
   STEP(runPkRead);
   FINALIZER(runClearTable);
 }
+TESTCASE("RefreshTuple",
+         "Test refreshTuple() operation properties"){
+  INITIALIZER(initSubscription);
+  INITIALIZER(runRefreshTuple);
+  FINALIZER(removeSubscription);
+}
 TESTCASE("Bug54986", "")
 {
   INITIALIZER(runBug54986);
@@ -2773,6 +3584,11 @@ TESTCASE("899", "")
   STEP(runTest899);
   FINALIZER(runEnd899);
 }
+TESTCASE("RefreshLocking",
+         "Test Refresh locking properties")
+{
+  INITIALIZER(runRefreshLocking);
+}
 NDBT_TESTSUITE_END(testBasic);
 
 #if 0

=== modified file 'storage/ndb/test/ndbapi/testIndex.cpp'
--- a/storage/ndb/test/ndbapi/testIndex.cpp	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/test/ndbapi/testIndex.cpp	2011-05-25 13:19:02 +0000
@@ -1744,6 +1744,73 @@ runMixed2(NDBT_Context* ctx, NDBT_Step* 
   return NDBT_FAILED;
 }
 
+#define check(b, e)                                                     \
+  if (!(b)) { g_err << "ERR: " << step->getName() << " failed on line " << __LINE__ << ": " << e.getNdbError() << endl; return NDBT_FAILED; }
+
+int runRefreshTupleAbort(NDBT_Context* ctx, NDBT_Step* step){
+  int records = ctx->getNumRecords();
+  int loops = ctx->getNumLoops();
+
+  Ndb* ndb = GETNDB(step);
+
+  const NdbDictionary::Table& tab = *ctx->getTab();
+
+  for (int i=0; i < tab.getNoOfColumns(); i++)
+  {
+    if (tab.getColumn(i)->getStorageType() == NDB_STORAGETYPE_DISK)
+    {
+      g_err << "Table has disk column(s) skipping." << endl;
+      return NDBT_OK;
+    }
+  }
+
+
+  g_err << "Loading table." << endl;
+  HugoTransactions hugoTrans(*ctx->getTab());
+  check(hugoTrans.loadTable(ndb, records) == 0, hugoTrans);
+
+  HugoOperations hugoOps(*ctx->getTab());
+
+  /* Check refresh, abort sequence with an ordered index
+   * Previously this gave bugs due to corruption of the
+   * tuple version
+   */
+  while (loops--)
+  {
+    Uint32 numRefresh = 2 + rand() % 10;  /* 2..11 */
+
+    g_err << "Refresh, rollback * " << numRefresh << endl;
+
+    while (--numRefresh)  /* NOTE(review): pre-decrement => one fewer round than the count logged above */
+    {
+      /* Refresh, rollback */
+      check(hugoOps.startTransaction(ndb) == 0, hugoOps);
+      check(hugoOps.pkRefreshRecord(ndb, 0, records, 0) == 0, hugoOps);
+      check(hugoOps.execute_NoCommit(ndb) == 0, hugoOps);
+      check(hugoOps.execute_Rollback(ndb) == 0, hugoOps);
+      check(hugoOps.closeTransaction(ndb) == 0, hugoOps);
+    }
+
+    g_err << "Refresh, commit" << endl;
+    /* Refresh, commit */
+    check(hugoOps.startTransaction(ndb) == 0, hugoOps);
+    check(hugoOps.pkRefreshRecord(ndb, 0, records, 0) == 0, hugoOps);
+    check(hugoOps.execute_NoCommit(ndb) == 0, hugoOps);
+    check(hugoOps.execute_Commit(ndb) == 0, hugoOps);
+    check(hugoOps.closeTransaction(ndb) == 0, hugoOps);
+
+    g_err << "Update, commit" << endl;
+    /* Update — exercises the ordered index against the refreshed tuple version */
+    check(hugoOps.startTransaction(ndb) == 0, hugoOps);
+    check(hugoOps.pkUpdateRecord(ndb, 0, records, 2 + loops) == 0, hugoOps);
+    check(hugoOps.execute_NoCommit(ndb) == 0, hugoOps);
+    check(hugoOps.execute_Commit(ndb) == 0, hugoOps);
+    check(hugoOps.closeTransaction(ndb) == 0, hugoOps);
+  }
+
+  return NDBT_OK;
+}
+
 
 int
 runBuildDuring(NDBT_Context* ctx, NDBT_Step* step){
@@ -3619,6 +3686,16 @@ TESTCASE("Bug60851", "")
   INITIALIZER(runBug60851);
   FINALIZER(createPkIndex_Drop);
 }
+TESTCASE("RefreshWithOrderedIndex",
+         "Refresh tuples with ordered index(es)")
+{
+  TC_PROPERTY("OrderedIndex", 1);
+  TC_PROPERTY("LoggedIndexes", Uint32(0));
+  INITIALIZER(createPkIndex);
+  INITIALIZER(runRefreshTupleAbort);
+  FINALIZER(createPkIndex_Drop);
+  FINALIZER(runClearTable);
+}
 NDBT_TESTSUITE_END(testIndex);
 
 int main(int argc, const char** argv){

=== modified file 'storage/ndb/test/ndbapi/testRestartGci.cpp'
--- a/storage/ndb/test/ndbapi/testRestartGci.cpp	2011-05-18 05:25:47 +0000
+++ b/storage/ndb/test/ndbapi/testRestartGci.cpp	2011-05-23 16:13:34 +0000
@@ -63,9 +63,9 @@ maybeExtraBits(Ndb* ndb, NdbDictionary::
     return 0;
   }
 
-  bool useExtendedBits = ((rand() % 5) != 0);
-  Uint32 numGciBits= rand() % 32;      /* 0 -> 31 */
-  Uint32 numAuthorBits = rand() % 32;  /* 0 -> 31 */
+  bool useExtendedBits = ((ndb_rand() % 5) != 0);
+  Uint32 numGciBits= ndb_rand() % 32;      /* 0 -> 31 */
+  Uint32 numAuthorBits = ndb_rand() % 32;  /* 0 -> 31 */
 
   if (useExtendedBits && (numGciBits || numAuthorBits))
   {
@@ -136,7 +136,7 @@ int runInsertRememberGci(NDBT_Context* c
     Uint32 authorVal = 0;
     if (ctx->getTab()->getExtraRowAuthorBits() > 0)
     {
-      authorVal = (rand() & authorMask);
+      authorVal = (ndb_rand() & authorMask);
       /* Pain here due to need to use NdbRecord */
       char rowBuff[NDB_MAX_TUPLE_SIZE];
       const NdbDictionary::Table* tab = ctx->getTab();
@@ -378,7 +378,7 @@ int runVerifyInserts(NDBT_Context* ctx, 
 	ndbout << "ERR: Record "<<i<<" should not have existed" << endl;
 	result = NDBT_FAILED;
       }
-      bool expectRounding = (expectedRecordGci && 0xffffffff) >= firstSaturatedValue;
+      bool expectRounding = (expectedRecordGci & 0xffffffff) >= firstSaturatedValue;
       Uint64 expectedRoundedGci = (expectedRecordGci | 0xffffffff);
       Uint64 readGci = rowGci->u_64_value();
       Uint64 expectedRead = (expectRounding)?expectedRoundedGci :

=== modified file 'storage/ndb/test/ndbapi/test_event.cpp'
--- a/storage/ndb/test/ndbapi/test_event.cpp	2011-04-07 07:22:49 +0000
+++ b/storage/ndb/test/ndbapi/test_event.cpp	2011-05-25 14:31:47 +0000
@@ -164,6 +164,18 @@ static int runCreateEvent(NDBT_Context* 
   return NDBT_OK;
 }
 
+Uint32 setAnyValue(Ndb* ndb, NdbTransaction* trans, int rowid, int updVal)  /* AnyValue callback; ndb/rowid/updVal unused by design */
+{
+  /* XOR 2 32bit words of transid together */
+  Uint64 transId = trans->getTransactionId();
+  return transId ^ (transId >> 32);  /* implicitly truncated to the low 32 bits */
+}
+
+bool checkAnyValueTransId(Uint64 transId, Uint32 anyValue)  /* true iff anyValue matches the fold produced by setAnyValue() */
+{
+  return transId && (anyValue == Uint32(transId ^ (transId >> 32)));
+}
+
 struct receivedEvent {
   Uint32 pk;
   Uint32 count;
@@ -295,6 +307,24 @@ eventOperation(Ndb* pNdb, const NdbDicti
 	  abort();
 	}
 
+        /* Check event transaction id */
+        Uint32 anyValue = pOp->getAnyValue();
+        Uint64 transId = pOp->getTransId();
+        if (anyValue)
+        {
+          if (!checkAnyValueTransId(transId, anyValue))
+          {
+            g_err << "ERROR : TransId and AnyValue mismatch.  "
+                  << "Transid : " << transId
+                  << ", AnyValue : " << anyValue
+                  << ", Expected AnyValue : "
+                  << (Uint32) ((transId >> 32) ^ transId)
+                  << endl;
+            abort();
+            return NDBT_FAILED;
+          }
+        }
+
 	if ((int)pk < records) {
 	  recEvent[pk].pk = pk;
 	  recEvent[pk].count++;
@@ -498,6 +528,8 @@ int runEventLoad(NDBT_Context* ctx, NDBT
   int records = ctx->getNumRecords();
   HugoTransactions hugoTrans(*ctx->getTab());
 
+  hugoTrans.setAnyValueCallback(setAnyValue);
+
   sleep(1);
 #if 0
   sleep(5);
@@ -520,6 +552,7 @@ int runEventMixedLoad(NDBT_Context* ctx,
   int loops = ctx->getNumLoops();
   int records = ctx->getNumRecords();
   HugoTransactions hugoTrans(*ctx->getTab());
+  hugoTrans.setAnyValueCallback(setAnyValue);
   
   if(ctx->getPropertyWait("LastGCI_hi", ~(Uint32)0))
   {
@@ -721,6 +754,24 @@ int runEventApplier(NDBT_Context* ctx, N
 	    abort();
 	  }
 
+          /* Check event transaction id */
+          Uint32 anyValue = pOp->getAnyValue();
+          Uint64 transId = pOp->getTransId();
+          if (anyValue)
+          {
+            if (!checkAnyValueTransId(transId, anyValue))
+            {
+              g_err << "ERROR : TransId and AnyValue mismatch.  "
+                    << "Transid : " << transId
+                    << ", AnyValue : " << anyValue
+                    << ", Expected AnyValue : "
+                    << (Uint32) ((transId >> 32) ^ transId)
+                    << endl;
+              abort();
+              return NDBT_FAILED;
+            }
+          }
+
 	  for (i= 0; i < n_columns; i++)
 	  {
 	    if (recAttr[i]->isNULL())

=== modified file 'storage/ndb/test/run-test/daily-devel-tests.txt'
--- a/storage/ndb/test/run-test/daily-devel-tests.txt	2011-04-08 11:06:53 +0000
+++ b/storage/ndb/test/run-test/daily-devel-tests.txt	2011-05-25 13:19:02 +0000
@@ -129,3 +129,16 @@ max-time: 1800
 cmd: testDict
 args: -n SchemaTrans -l 1
 
+# Refresh tuple
+max-time: 300
+cmd: testBasic
+args: -n RefreshTuple T6 D1
+
+max-time: 300
+cmd: testIndex
+args: -n RefreshWithOrderedIndex T2 D2
+
+max-time: 300
+cmd: testBasic
+args: -n RefreshLocking D1
+

=== modified file 'storage/ndb/test/src/HugoOperations.cpp'
--- a/storage/ndb/test/src/HugoOperations.cpp	2011-04-07 07:22:49 +0000
+++ b/storage/ndb/test/src/HugoOperations.cpp	2011-05-25 14:31:47 +0000
@@ -400,6 +400,8 @@ int HugoOperations::pkUpdateRecord(Ndb* 
     Uint32 partId;
     if(getPartIdForRow(pOp, r+recordNo, partId))
       pOp->setPartitionId(partId);
+
+    pOp->setAnyValue(getAnyValueForRowUpd(r+recordNo, updatesValue));
     
   }
   return NDBT_OK;
@@ -567,6 +569,47 @@ int HugoOperations::pkDeleteRecord(Ndb* 
   return NDBT_OK;
 }
 
+int HugoOperations::pkRefreshRecord(Ndb* pNdb,
+                                    int recordNo,
+                                    int numRecords,
+                                    int anyValueInfo){  /* defines refreshTuple ops for rows [recordNo, recordNo+numRecords) */
+
+  char buffer[NDB_MAX_TUPLE_SIZE];
+  const NdbDictionary::Table * pTab =
+    pNdb->getDictionary()->getTable(tab.getName());
+
+  if (pTab == 0)
+  {
+    return NDBT_FAILED;
+  }
+
+  const NdbRecord * record = pTab->getDefaultRecord();
+  NdbOperation::OperationOptions opts;
+  opts.optionsPresent = NdbOperation::OperationOptions::OO_ANYVALUE;
+  for(int r=0; r < numRecords; r++)
+  {
+    bzero(buffer, sizeof(buffer));  /* PK values are filled in by equalForRow below */
+    if (calc.equalForRow((Uint8*)buffer, record, r + recordNo))
+    {
+      return NDBT_FAILED;
+    }
+
+    /* Encode caller tag in the top half and the row id in the bottom 16 bits */
+    opts.anyValue = anyValueInfo?
+      (anyValueInfo << 16) | (r+recordNo) :
+      0;
+
+    const NdbOperation* pOp = pTrans->refreshTuple(record, buffer,
+                                                   &opts, sizeof(opts));
+    if (pOp == NULL)
+    {
+      ERR(pTrans->getNdbError());
+      setNdbError(pTrans->getNdbError());
+      return NDBT_FAILED;
+    }
+  }
+  return NDBT_OK;
+}
+
 int HugoOperations::execute_Commit(Ndb* pNdb,
 				   AbortOption eao){
 
@@ -762,7 +805,8 @@ HugoOperations::HugoOperations(const Ndb
   UtilTransactions(_tab, idx),
   pIndexScanOp(NULL),
   calc(_tab),
-  m_quiet(false)
+  m_quiet(false),
+  avCallback(NULL)
 {
 }
 
@@ -1160,5 +1204,21 @@ HugoOperations::setNdbError(const NdbErr
   m_error.code = error.code ? error.code : 1;
 }
 
+void
+HugoOperations::setAnyValueCallback(AnyValueCallback avc)
+{
+  avCallback = avc;  /* NULL disables per-update AnyValue generation */
+}
+
+Uint32
+HugoOperations::getAnyValueForRowUpd(int row, int update)
+{
+  if (avCallback == NULL)
+    return 0;  /* no callback registered: AnyValue stays zero */
+
+  return (avCallback)(pTrans->getNdb(), pTrans,
+                      row, update);
+}
+
 template class Vector<HugoOperations::RsPair>;
 template class Vector<const NdbLockHandle*>;

=== modified file 'storage/ndb/test/src/HugoTransactions.cpp'
--- a/storage/ndb/test/src/HugoTransactions.cpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/test/src/HugoTransactions.cpp	2011-05-25 13:19:02 +0000
@@ -1502,6 +1502,79 @@ HugoTransactions::pkDelRecords(Ndb* pNdb
 }
 
 int 
+HugoTransactions::pkRefreshRecords(Ndb* pNdb,
+                                   int startFrom,
+                                   int count,
+                                   int batch)
+{
+  int r = 0;
+  int retryAttempt = 0;
+
+  g_info << "|- Refreshing records..." << startFrom << "-" << (startFrom+count)
+         << " (batch=" << batch << ")" << endl;
+
+  while (r < count)
+  {
+    if(r + batch > count)
+      batch = count - r;  /* clamp final batch */
+
+    if (retryAttempt >= m_retryMax)
+    {
+      g_info << "ERROR: has retried this operation " << retryAttempt
+	     << " times, failing!" << endl;
+      return NDBT_FAILED;
+    }
+
+    pTrans = pNdb->startTransaction();
+    if (pTrans == NULL)
+    {
+      const NdbError err = pNdb->getNdbError();
+
+      if (err.status == NdbError::TemporaryError){
+	ERR(err);
+	NdbSleep_MilliSleep(50);
+	retryAttempt++;
+	continue;
+      }
+      ERR(err);
+      return NDBT_FAILED;
+    }
+
+    if (pkRefreshRecord(pNdb, r, batch) != NDBT_OK)  /* NOTE(review): startFrom is logged but never applied — should this be startFrom + r? */
+    {
+      ERR(pTrans->getNdbError());
+      closeTransaction(pNdb);
+      return NDBT_FAILED;
+    }
+
+    if (pTrans->execute(Commit, AbortOnError) == -1)
+    {
+      const NdbError err = pTrans->getNdbError();
+
+      switch(err.status){
+      case NdbError::TemporaryError:
+	ERR(err);
+	closeTransaction(pNdb);
+	NdbSleep_MilliSleep(50);
+	retryAttempt++;
+	continue;
+	break;  /* unreachable after continue */
+
+      default:
+	ERR(err);
+	closeTransaction(pNdb);
+	return NDBT_FAILED;
+      }
+    }
+
+    closeTransaction(pNdb);
+    r += batch; // Refresh next batch
+  }
+
+  return NDBT_OK;
+}
+
+int
 HugoTransactions::pkReadUnlockRecords(Ndb* pNdb, 
                                       int records,
                                       int batch,

=== modified file 'storage/ndb/test/tools/hugoPkUpdate.cpp'
--- a/storage/ndb/test/tools/hugoPkUpdate.cpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/test/tools/hugoPkUpdate.cpp	2011-05-25 13:19:02 +0000
@@ -43,6 +43,8 @@ struct ThrOutput {
   NDBT_Stats latency;
 };
 
+static int _refresh = 0;
+
 int main(int argc, const char** argv){
   ndb_init();
 
@@ -63,7 +65,9 @@ int main(int argc, const char** argv){
     //    { "batch", 'b', arg_integer, &_batch, "batch value", "batch" },
     { "records", 'r', arg_integer, &_records, "Number of records", "records" },
     { "usage", '?', arg_flag, &_help, "Print help", "" },
-    { "database", 'd', arg_string, &db, "Database", "" }
+    { "database", 'd', arg_string, &db, "Database", "" },
+    { "refresh", 0, arg_flag, &_refresh, "refresh record rather than update them", "" }
+
   };
   int num_args = sizeof(args) / sizeof(args[0]);
   int optind = 0;
@@ -135,7 +139,10 @@ int main(int argc, const char** argv){
     ths.stop();
 
     if (ths.get_err())
+    {
+      ths.disconnect();
       NDBT_ProgramExit(NDBT_FAILED);
+    }
 
     if (_stats) {
       NDBT_Stats latency;
@@ -160,6 +167,8 @@ int main(int argc, const char** argv){
     i++;
   }
 
+  ths.disconnect();
+
   return NDBT_ProgramExit(NDBT_OK);
 }
 
@@ -177,9 +186,19 @@ static void hugoPkUpdate(NDBT_Thread& th
   hugoTrans.setThrInfo(ths.get_count(), thr.get_thread_no());
 
   int ret;
-  ret = hugoTrans.pkUpdateRecords(thr.get_ndb(),
-                                  input->records,
-                                  input->batch);
+  if (_refresh == 0)
+  {
+    ret = hugoTrans.pkUpdateRecords(thr.get_ndb(),
+                                    input->records,
+                                    input->batch);
+  }
+  else
+  {
+    ret = hugoTrans.pkRefreshRecords(thr.get_ndb(),
+                                     0,
+                                     input->records,
+                                     input->batch);
+  }
   if (ret != 0)
     thr.set_err(ret);
 }

=== modified file 'storage/ndb/tools/CMakeLists.txt'
--- a/storage/ndb/tools/CMakeLists.txt	2011-02-03 14:20:36 +0000
+++ b/storage/ndb/tools/CMakeLists.txt	2011-05-24 11:51:39 +0000
@@ -72,12 +72,6 @@ MYSQL_ADD_EXECUTABLE(ndb_config
   COMPONENT ClusterTools)
 TARGET_LINK_LIBRARIES(ndb_config ndbmgmclient ndbconf)
 
-SET(options "-I${CMAKE_SOURCE_DIR}/storage/ndb/src/mgmapi")
-SET(options "${options} -I${CMAKE_SOURCE_DIR}/storage/ndb/src/mgmsrv")
-SET(options "${options} -I${CMAKE_SOURCE_DIR}/storage/ndb/include/mgmcommon")
-SET_TARGET_PROPERTIES(ndb_config PROPERTIES
-                      COMPILE_FLAGS "${options}")
-
 # Build ndbinfo_sql and run it to create ndbinfo.sql
 ADD_EXECUTABLE(ndbinfo_sql ndbinfo_sql.cpp)
 TARGET_LINK_LIBRARIES(ndbinfo_sql ndbclient)

=== modified file 'storage/ndb/tools/Makefile.am'
--- a/storage/ndb/tools/Makefile.am	2011-02-23 22:48:42 +0000
+++ b/storage/ndb/tools/Makefile.am	2011-05-24 11:51:39 +0000
@@ -60,10 +60,6 @@ ndb_config_SOURCES = ndb_config.cpp \
 	../src/mgmsrv/ConfigInfo.cpp \
 	../src/mgmsrv/InitConfigFileParser.cpp
 
-ndb_config_CXXFLAGS = -I$(top_srcdir)/storage/ndb/src/mgmapi \
-                      -I$(top_srcdir)/storage/ndb/src/mgmsrv \
-                      -I$(top_srcdir)/storage/ndb/include/mgmcommon
-
 ndb_restore_LDADD = $(top_builddir)/storage/ndb/src/common/util/libndbazio.la \
                     $(LDADD)
 

=== modified file 'storage/ndb/tools/ndb_config.cpp'
--- a/storage/ndb/tools/ndb_config.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/tools/ndb_config.cpp	2011-05-24 11:51:39 +0000
@@ -28,8 +28,8 @@
 
 #include <NdbOut.hpp>
 #include <mgmapi.h>
-#include <mgmapi_configuration.hpp>
-#include <ConfigInfo.hpp>
+#include "../src/mgmapi/mgmapi_configuration.hpp"
+#include "../src/mgmsrv/ConfigInfo.hpp"
 #include <NdbAutoPtr.hpp>
 #include <NdbTCP.h>
 
@@ -552,7 +552,7 @@ noconnect:
   return conf;
 }
 
-#include <Config.hpp>
+#include "../src/mgmsrv/Config.hpp"
 #include <EventLogger.hpp>
 
 extern EventLogger *g_eventLogger;

=== modified file 'storage/ndb/tools/ndbinfo_sql.cpp'
--- a/storage/ndb/tools/ndbinfo_sql.cpp	2011-04-17 18:25:41 +0000
+++ b/storage/ndb/tools/ndbinfo_sql.cpp	2011-05-26 15:04:45 +0000
@@ -327,6 +327,12 @@ int main(int argc, char** argv){
   sql.assfmt("CREATE DATABASE IF NOT EXISTS `%s`", opt_ndbinfo_db);
   print_conditional_sql(sql);
 
+  printf("# Set NDBINFO in offline mode during (re)create of tables\n");
+  printf("# and views to avoid errors caused by no such table or\n");
+  printf("# different table definition in NDB\n");
+  sql.assfmt("SET @@global.ndbinfo_offline=TRUE");
+  print_conditional_sql(sql);
+
   printf("# Drop any old views in %s\n", opt_ndbinfo_db);
   for (size_t i = 0; i < num_views; i++)
   {
@@ -430,6 +436,10 @@ int main(int argc, char** argv){
     print_conditional_sql(sql);
   }
 
+  printf("# Finally turn off offline mode\n");
+  sql.assfmt("SET @@global.ndbinfo_offline=FALSE");
+  print_conditional_sql(sql);
+
   return 0;
 }
 

No bundle (reason: revision is a merge).
Thread
bzr commit into mysql-5.1-telco-7.0-spj-scan-vs-scan branch(jan.wedvik:3498) Jan Wedvik26 May