List: Commits
From: Pekka Nousiainen
Date: May 25 2011 3:03pm
Subject: bzr commit into mysql-5.1-telco-7.0-wl4124 branch (pekka.nousiainen:4383)
#At file:///export/space/pekka/ms/ms-wl4124-70/ based on revid:pekka@stripped

 4383 Pekka Nousiainen	2011-05-25 [merge]
      merge from main into wl4124

    added:
      mysql-test/suite/ndb/r/ndb_dd_bug12581213.result
      mysql-test/suite/ndb/t/ndb_dd_bug12581213.cnf
      mysql-test/suite/ndb/t/ndb_dd_bug12581213.test
      storage/ndb/cmake/os/
      storage/ndb/cmake/os/Windows.cmake
      storage/ndb/cmake/os/WindowsCache.cmake
    modified:
      CMakeLists.txt
      configure.in
      mysql-test/suite/ndb/r/ndb_native_default_support.result
      mysql-test/suite/ndb/r/ndbinfo.result
      mysql-test/suite/ndb/t/ndbinfo.test
      sql/ha_ndbinfo.cc
      sql/ha_ndbinfo.h
      storage/ndb/CMakeLists.txt
      storage/ndb/include/kernel/AttributeHeader.hpp
      storage/ndb/include/kernel/kernel_types.h
      storage/ndb/include/kernel/signaldata/CreateTab.hpp
      storage/ndb/include/kernel/signaldata/CreateTable.hpp
      storage/ndb/include/kernel/signaldata/DiGetNodes.hpp
      storage/ndb/include/kernel/signaldata/DictTabInfo.hpp
      storage/ndb/include/ndb_version.h.in
      storage/ndb/include/ndbapi/NdbDictionary.hpp
      storage/ndb/include/ndbapi/NdbOperation.hpp
      storage/ndb/include/ndbapi/NdbTransaction.hpp
      storage/ndb/include/ndbapi/ndb_cluster_connection.hpp
      storage/ndb/ndb_configure.cmake
      storage/ndb/ndb_configure.m4
      storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
      storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp
      storage/ndb/src/kernel/blocks/ERROR_codes.txt
      storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
      storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
      storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
      storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
      storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
      storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
      storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
      storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
      storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.cpp
      storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.hpp
      storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
      storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp
      storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp
      storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
      storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
      storage/ndb/src/kernel/blocks/trix/Trix.cpp
      storage/ndb/src/kernel/vm/LongSignal.cpp
      storage/ndb/src/kernel/vm/LongSignalImpl.hpp
      storage/ndb/src/kernel/vm/SimulatedBlock.cpp
      storage/ndb/src/kernel/vm/SimulatedBlock.hpp
      storage/ndb/src/mgmsrv/Config.hpp
      storage/ndb/src/ndbapi/NdbDictionary.cpp
      storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
      storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
      storage/ndb/src/ndbapi/NdbInfo.cpp
      storage/ndb/src/ndbapi/NdbInfo.hpp
      storage/ndb/src/ndbapi/NdbInfoRecAttr.hpp
      storage/ndb/src/ndbapi/NdbInfoScanOperation.cpp
      storage/ndb/src/ndbapi/NdbOperationExec.cpp
      storage/ndb/src/ndbapi/NdbTransaction.cpp
      storage/ndb/src/ndbapi/TransporterFacade.hpp
      storage/ndb/src/ndbapi/ndb_cluster_connection.cpp
      storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp
      storage/ndb/src/ndbapi/ndberror.c
      storage/ndb/test/include/HugoOperations.hpp
      storage/ndb/test/include/HugoTransactions.hpp
      storage/ndb/test/ndbapi/testBasic.cpp
      storage/ndb/test/ndbapi/testIndex.cpp
      storage/ndb/test/ndbapi/testRestartGci.cpp
      storage/ndb/test/run-test/conf-blade08.cnf
      storage/ndb/test/run-test/conf-dl145a.cnf
      storage/ndb/test/run-test/conf-fimafeng08.cnf
      storage/ndb/test/run-test/conf-fimafeng09.cnf*
      storage/ndb/test/run-test/conf-loki27.cnf*
      storage/ndb/test/run-test/conf-ndb07.cnf
      storage/ndb/test/run-test/conf-ndbmaster.cnf
      storage/ndb/test/run-test/conf-repl.cnf
      storage/ndb/test/run-test/conf-techra29.cnf*
      storage/ndb/test/run-test/conf-test.cnf
      storage/ndb/test/run-test/conf-tyr64.cnf*
      storage/ndb/test/run-test/conf-upgrade.cnf
      storage/ndb/test/run-test/daily-basic-tests.txt
      storage/ndb/test/run-test/daily-devel-tests.txt
      storage/ndb/test/src/HugoOperations.cpp
      storage/ndb/test/src/HugoTransactions.cpp
      storage/ndb/test/src/NDBT_Table.cpp
      storage/ndb/test/tools/hugoPkUpdate.cpp
      storage/ndb/tools/CMakeLists.txt
      storage/ndb/tools/Makefile.am
      storage/ndb/tools/ndb_config.cpp
      storage/ndb/tools/ndbinfo_sql.cpp
      storage/ndb/tools/select_all.cpp
=== modified file 'CMakeLists.txt'
--- a/CMakeLists.txt	2011-05-12 14:13:43 +0000
+++ b/CMakeLists.txt	2011-05-24 11:41:58 +0000
@@ -128,6 +128,14 @@ IF(MSVC)
     # generate map files, set stack size (see bug#20815)
     SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MAP /MAPINFO:EXPORTS")
     SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /STACK:1048576")
+    IF(NOT MCP_BUG11765145)
+      # Fails to link with an error message about a missing .map file;
+      # turn off incremental linking to work around the problem
+      IF(CMAKE_GENERATOR MATCHES "Visual Studio 10")
+        MESSAGE(STATUS "Turning off incremental linking for VS 2010")
+        SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /INCREMENTAL:NO")
+      ENDIF()
+    ENDIF()
 
     # remove support for Exception handling
     STRING(REPLACE "/GX"   "" CMAKE_CXX_FLAGS            ${CMAKE_CXX_FLAGS})
@@ -327,19 +335,3 @@ IF(WITH_EMBEDDED_SERVER)
 ENDIF(WITH_EMBEDDED_SERVER)
 ADD_SUBDIRECTORY(mysql-test/lib/My/SafeProcess)
 
-# Dump cmake's output and error log to help diagnosing
-# platform checks
-MACRO(DUMP_FILE filename)
-  IF(EXISTS ${filename})
-    FILE(READ ${filename} content)
-    MESSAGE(STATUS "=vvvv= Dumping ${filename} ")
-    MESSAGE(STATUS "${content}")
-    MESSAGE(STATUS "=^^^^=")
-  ELSE()
-    MESSAGE(STATUS "'${filename}' does not exist")
-  ENDIF()
-ENDMACRO()
- 
-DUMP_FILE("${CMAKE_BINARY_DIR}/CMakeFiles/CMakeError.log")
-DUMP_FILE("${CMAKE_BINARY_DIR}/CMakeFiles/CMakeOutput.log")
-

=== modified file 'configure.in'
--- a/configure.in	2011-04-26 07:39:21 +0000
+++ b/configure.in	2011-05-24 08:44:31 +0000
@@ -12,7 +12,7 @@ dnl
 dnl When changing the major version number please also check the switch
 dnl statement in mysqlbinlog::check_master_version().  You may also need
 dnl to update version.c in ndb.
-AC_INIT([MySQL Server], [5.1.56-ndb-7.0.25], [], [mysql])
+AC_INIT([MySQL Server], [5.1.56-ndb-7.0.26], [], [mysql])
 
 AC_CONFIG_SRCDIR([sql/mysqld.cc])
 AC_CANONICAL_SYSTEM

=== added file 'mysql-test/suite/ndb/r/ndb_dd_bug12581213.result'
--- a/mysql-test/suite/ndb/r/ndb_dd_bug12581213.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb/r/ndb_dd_bug12581213.result	2011-05-23 10:38:41 +0000
@@ -0,0 +1,17 @@
+CREATE LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile.dat'
+INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE 'datafile.dat'
+USE LOGFILE GROUP lg1
+INITIAL_SIZE 12M
+ENGINE NDB;
+alter tablespace ts1
+drop datafile 'datafile.dat'
+engine ndb;
+drop tablespace ts1
+engine ndb;
+drop logfile group lg1
+engine ndb;

=== modified file 'mysql-test/suite/ndb/r/ndb_native_default_support.result'
--- a/mysql-test/suite/ndb/r/ndb_native_default_support.result	2010-11-10 13:39:11 +0000
+++ b/mysql-test/suite/ndb/r/ndb_native_default_support.result	2011-05-17 23:29:55 +0000
@@ -140,6 +140,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 i Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY AUTO_INCR
@@ -178,6 +180,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 pk Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY AUTO_INCR
@@ -246,6 +250,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 i Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY AUTO_INCR
@@ -283,6 +289,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 pk Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY AUTO_INCR
@@ -360,6 +368,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 i Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY AUTO_INCR
@@ -450,6 +460,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 i Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY AUTO_INCR
@@ -497,6 +509,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
@@ -538,6 +552,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
@@ -587,6 +603,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
@@ -630,6 +648,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
@@ -672,6 +692,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
@@ -714,6 +736,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
@@ -758,6 +782,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
@@ -805,6 +831,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
@@ -847,6 +875,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
@@ -891,6 +921,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
@@ -936,6 +968,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
@@ -985,6 +1019,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
@@ -1028,6 +1064,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
@@ -1070,6 +1108,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
@@ -1112,6 +1152,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
@@ -1154,6 +1196,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
@@ -1198,6 +1242,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
@@ -1245,6 +1291,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
@@ -1333,6 +1381,8 @@ Row GCI: 1
 SingleUserMode: 0
 ForceVarPart: 1
 FragmentCount: 2
+ExtraRowGciBits: 0
+ExtraRowAuthorBits: 0
 TableStatus: Retrieved
 -- Attributes -- 
 i Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY AUTO_INCR

=== modified file 'mysql-test/suite/ndb/r/ndbinfo.result'
--- a/mysql-test/suite/ndb/r/ndbinfo.result	2010-11-03 09:48:25 +0000
+++ b/mysql-test/suite/ndb/r/ndbinfo.result	2011-05-23 13:45:57 +0000
@@ -136,6 +136,7 @@ Variable_name	Value
 ndbinfo_database	ndbinfo
 ndbinfo_max_bytes	0
 ndbinfo_max_rows	10
+ndbinfo_offline	OFF
 ndbinfo_show_hidden	OFF
 ndbinfo_table_prefix	ndb$
 ndbinfo_version	NDB_VERSION_D
@@ -179,20 +180,55 @@ node_id != 0
 1
 Warnings:
 Warning	40001	Table 'ndb$test' is defined differently in NDB, there are more columns available. The SQL to regenerate is: 'CREATE TABLE `ndbinfo`.`ndb$test` (`node_id` INT UNSIGNED, `block_number` INT UNSIGNED, `block_instance` INT UNSIGNED, `counter` INT UNSIGNED, `counter2` BIGINT UNSIGNED) ENGINE=NDBINFO'
-
-## 2) Column does not exist in NDB -> error, with warning
 DROP TABLE ndb$test;
+
+## 2) Column does not exist in NDB -> allowed, with warning; non-existing
+##    column(s) return NULL
+## 2a) Extra column at end
 CREATE TABLE ndb$test (node_id int, non_existing int) ENGINE = ndbinfo;
-SELECT * FROM ndb$test;
-ERROR HY000: Got error 40001 'Incompatible table definitions' from NDBINFO
-SHOW WARNINGS;
-Level	Code	Message
+SELECT DISTINCT node_id, non_existing FROM ndb$test;
+node_id	non_existing
+1	NULL
+2	NULL
+Warnings:
 Error	40001	Table 'ndb$test' is defined differently in NDB, column 'non_existing' does not exist. The SQL to regenerate is: 'CREATE TABLE `ndbinfo`.`ndb$test` (`node_id` INT UNSIGNED, `block_number` INT UNSIGNED, `block_instance` INT UNSIGNED, `counter` INT UNSIGNED, `counter2` BIGINT UNSIGNED) ENGINE=NDBINFO'
-Error	1296	Got error 40001 'Incompatible table definitions' from NDBINFO
+Warning	40001	Table 'ndb$test' is defined differently in NDB, there are more columns available. The SQL to regenerate is: 'CREATE TABLE `ndbinfo`.`ndb$test` (`node_id` INT UNSIGNED, `block_number` INT UNSIGNED, `block_instance` INT UNSIGNED, `counter` INT UNSIGNED, `counter2` BIGINT UNSIGNED) ENGINE=NDBINFO'
+DROP TABLE ndb$test;
+
+## 2b) Extra column(s) in middle
+CREATE TABLE ndb$test (
+  node_id int unsigned,
+  non_existing int unsigned,
+  block_number int unsigned,
+  block_instance int unsigned,
+  counter int unsigned,
+  counter2 bigint unsigned
+) ENGINE = ndbinfo;
+SELECT DISTINCT node_id, non_existing, block_number FROM ndb$test;
+node_id	non_existing	block_number
+1	NULL	249
+2	NULL	249
+Warnings:
+Error	40001	Table 'ndb$test' is defined differently in NDB, column 'non_existing' does not exist. The SQL to regenerate is: 'CREATE TABLE `ndbinfo`.`ndb$test` (`node_id` INT UNSIGNED, `block_number` INT UNSIGNED, `block_instance` INT UNSIGNED, `counter` INT UNSIGNED, `counter2` BIGINT UNSIGNED) ENGINE=NDBINFO'
+DROP TABLE ndb$test;
+
+## 2c) Extra column first
+CREATE TABLE ndb$test (non_existing int, node_id int) ENGINE = ndbinfo;
+SELECT DISTINCT node_id, non_existing FROM ndb$test;
+node_id	non_existing
+1	NULL
+2	NULL
+Warnings:
+Error	40001	Table 'ndb$test' is defined differently in NDB, column 'non_existing' does not exist. The SQL to regenerate is: 'CREATE TABLE `ndbinfo`.`ndb$test` (`node_id` INT UNSIGNED, `block_number` INT UNSIGNED, `block_instance` INT UNSIGNED, `counter` INT UNSIGNED, `counter2` BIGINT UNSIGNED) ENGINE=NDBINFO'
+Warning	40001	Table 'ndb$test' is defined differently in NDB, there are more columns available. The SQL to regenerate is: 'CREATE TABLE `ndbinfo`.`ndb$test` (`node_id` INT UNSIGNED, `block_number` INT UNSIGNED, `block_instance` INT UNSIGNED, `counter` INT UNSIGNED, `counter2` BIGINT UNSIGNED) ENGINE=NDBINFO'
+SELECT DISTINCT non_existing, node_id FROM ndb$test;
+non_existing	node_id
+NULL	1
+NULL	2
+DROP TABLE ndb$test;
 
 ## 3) Incompatible column type -> error, with warning
 ## 3a) int instead of bigint
-DROP TABLE ndb$test;
 CREATE TABLE ndb$test (counter2 int) ENGINE = ndbinfo;
 SELECT * FROM ndb$test;
 ERROR HY000: Got error 40001 'Incompatible table definitions' from NDBINFO
@@ -219,6 +255,26 @@ Level	Code	Message
 Error	40001	Table 'ndb$test' is defined differently in NDB, column 'node_id' is not compatible. The SQL to regenerate is: 'CREATE TABLE `ndbinfo`.`ndb$test` (`node_id` INT UNSIGNED, `block_number` INT UNSIGNED, `block_instance` INT UNSIGNED, `counter` INT UNSIGNED, `counter2` BIGINT UNSIGNED) ENGINE=NDBINFO'
 Error	1296	Got error 40001 'Incompatible table definitions' from NDBINFO
 DROP TABLE ndb$test;
+## 3d) column which is NOT NULL
+CREATE TABLE ndb$test (node_id int unsigned NOT NULL) ENGINE = ndbinfo;
+SELECT * FROM ndb$test;
+ERROR HY000: Got error 40001 'Incompatible table definitions' from NDBINFO
+SHOW WARNINGS;
+Level	Code	Message
+Error	40001	Table 'ndb$test' is defined differently in NDB, column 'node_id' is NOT NULL. The SQL to regenerate is: 'CREATE TABLE `ndbinfo`.`ndb$test` (`node_id` INT UNSIGNED, `block_number` INT UNSIGNED, `block_instance` INT UNSIGNED, `counter` INT UNSIGNED, `counter2` BIGINT UNSIGNED) ENGINE=NDBINFO'
+Error	1296	Got error 40001 'Incompatible table definitions' from NDBINFO
+DROP TABLE ndb$test;
+## 3e) non-existing column which is NOT NULL
+CREATE TABLE ndb$test (
+  block_number int unsigned,
+  non_existing int NOT NULL) ENGINE = ndbinfo;
+SELECT * FROM ndb$test;
+ERROR HY000: Got error 40001 'Incompatible table definitions' from NDBINFO
+SHOW WARNINGS;
+Level	Code	Message
+Error	40001	Table 'ndb$test' is defined differently in NDB, column 'non_existing' is NOT NULL. The SQL to regenerate is: 'CREATE TABLE `ndbinfo`.`ndb$test` (`node_id` INT UNSIGNED, `block_number` INT UNSIGNED, `block_instance` INT UNSIGNED, `counter` INT UNSIGNED, `counter2` BIGINT UNSIGNED) ENGINE=NDBINFO'
+Error	1296	Got error 40001 'Incompatible table definitions' from NDBINFO
+DROP TABLE ndb$test;
 
 ## 4) Table with primary key/indexes not supported
 CREATE TABLE ndb$test (node_id int, block_number int PRIMARY KEY) ENGINE = ndbinfo;
@@ -237,4 +293,56 @@ from ndbinfo.diskpagebuffer;
 node_id
 1
 2
+
+set @@ndbinfo_offline=1;
+ERROR HY000: Variable 'ndbinfo_offline' is a GLOBAL variable and should be set with SET GLOBAL
+
+SELECT DISTINCT(node_id) FROM ndbinfo.counters ORDER BY node_id;
+node_id
+1
+2
+
+set @@global.ndbinfo_offline=TRUE;
+select @@ndbinfo_offline;
+@@ndbinfo_offline
+1
+
+CREATE TABLE ndb$does_not_exist_in_ndb(
+  node_id int,
+  message varchar(255)
+) ENGINE = ndbinfo;
+
+CREATE VIEW view_on_table_which_does_not_exist_in_ndb AS
+  SELECT node_id, message
+  FROM ndbinfo.ndb$does_not_exist_in_ndb;
+
+SHOW CREATE TABLE ndb$does_not_exist_in_ndb;
+Table	Create Table
+ndb$does_not_exist_in_ndb	CREATE TABLE `ndb$does_not_exist_in_ndb` (
+  `node_id` int(11) DEFAULT NULL,
+  `message` varchar(255) DEFAULT NULL
+) ENGINE=NDBINFO DEFAULT CHARSET=latin1
+
+SELECT * FROM view_on_table_which_does_not_exist_in_ndb;
+node_id	message
+Warnings:
+Note	1	'NDBINFO' has been started in offline mode since the 'NDBCLUSTER' engine is disabled or @@global.ndbinfo_offline is turned on - no rows can be returned
+SELECT * FROM ndb$does_not_exist_in_ndb;
+node_id	message
+Warnings:
+Note	1	'NDBINFO' has been started in offline mode since the 'NDBCLUSTER' engine is disabled or @@global.ndbinfo_offline is turned on - no rows can be returned
+SELECT DISTINCT(node_id) FROM ndbinfo.counters ORDER BY node_id;
+node_id
+Warnings:
+Note	1	'NDBINFO' has been started in offline mode since the 'NDBCLUSTER' engine is disabled or @@global.ndbinfo_offline is turned on - no rows can be returned
+
+DROP VIEW view_on_table_which_does_not_exist_in_ndb;
+DROP TABLE ndb$does_not_exist_in_ndb;
+
+set @@global.ndbinfo_offline = FALSE;
+
+SELECT DISTINCT(node_id) FROM ndbinfo.counters ORDER BY node_id;
+node_id
+1
+2
 

=== added file 'mysql-test/suite/ndb/t/ndb_dd_bug12581213.cnf'
--- a/mysql-test/suite/ndb/t/ndb_dd_bug12581213.cnf	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb/t/ndb_dd_bug12581213.cnf	2011-05-23 10:38:41 +0000
@@ -0,0 +1,7 @@
+!include suite/ndb/my.cnf
+
+[cluster_config.1]
+ndbd=
+NoOfReplicas=1
+MaxNoOfOpenFiles=27
+InitialNoOfOpenFiles=26

=== added file 'mysql-test/suite/ndb/t/ndb_dd_bug12581213.test'
--- a/mysql-test/suite/ndb/t/ndb_dd_bug12581213.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb/t/ndb_dd_bug12581213.test	2011-05-23 10:38:41 +0000
@@ -0,0 +1,23 @@
+-- source include/have_ndb.inc
+
+CREATE LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile.dat'
+INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE NDB;
+
+CREATE TABLESPACE ts1
+ADD DATAFILE 'datafile.dat'
+USE LOGFILE GROUP lg1
+INITIAL_SIZE 12M
+ENGINE NDB;
+
+alter tablespace ts1
+drop datafile 'datafile.dat'
+engine ndb;
+
+drop tablespace ts1
+engine ndb;
+
+drop logfile group lg1
+engine ndb;

=== modified file 'mysql-test/suite/ndb/t/ndbinfo.test'
--- a/mysql-test/suite/ndb/t/ndbinfo.test	2010-11-03 09:48:25 +0000
+++ b/mysql-test/suite/ndb/t/ndbinfo.test	2011-05-23 13:45:57 +0000
@@ -98,17 +98,35 @@ SELECT count(*) >= 20 FROM blocks;
 DROP TABLE ndb$test;
 CREATE TABLE ndb$test (node_id int unsigned) ENGINE = ndbinfo;
 SELECT node_id != 0 FROM ndb$test LIMIT 1;
-
-## 2) Column does not exist in NDB -> error, with warning
 DROP TABLE ndb$test;
+
+## 2) Column does not exist in NDB -> allowed, with warning; non-existing
+##    column(s) return NULL
+## 2a) Extra column at end
 CREATE TABLE ndb$test (node_id int, non_existing int) ENGINE = ndbinfo;
---error ER_GET_ERRMSG
-SELECT * FROM ndb$test;
-SHOW WARNINGS;
+SELECT DISTINCT node_id, non_existing FROM ndb$test;
+DROP TABLE ndb$test;
+
+## 2b) Extra column(s) in middle
+CREATE TABLE ndb$test (
+  node_id int unsigned,
+  non_existing int unsigned,
+  block_number int unsigned,
+  block_instance int unsigned,
+  counter int unsigned,
+  counter2 bigint unsigned
+) ENGINE = ndbinfo;
+SELECT DISTINCT node_id, non_existing, block_number FROM ndb$test;
+DROP TABLE ndb$test;
+
+## 2c) Extra column first
+CREATE TABLE ndb$test (non_existing int, node_id int) ENGINE = ndbinfo;
+SELECT DISTINCT node_id, non_existing FROM ndb$test;
+SELECT DISTINCT non_existing, node_id FROM ndb$test;
+DROP TABLE ndb$test;
 
 ## 3) Incompatible column type -> error, with warning
 ## 3a) int instead of bigint
-DROP TABLE ndb$test;
 CREATE TABLE ndb$test (counter2 int) ENGINE = ndbinfo;
 --error ER_GET_ERRMSG
 SELECT * FROM ndb$test;
@@ -126,6 +144,21 @@ CREATE TABLE ndb$test (node_id varchar(2
 SELECT * FROM ndb$test;
 SHOW WARNINGS;
 DROP TABLE ndb$test;
+## 3d) column which is NOT NULL
+CREATE TABLE ndb$test (node_id int unsigned NOT NULL) ENGINE = ndbinfo;
+--error ER_GET_ERRMSG
+SELECT * FROM ndb$test;
+SHOW WARNINGS;
+DROP TABLE ndb$test;
+## 3e) non-existing column which is NOT NULL
+CREATE TABLE ndb$test (
+  block_number int unsigned,
+  non_existing int NOT NULL) ENGINE = ndbinfo;
+--error ER_GET_ERRMSG
+SELECT * FROM ndb$test;
+SHOW WARNINGS;
+DROP TABLE ndb$test;
+
 
 ## 4) Table with primary key/indexes not supported
 --error ER_TOO_MANY_KEYS
@@ -148,4 +181,52 @@ CREATE TABLE ndb$test (node_id int AUTO_
 select distinct node_id
 from ndbinfo.diskpagebuffer;
 
+
+#
+# BUG#11885602
+# - It was allowed to CREATE TABLE which was not in NDB, but
+#   creating a view on that table failed. Implement ndbinfo_offline
+#   mode, which allows tables to be created and opened although they
+#   don't exist or have a different table definition.
+#   This is exactly the same behaviour as when NDBCLUSTER
+#   is disabled
+#
+
+# Check ndbinfo_offline is GLOBAL variable
+--error ER_GLOBAL_VARIABLE
+set @@ndbinfo_offline=1;
+
+# Query used to check that open tables are closed
+# when offline mode is turned on and off
+let $q1 = SELECT DISTINCT(node_id) FROM ndbinfo.counters ORDER BY node_id;
+eval $q1;
+
+# Turn on ndbinfo_offline
+set @@global.ndbinfo_offline=TRUE;
+select @@ndbinfo_offline;
+
+CREATE TABLE ndb$does_not_exist_in_ndb(
+  node_id int,
+  message varchar(255)
+) ENGINE = ndbinfo;
+
+CREATE VIEW view_on_table_which_does_not_exist_in_ndb AS
+  SELECT node_id, message
+  FROM ndbinfo.ndb$does_not_exist_in_ndb;
+
+SHOW CREATE TABLE ndb$does_not_exist_in_ndb;
+
+# SELECTs return no rows in offline mode
+SELECT * FROM view_on_table_which_does_not_exist_in_ndb;
+SELECT * FROM ndb$does_not_exist_in_ndb;
+eval $q1;
+
+DROP VIEW view_on_table_which_does_not_exist_in_ndb;
+DROP TABLE ndb$does_not_exist_in_ndb;
+
+# Restore original value
+set @@global.ndbinfo_offline = FALSE;
+
+eval $q1;
+
 --source ndbinfo_drop.inc

=== modified file 'sql/ha_ndbinfo.cc'
--- a/sql/ha_ndbinfo.cc	2010-11-10 14:17:13 +0000
+++ b/sql/ha_ndbinfo.cc	2011-05-23 13:45:57 +0000
@@ -56,10 +56,10 @@ static MYSQL_THDVAR_BOOL(
   FALSE                              /* default */
 );
 
-static char* ndbinfo_dbname = (char*)"ndbinfo";
+static char* opt_ndbinfo_dbname = (char*)"ndbinfo";
 static MYSQL_SYSVAR_STR(
   database,                         /* name */
-  ndbinfo_dbname,                   /* var */
+  opt_ndbinfo_dbname,               /* var */
   PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
   "Name of the database used by ndbinfo",
   NULL,                             /* check func. */
@@ -67,10 +67,10 @@ static MYSQL_SYSVAR_STR(
   NULL                              /* default */
 );
 
-static char* table_prefix = (char*)"ndb$";
+static char* opt_ndbinfo_table_prefix = (char*)"ndb$";
 static MYSQL_SYSVAR_STR(
   table_prefix,                     /* name */
-  table_prefix,                     /* var */
+  opt_ndbinfo_table_prefix,         /* var */
   PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
   "Prefix to use for all virtual tables loaded from NDB",
   NULL,                             /* check func. */
@@ -78,10 +78,10 @@ static MYSQL_SYSVAR_STR(
   NULL                              /* default */
 );
 
-static Uint32 version = NDB_VERSION_D;
+static Uint32 opt_ndbinfo_version = NDB_VERSION_D;
 static MYSQL_SYSVAR_UINT(
   version,                          /* name */
-  version,                          /* var */
+  opt_ndbinfo_version,              /* var */
   PLUGIN_VAR_NOCMDOPT | PLUGIN_VAR_READONLY,
   "Compile version for ndbinfo",
   NULL,                             /* check func. */
@@ -92,6 +92,45 @@ static MYSQL_SYSVAR_UINT(
   0                                 /* block */
 );
 
+static my_bool opt_ndbinfo_offline;
+
+static
+void
+offline_update(THD* thd, struct st_mysql_sys_var* var,
+               void* var_ptr, const void* save)
+{
+  DBUG_ENTER("offline_update");
+
+  const my_bool new_offline =
+    (*(static_cast<const my_bool*>(save)) != 0);
+  if (new_offline == opt_ndbinfo_offline)
+  {
+    // No change
+    DBUG_VOID_RETURN;
+  }
+
+  // Set offline mode, any tables opened from here on will
+  // be opened in the new mode
+  opt_ndbinfo_offline = new_offline;
+
+  // Close any open tables which may be in the old mode
+  (void)close_cached_tables(thd, NULL, false, true, false);
+
+  DBUG_VOID_RETURN;
+}
+
+static MYSQL_SYSVAR_BOOL(
+  offline,                          /* name */
+  opt_ndbinfo_offline,              /* var */
+  PLUGIN_VAR_NOCMDOPT,
+  "Set ndbinfo in offline mode, tables and views can "
+  "be opened even if they don't exist or have different "
+  "definition in NDB. No rows will be returned.",
+  NULL,                             /* check func. */
+  offline_update,                   /* update func. */
+  0                                 /* default */
+);
+
 
 static NdbInfo* g_ndbinfo;
 
@@ -124,10 +163,15 @@ struct ha_ndbinfo_impl
   Vector<const NdbInfoRecAttr *> m_columns;
   bool m_first_use;
 
+  // Indicates if the table has been opened in offline mode;
+  // can only be reset by closing the table
+  bool m_offline;
+
   ha_ndbinfo_impl() :
     m_table(NULL),
     m_scan_op(NULL),
-    m_first_use(true)
+    m_first_use(true),
+    m_offline(false)
   {
   }
 };
@@ -211,7 +255,7 @@ static void
 generate_sql(const NdbInfo::Table* ndb_tab, BaseString& sql)
 {
   sql.appfmt("'CREATE TABLE `%s`.`%s%s` (",
-             ndbinfo_dbname, table_prefix, ndb_tab->getName());
+             opt_ndbinfo_dbname, opt_ndbinfo_table_prefix, ndb_tab->getName());
 
   const char* separator = "";
   for (unsigned i = 0; i < ndb_tab->columns(); i++)
@@ -265,7 +309,7 @@ warn_incompatible(const NdbInfo::Table*
 
   msg.assfmt("Table '%s%s' is defined differently in NDB, %s. The "
              "SQL to regenerate is: ",
-             table_prefix, ndb_tab->getName(), explanation);
+             opt_ndbinfo_table_prefix, ndb_tab->getName(), explanation);
   generate_sql(ndb_tab, msg);
 
   const MYSQL_ERROR::enum_warning_level level =
@@ -289,12 +333,18 @@ bool ha_ndbinfo::is_open(void) const
   return m_impl.m_table != NULL;
 }
 
+bool ha_ndbinfo::is_offline(void) const
+{
+  return m_impl.m_offline;
+}
+
 int ha_ndbinfo::open(const char *name, int mode, uint test_if_locked)
 {
   DBUG_ENTER("ha_ndbinfo::open");
   DBUG_PRINT("enter", ("name: %s, mode: %d", name, mode));
 
   assert(is_closed());
+  assert(!is_offline()); // Closed table can not be offline
 
   if (mode == O_RDWR)
   {
@@ -307,9 +357,11 @@ int ha_ndbinfo::open(const char *name, i
     DBUG_ASSERT(false);
   }
 
-  if (ndbcluster_is_disabled())
+  if (opt_ndbinfo_offline ||
+      ndbcluster_is_disabled())
   {
-    // Allow table to be opened with ndbcluster disabled
+    // Mark table as being offline and allow it to be opened
+    m_impl.m_offline = true;
     DBUG_RETURN(0);
   }
 
@@ -321,21 +373,36 @@ int ha_ndbinfo::open(const char *name, i
     DBUG_RETURN(err2mysql(err));
   }
 
+  /*
+    Check the table definition to detect incompatible differences which
+    should return an error. Differences which only generate a warning
+    are checked on first use.
+  */
   DBUG_PRINT("info", ("Comparing MySQL's table def against NDB"));
   const NdbInfo::Table* ndb_tab = m_impl.m_table;
   for (uint i = 0; i < table->s->fields; i++)
   {
     const Field* field = table->field[i];
-    const NdbInfo::Column* col = ndb_tab->getColumn(field->field_name);
-    if (!col)
+
+    // Check if field is NULLable
+    if (const_cast<Field*>(field)->real_maybe_null() == false)
     {
-      // The column didn't exist
+      // Only NULLable fields supported
       warn_incompatible(ndb_tab, true,
-                        "column '%s' does not exist",
+                        "column '%s' is NOT NULL",
                         field->field_name);
       DBUG_RETURN(ERR_INCOMPAT_TABLE_DEF);
     }
 
+    // Check if column exist in NDB
+    const NdbInfo::Column* col = ndb_tab->getColumn(field->field_name);
+    if (!col)
+    {
+      // The column didn't exist
+      continue;
+    }
+
+    // Check compatible field and column type
     bool compatible = false;
     switch(col->m_type)
     {
@@ -378,7 +445,7 @@ int ha_ndbinfo::close(void)
 {
   DBUG_ENTER("ha_ndbinfo::close");
 
-  if (ndbcluster_is_disabled())
+  if (is_offline())
     DBUG_RETURN(0);
 
   assert(is_open());
@@ -395,12 +462,13 @@ int ha_ndbinfo::rnd_init(bool scan)
   DBUG_ENTER("ha_ndbinfo::rnd_init");
   DBUG_PRINT("info", ("scan: %d", scan));
 
-  if (ndbcluster_is_disabled())
+  if (is_offline())
   {
     push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_NOTE, 1,
-                 "'NDBINFO' has been started "
-                 "in limited mode since the 'NDBCLUSTER' "
-                 "engine is disabled - no rows can be returned");
+                 "'NDBINFO' has been started in offline mode "
+                 "since the 'NDBCLUSTER' engine is disabled "
+                 "or @@global.ndbinfo_offline is turned on "
+                 "- no rows can be returned");
     DBUG_RETURN(0);
   }
 
@@ -412,13 +480,30 @@ int ha_ndbinfo::rnd_init(bool scan)
     m_impl.m_first_use = false;
 
     /*
-      Due to different code paths in MySQL Server
-      for prepared statement protocol, some warnings
-      from 'handler::open' are lost and need to be
-      deffered to first use instead
+      Check the table definition and generate warnings for
+      incompatibilities which are allowed but should generate a warning.
+      (Done this late due to different code paths in MySQL Server for
+      prepared statement protocol, where warnings from 'handler::open'
+      are lost).
     */
+    uint fields_found_in_ndb = 0;
     const NdbInfo::Table* ndb_tab = m_impl.m_table;
-    if (table->s->fields < ndb_tab->columns())
+    for (uint i = 0; i < table->s->fields; i++)
+    {
+      const Field* field = table->field[i];
+      const NdbInfo::Column* col = ndb_tab->getColumn(field->field_name);
+      if (!col)
+      {
+        // The column didn't exist
+        warn_incompatible(ndb_tab, true,
+                          "column '%s' does not exist",
+                          field->field_name);
+        continue;
+      }
+      fields_found_in_ndb++;
+    }
+
+    if (fields_found_in_ndb < ndb_tab->columns())
     {
       // There are more columns available in NDB
       warn_incompatible(ndb_tab, false,
@@ -466,7 +551,7 @@ int ha_ndbinfo::rnd_end()
 {
   DBUG_ENTER("ha_ndbinfo::rnd_end");
 
-  if (ndbcluster_is_disabled())
+  if (is_offline())
     DBUG_RETURN(0);
 
   assert(is_open());
@@ -486,7 +571,7 @@ int ha_ndbinfo::rnd_next(uchar *buf)
   int err;
   DBUG_ENTER("ha_ndbinfo::rnd_next");
 
-  if (ndbcluster_is_disabled())
+  if (is_offline())
     DBUG_RETURN(HA_ERR_END_OF_FILE);
 
   assert(is_open());
@@ -546,7 +631,7 @@ ha_ndbinfo::unpack_record(uchar *dst_row
   {
     Field *field = table->field[i];
     const NdbInfoRecAttr* record = m_impl.m_columns[i];
-    if (m_impl.m_columns[i])
+    if (record && !record->isNULL())
     {
       field->set_notnull();
       field->move_field_offset(dst_offset);
@@ -617,7 +702,7 @@ ndbinfo_find_files(handlerton *hton, THD
     List_iterator<LEX_STRING> it(*files);
     while ((dir_name=it++))
     {
-      if (strcmp(dir_name->str, ndbinfo_dbname))
+      if (strcmp(dir_name->str, opt_ndbinfo_dbname))
         continue;
 
       DBUG_PRINT("info", ("Hiding own databse '%s'", dir_name->str));
@@ -628,7 +713,7 @@ ndbinfo_find_files(handlerton *hton, THD
   }
 
   DBUG_ASSERT(db);
-  if (strcmp(db, ndbinfo_dbname))
+  if (strcmp(db, opt_ndbinfo_dbname))
     DBUG_RETURN(0); // Only hide files in "our" db
 
   /* Hide all files that start with "our" prefix */
@@ -636,7 +721,7 @@ ndbinfo_find_files(handlerton *hton, THD
   List_iterator<LEX_STRING> it(*files);
   while ((file_name=it++))
   {
-    if (is_prefix(file_name->str, table_prefix))
+    if (is_prefix(file_name->str, opt_ndbinfo_table_prefix))
     {
       DBUG_PRINT("info", ("Hiding '%s'", file_name->str));
       it.remove();
@@ -668,11 +753,11 @@ int ndbinfo_init(void *plugin)
 
   char prefix[FN_REFLEN];
   build_table_filename(prefix, sizeof(prefix) - 1,
-                       ndbinfo_dbname, table_prefix, "", 0);
+                       opt_ndbinfo_dbname, opt_ndbinfo_table_prefix, "", 0);
   DBUG_PRINT("info", ("prefix: '%s'", prefix));
   assert(g_ndb_cluster_connection);
   g_ndbinfo = new NdbInfo(g_ndb_cluster_connection, prefix,
-                          ndbinfo_dbname, table_prefix);
+                          opt_ndbinfo_dbname, opt_ndbinfo_table_prefix);
   if (!g_ndbinfo)
   {
     sql_print_error("Failed to create NdbInfo");
@@ -712,6 +797,7 @@ struct st_mysql_sys_var* ndbinfo_system_
   MYSQL_SYSVAR(database),
   MYSQL_SYSVAR(table_prefix),
   MYSQL_SYSVAR(version),
+  MYSQL_SYSVAR(offline),
 
   NULL
 };

=== modified file 'sql/ha_ndbinfo.h'
--- a/sql/ha_ndbinfo.h	2011-02-01 14:58:21 +0000
+++ b/sql/ha_ndbinfo.h	2011-05-23 11:57:55 +0000
@@ -83,6 +83,8 @@ private:
   bool is_open(void) const;
   bool is_closed(void) const { return ! is_open(); };
 
+  bool is_offline(void) const;
+
   struct ha_ndbinfo_impl& m_impl;
 
 };

=== modified file 'storage/ndb/CMakeLists.txt'
--- a/storage/ndb/CMakeLists.txt	2011-05-06 13:26:05 +0000
+++ b/storage/ndb/CMakeLists.txt	2011-05-19 09:05:45 +0000
@@ -17,6 +17,8 @@
 SET(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH}
     ${CMAKE_SOURCE_DIR}/cmake
     ${CMAKE_SOURCE_DIR}/storage/ndb/cmake)
+
+MESSAGE(STATUS "Using cmake version ${CMAKE_VERSION}")
     
 # Check if this is MySQL Cluster build i.e the MySQL Server
 # version string ends in -ndb-Y.Y.Y[-status]    

=== added directory 'storage/ndb/cmake/os'
=== added file 'storage/ndb/cmake/os/Windows.cmake'
--- a/storage/ndb/cmake/os/Windows.cmake	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/cmake/os/Windows.cmake	2011-05-24 08:45:38 +0000
@@ -0,0 +1,23 @@
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+
+
+# Avoid running system checks by using pre-cached check results.
+# System checks are expensive on VS since every tiny test program has to
+# be compiled in a VC solution.
+GET_FILENAME_COMPONENT(_SCRIPT_DIR ${CMAKE_CURRENT_LIST_FILE} PATH)
+INCLUDE(${_SCRIPT_DIR}/WindowsCache.cmake)
+

=== added file 'storage/ndb/cmake/os/WindowsCache.cmake'
--- a/storage/ndb/cmake/os/WindowsCache.cmake	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/cmake/os/WindowsCache.cmake	2011-05-24 08:45:38 +0000
@@ -0,0 +1,66 @@
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+
+# Avoid system checks on Windows by pre-caching the results. Most of the
+# system checks are not relevant for Windows anyway, and it takes a lot
+# more time to run them, since CMake creates a Visual Studio project for
+# each tiny test.
+# Note that values are cached for VC++ only; MinGW would give slightly
+# different results.
+
+
+IF(MSVC)
+SET(HAVE_POSIX_MEMALIGN CACHE INTERNAL "")
+SET(HAVE_CLOCK_GETTIME CACHE INTERNAL "")
+SET(HAVE_PTHREAD_CONDATTR_SETCLOCK CACHE INTERNAL "")
+SET(HAVE_PTHREAD_SELF CACHE INTERNAL "")
+SET(HAVE_SCHED_GET_PRIORITY_MIN CACHE INTERNAL "")
+SET(HAVE_SCHED_GET_PRIORITY_MAX CACHE INTERNAL "")
+SET(HAVE_SCHED_SETAFFINITY CACHE INTERNAL "")
+SET(HAVE_SCHED_SETSCHEDULER CACHE INTERNAL "")
+SET(HAVE_PROCESSOR_BIND CACHE INTERNAL "")
+SET(HAVE_EPOLL_CREATE CACHE INTERNAL "")
+SET(HAVE_MEMALIGN CACHE INTERNAL "")
+SET(HAVE_SYSCONF CACHE INTERNAL "")
+SET(HAVE_DIRECTIO CACHE INTERNAL "")
+SET(HAVE_ATOMIC_SWAP32 CACHE INTERNAL "")
+SET(HAVE_MLOCK CACHE INTERNAL "")
+SET(HAVE_FFS CACHE INTERNAL "")
+SET(HAVE_PTHREAD_MUTEXATTR_INIT CACHE INTERNAL "")
+SET(HAVE_PTHREAD_MUTEXATTR_SETTYPE CACHE INTERNAL "")
+SET(HAVE_PTHREAD_SETSCHEDPARAM CACHE INTERNAL "")
+SET(HAVE_SUN_PREFETCH_H CACHE INTERNAL "")
+SET(HAVE___BUILTIN_FFS CACHE INTERNAL "")
+SET(HAVE__BITSCANFORWARD 1 CACHE INTERNAL "")
+SET(HAVE_LINUX_SCHEDULING CACHE INTERNAL "")
+SET(HAVE_SOLARIS_AFFINITY CACHE INTERNAL "")
+SET(HAVE_LINUX_FUTEX CACHE INTERNAL "")
+SET(HAVE_ATOMIC_H CACHE INTERNAL "")
+
+SET(NDB_SIZEOF_CHAR 1 CACHE INTERNAL "")
+SET(HAVE_NDB_SIZEOF_CHAR TRUE CACHE INTERNAL "")
+SET(NDB_SIZEOF_CHARP ${CMAKE_SIZEOF_VOID_P} CACHE INTERNAL "")
+SET(HAVE_NDB_SIZEOF_CHARP TRUE CACHE INTERNAL "")
+SET(NDB_SIZEOF_INT 4 CACHE INTERNAL "")
+SET(HAVE_NDB_SIZEOF_INT TRUE CACHE INTERNAL "")
+SET(NDB_SIZEOF_LONG 4 CACHE INTERNAL "")
+SET(HAVE_NDB_SIZEOF_LONG TRUE CACHE INTERNAL "")
+SET(NDB_SIZEOF_LONG_LONG 8 CACHE INTERNAL "")
+SET(HAVE_NDB_SIZEOF_LONG_LONG TRUE CACHE INTERNAL "")
+SET(NDB_SIZEOF_SHORT 2 CACHE INTERNAL "")
+SET(HAVE_NDB_SIZEOF_SHORT TRUE CACHE INTERNAL "")
+
+SET(NDB_BUILD_NDBMTD 1 CACHE INTERNAL "")
+ENDIF()

=== modified file 'storage/ndb/include/kernel/AttributeHeader.hpp'
--- a/storage/ndb/include/kernel/AttributeHeader.hpp	2011-05-19 09:16:32 +0000
+++ b/storage/ndb/include/kernel/AttributeHeader.hpp	2011-05-25 15:03:11 +0000
@@ -66,6 +66,17 @@ public:
   STATIC_CONST( CORR_FACTOR64 = 0xFFE8 ); // including root-frag
 
   /**
+   * 64-bit row GCI (extends the lower bits when the stored bits are insufficient)
+   *   read-only
+   */
+  STATIC_CONST( ROW_GCI64    = 0xFFE7);
+
+  /**
+   * Row author... auto-set to 0, can be overwritten
+   */
+  STATIC_CONST( ROW_AUTHOR    = 0xFFE6);
+
+  /**
    * Optimize pseudo column and optimization options
    */
   STATIC_CONST( OPTIMIZE     = 0xFFE0 );          //pseudo column id to optimize
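
For reference, the new pseudo columns are read through the NDB API like the existing ones (ROW_GCI, ROWID). A minimal sketch, illustrative only and not part of this patch: it assumes an established Ndb object, a table whose key column is an Unsigned named "pk" (hypothetical), and uses the NdbDictionary::Column::ROW_GCI64 constant added later in this changeset.

  // Sketch: read the new ROW_GCI64 pseudo column for one row.
  // Error handling elided; "pk" is an assumed key column name.
  #include <NdbApi.hpp>
  #include <cstdio>

  static void print_row_gci64(Ndb* ndb, const NdbDictionary::Table* tab,
                              Uint32 pk_value)
  {
    Uint64 row_gci64 = 0;
    NdbTransaction* trans = ndb->startTransaction();
    NdbOperation* op = trans->getNdbOperation(tab);
    op->readTuple(NdbOperation::LM_Read);
    op->equal("pk", pk_value);
    op->getValue(NdbDictionary::Column::ROW_GCI64, (char*)&row_gci64);
    if (trans->execute(NdbTransaction::Commit) == 0)
      printf("row GCI64: %llu\n", (unsigned long long)row_gci64);
    ndb->closeTransaction(trans);
  }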

=== modified file 'storage/ndb/include/kernel/kernel_types.h'
--- a/storage/ndb/include/kernel/kernel_types.h	2011-04-19 09:01:07 +0000
+++ b/storage/ndb/include/kernel/kernel_types.h	2011-05-25 13:19:02 +0000
@@ -36,9 +36,7 @@ enum Operation_t {
   ,ZDELETE  = 3
   ,ZWRITE   = 4
   ,ZREAD_EX = 5
-#if 0
-  ,ZREAD_CONSISTENT = 6
-#endif
+  ,ZREFRESH = 6
   ,ZUNLOCK  = 7
 };
 

=== modified file 'storage/ndb/include/kernel/signaldata/CreateTab.hpp'
--- a/storage/ndb/include/kernel/signaldata/CreateTab.hpp	2011-02-15 10:52:32 +0000
+++ b/storage/ndb/include/kernel/signaldata/CreateTab.hpp	2011-05-17 23:29:55 +0000
@@ -24,7 +24,7 @@
 struct CreateTabReq
 {
   STATIC_CONST( SignalLength = 6 );
-  STATIC_CONST( SignalLengthLDM = 6 + 10 );
+  STATIC_CONST( SignalLengthLDM = 6 + 11 );
 
   enum RequestType {
   };
@@ -49,6 +49,7 @@ struct CreateTabReq
   Uint32 noOfKeyAttr;
   Uint32 checksumIndicator;
   Uint32 GCPIndicator;
+  Uint32 extraRowAuthorBits;
 
   SECTION( DICT_TAB_INFO = 0 );
   SECTION( FRAGMENTATION = 1 );

=== modified file 'storage/ndb/include/kernel/signaldata/CreateTable.hpp'
--- a/storage/ndb/include/kernel/signaldata/CreateTable.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/include/kernel/signaldata/CreateTable.hpp	2011-05-17 23:29:55 +0000
@@ -75,7 +75,8 @@ struct CreateTableRef {
     OutOfStringBuffer = 773,
     NoLoggingTemporaryTable = 778,
     InvalidHashMap = 790,
-    TableDefinitionTooBig = 793
+    TableDefinitionTooBig = 793,
+    FeatureRequiresUpgrade = 794
   };
 
   Uint32 senderRef;

=== modified file 'storage/ndb/include/kernel/signaldata/DiGetNodes.hpp'
--- a/storage/ndb/include/kernel/signaldata/DiGetNodes.hpp	2011-02-08 13:55:54 +0000
+++ b/storage/ndb/include/kernel/signaldata/DiGetNodes.hpp	2011-05-25 06:58:00 +0000
@@ -58,10 +58,13 @@ class DiGetNodesReq {
    */
   friend class Dbdih;
 public:
-  STATIC_CONST( SignalLength = 3 );
+  STATIC_CONST( SignalLength = 4 + (sizeof(void*) / sizeof(Uint32)) );
 private:
   Uint32 tableId;
   Uint32 hashValue;
   Uint32 distr_key_indicator;
+  Uint32 unused;
+  Uint32 jamBuffer[2];
 };
+
 #endif
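
The length here grows from 3 words to 4 fixed words plus a pointer-sized slot; the jamBuffer member is sized to hold a pointer, with 'unused' apparently padding the fixed part. A small sketch of the arithmetic, illustrative only (assumes Uint32 is a 32-bit type, as in the kernel headers):

  // Sketch: DiGetNodesReq::SignalLength in 32-bit signal words.
  // LP64 (8-byte pointers): 4 + 8/4 = 6 words
  //   (tableId, hashValue, distr_key_indicator, unused, jamBuffer[2]).
  // ILP32 (4-byte pointers): 4 + 4/4 = 5 words.
  #include <cstdio>
  typedef unsigned int Uint32;

  int main()
  {
    const unsigned len = 4 + (sizeof(void*) / sizeof(Uint32));
    printf("DiGetNodesReq::SignalLength = %u words\n", len);
    return 0;
  }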

=== modified file 'storage/ndb/include/kernel/signaldata/DictTabInfo.hpp'
--- a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp	2011-02-16 14:53:53 +0000
+++ b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp	2011-05-17 23:29:55 +0000
@@ -150,6 +150,9 @@ public:
 
     TableStorageType   = 155,
 
+    ExtraRowGCIBits    = 156,
+    ExtraRowAuthorBits = 157,
+
     TableEnd           = 999,
     
     AttributeName          = 1000, // String, Mandatory
@@ -381,6 +384,9 @@ public:
 
     Uint32 TableStorageType;
 
+    Uint32 ExtraRowGCIBits;
+    Uint32 ExtraRowAuthorBits;
+
     Table() {}
     void init();
   };

=== modified file 'storage/ndb/include/ndb_version.h.in'
--- a/storage/ndb/include/ndb_version.h.in	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/include/ndb_version.h.in	2011-05-25 13:19:02 +0000
@@ -628,4 +628,52 @@ ndbd_deferred_unique_constraints(Uint32
   return x >= NDBD_DEFERRED_UNIQUE_CONSTRAINTS_71;
 }
 
+#define NDBD_TUP_EXTRABITS_70 NDB_MAKE_VERSION(7,0,25)
+#define NDBD_TUP_EXTRABITS_71 NDB_MAKE_VERSION(7,1,14)
+#define NDBD_TUP_EXTRABITS_72 NDB_MAKE_VERSION(7,2,1)
+
+static
+inline
+int
+ndb_tup_extrabits(Uint32 x)
+{
+  {
+    const Uint32 major = (x >> 16) & 0xFF;
+    const Uint32 minor = (x >>  8) & 0xFF;
+
+    if (major == 7 && minor < 2)
+    {
+      if (minor == 0)
+        return x >= NDBD_TUP_EXTRABITS_70;
+      else if (minor == 1)
+        return x >= NDBD_TUP_EXTRABITS_71;
+    }
+    return x >= NDBD_TUP_EXTRABITS_72;
+  }
+}
+
+#define NDBD_REFRESH_TUPLE_70 NDB_MAKE_VERSION(7,0,26)
+#define NDBD_REFRESH_TUPLE_71 NDB_MAKE_VERSION(7,1,15)
+#define NDBD_REFRESH_TUPLE_72 NDB_MAKE_VERSION(7,2,1)
+
+static
+inline
+int
+ndb_refresh_tuple(Uint32 x)
+{
+  {
+    const Uint32 major = (x >> 16) & 0xFF;
+    const Uint32 minor = (x >>  8) & 0xFF;
+
+    if (major == 7 && minor < 2)
+    {
+      if (minor == 0)
+        return x >= NDBD_REFRESH_TUPLE_70;
+      else if (minor == 1)
+        return x >= NDBD_REFRESH_TUPLE_71;
+    }
+    return x >= NDBD_REFRESH_TUPLE_72;
+  }
+}
+
 #endif
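
The per-branch gating in these helpers is easy to misread, so here is a worked, self-contained sketch, illustrative only (it assumes NDB_MAKE_VERSION packs major/minor/build as (major << 16) | (minor << 8) | build, matching the shifts used above):

  // Sketch: how ndb_refresh_tuple() gates the feature per branch.
  #include <cstdio>
  typedef unsigned int Uint32;
  #define NDB_MAKE_VERSION(A,B,C) (((A) << 16) | ((B) << 8) | ((C) << 0))
  #define NDBD_REFRESH_TUPLE_70 NDB_MAKE_VERSION(7,0,26)
  #define NDBD_REFRESH_TUPLE_71 NDB_MAKE_VERSION(7,1,15)
  #define NDBD_REFRESH_TUPLE_72 NDB_MAKE_VERSION(7,2,1)

  static int ndb_refresh_tuple(Uint32 x)   /* same shape as the patch */
  {
    const Uint32 major = (x >> 16) & 0xFF;
    const Uint32 minor = (x >>  8) & 0xFF;
    if (major == 7 && minor < 2)
    {
      if (minor == 0)
        return x >= NDBD_REFRESH_TUPLE_70;
      else if (minor == 1)
        return x >= NDBD_REFRESH_TUPLE_71;
    }
    return x >= NDBD_REFRESH_TUPLE_72;
  }

  int main()
  {
    printf("%d\n", ndb_refresh_tuple(NDB_MAKE_VERSION(7,0,25))); /* 0 */
    printf("%d\n", ndb_refresh_tuple(NDB_MAKE_VERSION(7,0,26))); /* 1 */
    printf("%d\n", ndb_refresh_tuple(NDB_MAKE_VERSION(7,1,14))); /* 0, needs 7.1.15 */
    printf("%d\n", ndb_refresh_tuple(NDB_MAKE_VERSION(7,2,1)));  /* 1 */
    return 0;
  }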

=== modified file 'storage/ndb/include/ndbapi/NdbDictionary.hpp'
--- a/storage/ndb/include/ndbapi/NdbDictionary.hpp	2011-05-19 09:16:32 +0000
+++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp	2011-05-25 15:03:11 +0000
@@ -603,6 +603,8 @@ public:
     static const Column * RECORDS_IN_RANGE;
     static const Column * ROWID;
     static const Column * ROW_GCI;
+    static const Column * ROW_GCI64;
+    static const Column * ROW_AUTHOR;
     static const Column * ANY_VALUE;
     static const Column * COPY_ROWID;
     static const Column * LOCK_REF;
@@ -1063,6 +1065,18 @@ public:
      */
     void setStorageType(Column::StorageType);
     Column::StorageType getStorageType() const;
+
+    /**
+     * Get/set extra GCI bits (max 31)
+     */
+    void setExtraRowGciBits(Uint32);
+    Uint32 getExtraRowGciBits() const;
+
+    /**
+     * Get/set extra row author bits (max 31)
+     */
+    void setExtraRowAuthorBits(Uint32);
+    Uint32 getExtraRowAuthorBits() const;
 #endif
 
     // these 2 are not de-doxygenated
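
A minimal sketch of the new table options, illustrative only (assumes 'dict' is an NdbDictionary::Dictionary* obtained from an established connection; error handling is abbreviated):

  // Sketch: request extra per-row GCI/author bits on a new table.
  #include <NdbApi.hpp>
  #include <cstdio>

  static int create_with_extra_bits(NdbDictionary::Dictionary* dict)
  {
    NdbDictionary::Table tab("t1");
    NdbDictionary::Column pk("pk");
    pk.setType(NdbDictionary::Column::Unsigned);
    pk.setPrimaryKey(true);
    tab.addColumn(pk);
    tab.setExtraRowGciBits(8);     // up to 31 extra GCI bits per row
    tab.setExtraRowAuthorBits(1);  // up to 31 author bits per row
    if (dict->createTable(tab) != 0)
    {
      // Data nodes without support reject this; see the new
      // CreateTableRef::FeatureRequiresUpgrade (794) error code above.
      fprintf(stderr, "create failed: %s\n", dict->getNdbError().message);
      return -1;
    }
    return 0;
  }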

=== modified file 'storage/ndb/include/ndbapi/NdbOperation.hpp'
--- a/storage/ndb/include/ndbapi/NdbOperation.hpp	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/include/ndbapi/NdbOperation.hpp	2011-05-25 13:19:02 +0000
@@ -914,6 +914,7 @@ public:
     DeleteRequest = 3,            ///< Delete Operation
     WriteRequest = 4,             ///< Write Operation
     ReadExclusive = 5,            ///< Read exclusive
+    RefreshRequest = 6,           ///< Refresh operation
     UnlockRequest = 7,            ///< Unlock operation
     OpenScanRequest,              ///< Scan Operation
     OpenRangeScanRequest,         ///< Range scan operation

=== modified file 'storage/ndb/include/ndbapi/NdbTransaction.hpp'
--- a/storage/ndb/include/ndbapi/NdbTransaction.hpp	2011-04-27 10:48:16 +0000
+++ b/storage/ndb/include/ndbapi/NdbTransaction.hpp	2011-05-25 13:19:02 +0000
@@ -752,6 +752,12 @@ public:
                                   const NdbOperation::OperationOptions *opts = 0,
                                   Uint32 sizeOfOptions = 0);
 
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+  const NdbOperation *refreshTuple(const NdbRecord *key_rec, const char *key_row,
+                                   const NdbOperation::OperationOptions *opts = 0,
+                                   Uint32 sizeOfOptions = 0);
+#endif
+
   /**
    * Scan a table, using NdbRecord to read out column data.
    *
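
A minimal sketch of the new call, illustrative only (assumes 'trans' is a started NdbTransaction, and that 'key_rec'/'key_row' are an NdbRecord and key buffer prepared as for deleteTuple()/updateTuple()):

  // Sketch: refresh one tuple via the new NdbRecord-based API.
  const NdbOperation* op = trans->refreshTuple(key_rec, key_row);
  if (op == NULL ||
      trans->execute(NdbTransaction::Commit) != 0)
    fprintf(stderr, "refresh failed: %s\n",
            trans->getNdbError().message);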

=== modified file 'storage/ndb/include/ndbapi/ndb_cluster_connection.hpp'
--- a/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp	2011-02-04 17:52:38 +0000
+++ b/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp	2011-05-23 14:05:08 +0000
@@ -192,6 +192,7 @@ public:
   unsigned max_nodegroup();
   unsigned node_id();
   unsigned get_connect_count() const;
+  unsigned get_min_db_version() const;
 
   void init_get_next_node(Ndb_cluster_connection_node_iter &iter);
   unsigned int get_next_node(Ndb_cluster_connection_node_iter &iter);

=== modified file 'storage/ndb/ndb_configure.cmake'
--- a/storage/ndb/ndb_configure.cmake	2011-03-15 15:50:34 +0000
+++ b/storage/ndb/ndb_configure.cmake	2011-05-24 08:45:38 +0000
@@ -18,6 +18,26 @@
 #
 # Run platform checks and create ndb_config.h
 #
+
+
+# Include the platform-specific file. To allow exceptions, this code
+# looks for files in order of how specific they are. If there is, for
+# example, a generic Linux.cmake and a version-specific
+# Linux-2.6.28-11-generic, it will pick Linux-2.6.28-11-generic and
+# include it. It is then up to the file writer to include the generic
+# version if necessary.
+FOREACH(_base
+        ${CMAKE_SYSTEM_NAME}-${CMAKE_SYSTEM_VERSION}-${CMAKE_SYSTEM_PROCESSOR}
+        ${CMAKE_SYSTEM_NAME}-${CMAKE_SYSTEM_VERSION}
+        ${CMAKE_SYSTEM_NAME})
+  SET(_file ${CMAKE_CURRENT_SOURCE_DIR}/cmake/os/${_base}.cmake)
+  IF(EXISTS ${_file})
+    INCLUDE(${_file})
+    BREAK()
+  ENDIF()
+ENDFOREACH()
+
+
 INCLUDE(CheckFunctionExists)
 INCLUDE(CheckIncludeFiles)
 INCLUDE(CheckCSourceCompiles)
@@ -31,7 +51,7 @@ CHECK_FUNCTION_EXISTS(pthread_condattr_s
 CHECK_FUNCTION_EXISTS(pthread_self HAVE_PTHREAD_SELF)
 CHECK_FUNCTION_EXISTS(sched_get_priority_min HAVE_SCHED_GET_PRIORITY_MIN)
 CHECK_FUNCTION_EXISTS(sched_get_priority_max HAVE_SCHED_GET_PRIORITY_MAX)
-CHECK_FUNCTION_EXISTS(sched_setaffinity HAVE_SCHED_SETAFFINTIY)
+CHECK_FUNCTION_EXISTS(sched_setaffinity HAVE_SCHED_SETAFFINITY)
 CHECK_FUNCTION_EXISTS(sched_setscheduler HAVE_SCHED_SETSCHEDULER)
 CHECK_FUNCTION_EXISTS(processor_bind HAVE_PROCESSOR_BIND)
 CHECK_FUNCTION_EXISTS(epoll_create HAVE_EPOLL_CREATE)
@@ -153,7 +173,8 @@ IF(WITH_NDBMTD)
     return a;
   }"
   NDB_BUILD_NDBMTD)
-
+ELSE()
+  SET(NDB_BUILD_NDBMTD CACHE INTERNAL "")
 ENDIF()
 
 SET(WITH_NDB_PORT "" CACHE INTEGER

=== modified file 'storage/ndb/ndb_configure.m4'
--- a/storage/ndb/ndb_configure.m4	2011-05-16 11:44:52 +0000
+++ b/storage/ndb/ndb_configure.m4	2011-05-24 08:38:04 +0000
@@ -2,7 +2,7 @@
 # Should be updated when creating a new NDB version
 NDB_VERSION_MAJOR=7
 NDB_VERSION_MINOR=0
-NDB_VERSION_BUILD=25
+NDB_VERSION_BUILD=26
 NDB_VERSION_STATUS=""
 
 dnl for build ndb docs

=== modified file 'storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp'
--- a/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp	2011-02-16 14:53:53 +0000
+++ b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp	2011-05-17 23:29:55 +0000
@@ -73,6 +73,8 @@ DictTabInfo::TableMapping[] = {
   DTIMAP(Table, HashMapObjectId, HashMapObjectId),
   DTIMAP(Table, HashMapVersion, HashMapVersion),
   DTIMAP(Table, TableStorageType, TableStorageType),
+  DTIMAP(Table, ExtraRowGCIBits, ExtraRowGCIBits),
+  DTIMAP(Table, ExtraRowAuthorBits, ExtraRowAuthorBits),
   DTIBREAK(AttributeName)
 };
 
@@ -184,6 +186,9 @@ DictTabInfo::Table::init(){
   HashMapVersion = RNIL;
 
   TableStorageType = NDB_STORAGETYPE_DEFAULT;
+
+  ExtraRowGCIBits = 0;
+  ExtraRowAuthorBits = 0;
 }
 
 void

=== modified file 'storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp'
--- a/storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp	2011-05-25 13:19:02 +0000
@@ -36,6 +36,7 @@ printTCKEYREQ(FILE * output, const Uint3
 	  sig->getOperationType(requestInfo) == ZDELETE  ? "Delete" :
 	  sig->getOperationType(requestInfo) == ZWRITE   ? "Write"  :
           sig->getOperationType(requestInfo) == ZUNLOCK  ? "Unlock" :
+          sig->getOperationType(requestInfo) == ZREFRESH ? "Refresh" :
 	  "Unknown");
   {
     if(sig->getDirtyFlag(requestInfo)){

=== modified file 'storage/ndb/src/kernel/blocks/ERROR_codes.txt'
--- a/storage/ndb/src/kernel/blocks/ERROR_codes.txt	2011-05-19 09:16:32 +0000
+++ b/storage/ndb/src/kernel/blocks/ERROR_codes.txt	2011-05-25 15:03:11 +0000
@@ -29,7 +29,8 @@ Next DBTUX 12010
 Next SUMA 13047
 Next LGMAN 15001
 Next TSMAN 16001
-Next TRIX 17xxx
+Next DBSPJ 17000
+Next TRIX 18000
 
 TESTING NODE FAILURE, ARBITRATION
 ---------------------------------
@@ -725,11 +726,11 @@ ACC bug#34348
 TRIX
 ----
 Index stats:
-17001: fail to seize statOp
-17002: fail to seize associated subRec
-17011: fail HEAD read - simulate UtilExecuteRef::AllocationError
-17012: fail HEAD insert/update/delete - as in 17011
-17021: fail RT_CLEAN_NEW - simulate TC error 626
-17022: fail RT_CLEAN_OLD (non-fatal) - as in 17021
-17023: fail RT_CLEAN_ALL (non-fatal) - as in 17021
-17024: fail RT_SCAN_FRAG - simulate TC error 630
+18001: fail to seize statOp
+18002: fail to seize associated subRec
+18011: fail HEAD read - simulate UtilExecuteRef::AllocationError
+18012: fail HEAD insert/update/delete - as in 18011
+18021: fail RT_CLEAN_NEW - simulate TC error 626
+18022: fail RT_CLEAN_OLD (non-fatal) - as in 18021
+18023: fail RT_CLEAN_ALL (non-fatal) - as in 18021
+18024: fail RT_SCAN_FRAG - simulate TC error 630

=== modified file 'storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp'
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp	2011-05-19 09:16:32 +0000
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp	2011-05-25 15:03:11 +0000
@@ -1434,6 +1434,11 @@ void Cmvmi::execTAMPER_ORD(Signal* signa
   else if (errNo < 18000)
   {
     jam();
+    tuserblockref = DBSPJ_REF;
+  }
+  else if (errNo < 19000)
+  {
+    jam();
     tuserblockref = TRIX_REF;
   }
   else if (errNo < 30000)

=== modified file 'storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp'
--- a/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp	2011-04-19 09:01:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp	2011-05-25 13:19:02 +0000
@@ -140,7 +140,7 @@ ndbout << "Ptr: " << ptr.p->word32 << "
 /**
  * Check kernel_types for other operation types
  */
-#define ZSCAN_OP 6
+#define ZSCAN_OP 8
 #define ZSCAN_REC_SIZE 256
 #define ZSTAND_BY 2
 #define ZTABLESIZE 16
@@ -642,6 +642,7 @@ public:
   class Dblqh* c_lqh;
 
   void execACCMINUPDATE(Signal* signal);
+  void removerow(Uint32 op, const Local_key*);
 
 private:
   BLOCK_DEFINES(Dbacc);

=== modified file 'storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp	2011-04-20 11:58:16 +0000
+++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp	2011-05-25 13:19:02 +0000
@@ -971,9 +971,12 @@ void Dbacc::initOpRec(Signal* signal)
   Uint32 readFlag = (((Treqinfo >> 4) & 0x3) == 0);      // Only 1 if Read
   Uint32 dirtyFlag = (((Treqinfo >> 6) & 0x1) == 1);     // Only 1 if Dirty
   Uint32 dirtyReadFlag = readFlag & dirtyFlag;
+  Uint32 operation = Treqinfo & 0xf;
+  if (operation == ZREFRESH)
+    operation = ZWRITE; /* Insert if !exist, otherwise lock */
 
   Uint32 opbits = 0;
-  opbits |= Treqinfo & 0x7;
+  opbits |= operation;
   opbits |= ((Treqinfo >> 4) & 0x3) ? (Uint32) Operationrec::OP_LOCK_MODE : 0;
   opbits |= ((Treqinfo >> 4) & 0x3) ? (Uint32) Operationrec::OP_ACC_LOCK_MODE : 0;
   opbits |= (dirtyReadFlag) ? (Uint32) Operationrec::OP_DIRTY_READ : 0;
@@ -2323,6 +2326,27 @@ void Dbacc::execACCMINUPDATE(Signal* sig
   ndbrequire(false);
 }//Dbacc::execACCMINUPDATE()
 
+void
+Dbacc::removerow(Uint32 opPtrI, const Local_key* key)
+{
+  jamEntry();
+  operationRecPtr.i = opPtrI;
+  ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+  Uint32 opbits = operationRecPtr.p->m_op_bits;
+  fragrecptr.i = operationRecPtr.p->fragptr;
+
+  /* Mark element disappeared */
+  opbits |= Operationrec::OP_ELEMENT_DISAPPEARED;
+  opbits &= ~Uint32(Operationrec::OP_COMMIT_DELETE_CHECK);
+  operationRecPtr.p->m_op_bits = opbits;
+
+#ifdef VM_TRACE
+  ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+  ndbrequire(operationRecPtr.p->localdata[0] == key->m_page_no);
+  ndbrequire(operationRecPtr.p->localdata[1] == key->m_page_idx);
+#endif
+}//Dbacc::removerow()
+
 /* ******************--------------------------------------------------------------- */
 /* ACC_COMMITREQ                                        COMMIT  TRANSACTION          */
 /*                                                     SENDER: LQH,    LEVEL B       */
@@ -2371,6 +2395,16 @@ void Dbacc::execACC_COMMITREQ(Signal* si
       }//if
     } else {
       jam();                                                /* EXPAND PROCESS HANDLING */
+      if (unlikely(opbits & Operationrec::OP_ELEMENT_DISAPPEARED))
+      {
+        jam();
+        /* Commit of refresh of non-existing tuple.
+         *   ZREFRESH->ZWRITE->ZINSERT
+         * Do not affect the element count
+         */
+        ndbrequire((opbits & Operationrec::OP_MASK) == ZINSERT);
+        return;
+      }
       fragrecptr.p->noOfElements++;
       fragrecptr.p->slack -= fragrecptr.p->elementLength;
       if (fragrecptr.p->slack >= (1u << 31)) { 

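In ACC a refresh rides the existing write machinery: initOpRec() above takes the operation from the low four bits of the request info (the mask widens from 0x7 to 0xf so the new code fits, consistent with ZSCAN_OP moving from 6 to 8) and rewrites ZREFRESH to ZWRITE before assembling the opbits. A minimal sketch of that decode, with illustrative constants standing in for the kernel's real values:

// Illustrative constants only; the kernel's values live in kernel_types.h.
enum AccOp { ZREAD, ZUPDATE, ZINSERT, ZDELETE, ZWRITE, ZREAD_EX, ZREFRESH, ZUNLOCK };

// Mirror of the decode in Dbacc::initOpRec(): a refresh behaves as a write,
// i.e. insert the row if it does not exist, otherwise just take the lock.
static Uint32 decode_acc_operation(Uint32 Treqinfo)
{
  Uint32 operation = Treqinfo & 0xf;   // low 4 bits carry the operation type
  if (operation == ZREFRESH)
    operation = ZWRITE;
  return operation;
}

At commit time the OP_ELEMENT_DISAPPEARED path added to execACC_COMMITREQ() undoes the insert half of that mapping for rows that never existed, without disturbing the fragment's element count.
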
=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-05-19 09:18:04 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-05-25 15:03:11 +0000
@@ -757,6 +757,9 @@ Dbdict::packTableIntoPages(SimplePropert
   w.add(DictTabInfo::SingleUserMode, tablePtr.p->singleUserMode);
   w.add(DictTabInfo::HashMapObjectId, tablePtr.p->hashMapObjectId);
   w.add(DictTabInfo::TableStorageType, tablePtr.p->storageType);
+  w.add(DictTabInfo::ExtraRowGCIBits, tablePtr.p->m_extra_row_gci_bits);
+  w.add(DictTabInfo::ExtraRowAuthorBits, tablePtr.p->m_extra_row_author_bits);
 
   if (tablePtr.p->hashMapObjectId != RNIL)
   {
@@ -4982,6 +4985,8 @@ void Dbdict::handleTabInfoInit(Signal *
   tablePtr.p->hashMapObjectId = c_tableDesc.HashMapObjectId;
   tablePtr.p->hashMapVersion = c_tableDesc.HashMapVersion;
   tablePtr.p->storageType = c_tableDesc.TableStorageType;
+  tablePtr.p->m_extra_row_gci_bits = c_tableDesc.ExtraRowGCIBits;
+  tablePtr.p->m_extra_row_author_bits = c_tableDesc.ExtraRowAuthorBits;
 
   tabRequire(tablePtr.p->noOfAttributes <= MAX_ATTRIBUTES_IN_TABLE,
              CreateTableRef::NoMoreAttributeRecords); // bad error code!
@@ -5501,6 +5506,27 @@ void Dbdict::handleTabInfo(SimplePropert
   tabRequire(CHECK_SUMA_MESSAGE_SIZE(keyCount, keyLength, attrCount, recordLength),
              CreateTableRef::RecordTooBig);
 
+  /* Check that all currently running data nodes support
+   * the table's features
+   */
+  for (Uint32 nodeId=1; nodeId < MAX_NODES; nodeId++)
+  {
+    const NodeInfo& ni = getNodeInfo(nodeId);
+
+    if (ni.m_connected &&
+        (ni.m_type == NODE_TYPE_DB))
+    {
+      /* Check that all nodes support extra bits */
+      if (tablePtr.p->m_extra_row_gci_bits ||
+          tablePtr.p->m_extra_row_author_bits)
+      {
+        tabRequire(ndb_tup_extrabits(ni.m_version),
+                   CreateTableRef::FeatureRequiresUpgrade);
+      }
+    }
+  }
+
   if(tablePtr.p->m_tablespace_id != RNIL || counts[3] || counts[4])
   {
     FilegroupPtr tablespacePtr;
@@ -6205,8 +6231,9 @@ Dbdict::createTab_local(Signal* signal,
   req->noOfNullAttributes = tabPtr.p->noOfNullBits;
   req->noOfKeyAttr = tabPtr.p->noOfPrimkey;
   req->checksumIndicator = 1;
-  req->GCPIndicator = 1;
+  req->GCPIndicator = 1 + tabPtr.p->m_extra_row_gci_bits;
   req->noOfAttributes = tabPtr.p->noOfAttributes;
+  req->extraRowAuthorBits = tabPtr.p->m_extra_row_author_bits;
   sendSignal(DBLQH_REF, GSN_CREATE_TAB_REQ, signal,
              CreateTabReq::SignalLengthLDM, JBB);
 

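handleTabInfo() above refuses to create a table that uses the new extra row bits unless every connected data node runs a TUP that understands them; ndb_version.h.in (also touched by this merge) supplies the ndb_tup_extrabits() predicate. A hedged sketch of what such a predicate looks like; the threshold below is a placeholder for illustration, not the real cut-in version:

// NDB packs versions as (major << 16) | (minor << 8) | build.
#define NDB_MAKE_VERSION(A, B, C) (((A) << 16) | ((B) << 8) | (C))

// Placeholder threshold -- the real cut-in versions are in ndb_version.h.in.
static inline int example_feature_supported(Uint32 version)
{
  return version >= NDB_MAKE_VERSION(7, 0, 0) ? 1 : 0;
}
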
=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp	2011-05-19 09:18:04 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp	2011-05-25 15:03:11 +0000
@@ -224,10 +224,6 @@ public:
       }
       return false;
     }
-
-    /** Singly linked in internal (attributeId) order */
-    // TODO use DL template when possible to have more than 1
-    Uint32 nextAttributeIdPtrI;
   };
   typedef Ptr<AttributeRecord> AttributeRecordPtr;
   ArrayPool<AttributeRecord> c_attributeRecordPool;
@@ -282,6 +278,8 @@ public:
       TR_Temporary    = 0x8,
       TR_ForceVarPart = 0x10
     };
+    Uint8 m_extra_row_gci_bits;
+    Uint8 m_extra_row_author_bits;
     Uint16 m_bits;
 
     /* Number of attributes in table */

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp	2011-05-17 11:41:50 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp	2011-05-25 09:30:37 +0000
@@ -122,7 +122,7 @@ public:
    * ONGOING */
   struct ApiConnectRecord {
     Uint64 apiGci;
-    Uint32 nextApi;
+    Uint32 senderData;
   };
   typedef Ptr<ApiConnectRecord> ApiConnectRecordPtr;
 
@@ -947,7 +947,6 @@ private:
   bool isMaster();
   bool isActiveMaster();
 
-  void emptyverificbuffer(Signal *, bool aContintueB);
   void handleGcpStateInMaster(Signal *, NodeRecordPtr failedNodeptr);
   void initRestartInfo(Signal*);
   void initRestorableGciFiles();
@@ -1268,7 +1267,6 @@ private:
 
   // Variables to support record structures and their free lists
 
-  ApiConnectRecord *apiConnectRecord;
   Uint32 capiConnectFileSize;
 
   ConnectRecord *connectRecord;
@@ -1313,9 +1311,27 @@ private:
     2.4  C O M M O N    S T O R E D    V A R I A B L E S
     ----------------------------------------------------
   */
-  Uint32 cfirstVerifyQueue;
-  Uint32 clastVerifyQueue;
-  Uint32 cverifyQueueCounter;
+  struct DIVERIFY_queue
+  {
+    DIVERIFY_queue() {
+      cfirstVerifyQueue = clastVerifyQueue = 0;
+      apiConnectRecord = 0;
+      m_empty_done = 1;
+    }
+    ApiConnectRecord *apiConnectRecord;
+    Uint32 cfirstVerifyQueue;
+    Uint32 clastVerifyQueue;
+    Uint32 m_empty_done;
+  };
+
+  bool isEmpty(const DIVERIFY_queue&);
+  void enqueue(DIVERIFY_queue&, Uint32 senderData, Uint64 gci);
+  void dequeue(DIVERIFY_queue&, ApiConnectRecord &);
+  void emptyverificbuffer(Signal *, Uint32 q, bool aContinueB);
+  void emptyverificbuffer_check(Signal*, Uint32, Uint32);
+
+  DIVERIFY_queue c_diverify_queue[1];
+  Uint32 c_diverify_queue_cnt;
 
   /*------------------------------------------------------------------------*/
   /*       THIS VARIABLE KEEPS THE REFERENCES TO FILE RECORDS THAT DESCRIBE */
@@ -1357,8 +1373,15 @@ private:
    */
   struct MicroGcp
   {
+    MicroGcp() { }
     bool m_enabled;
     Uint32 m_master_ref;
+
+    /**
+     * rw-lock that protects multiple parallel DIVERIFY (readers) from
+     *   updates to gcp-state (e.g GCP_PREPARE, GCP_COMMIT)
+     */
+    NdbSeqLock m_lock;
     Uint64 m_old_gci;
     Uint64 m_current_gci; // Currently active
     Uint64 m_new_gci;     // Currently being prepared...

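The new NdbSeqLock member is what lets many reader threads run DIGETNODESREQ/DIVERIFYREQ concurrently while a single DIH writer flips micro-GCP state in GCP_PREPARE/GCP_COMMIT: readers never block, they just detect a racing writer and retry, which is the goto-loop pattern visible in DbdihMain.cpp below. A minimal sketch of the idea; names and layout here are illustrative, not the actual NdbSeqLock interface:

// Seqlock sketch: m_seq is even when idle, odd while a write is in progress.
struct SeqLockSketch
{
  volatile Uint32 m_seq;

  Uint32 read_lock() const {
    Uint32 val;
    do { val = m_seq; } while (val & 1);  // spin past an active writer
    return val;                           // (rmb() here in real code)
  }
  bool read_unlock(Uint32 val) const {
    return m_seq == val;                  // false => writer raced us, retry
  }
  void write_lock()   { m_seq++; }        // odd: readers will spin/retry
  void write_unlock() { m_seq++; }        // even again, new sequence number
};
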
=== modified file 'storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp	2011-02-15 11:41:27 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp	2011-05-18 09:07:07 +0000
@@ -73,13 +73,16 @@ void Dbdih::initData()
   c_2pass_inr = false;
 }//Dbdih::initData()
 
-void Dbdih::initRecords() 
+void Dbdih::initRecords()
 {
   // Records with dynamic sizes
-  apiConnectRecord = (ApiConnectRecord*)
-    allocRecord("ApiConnectRecord", 
-                sizeof(ApiConnectRecord),
-                capiConnectFileSize);
+  for (Uint32 i = 0; i < c_diverify_queue_cnt; i++)
+  {
+    c_diverify_queue[i].apiConnectRecord = (ApiConnectRecord*)
+      allocRecord("ApiConnectRecord",
+                  sizeof(ApiConnectRecord),
+                  capiConnectFileSize);
+  }
 
   connectRecord = (ConnectRecord*)allocRecord("ConnectRecord",
                                               sizeof(ConnectRecord), 
@@ -306,7 +309,6 @@ Dbdih::Dbdih(Block_context& ctx):
                &Dbdih::execDIH_GET_TABINFO_CONF);
 #endif
 
-  apiConnectRecord = 0;
   connectRecord = 0;
   fileRecord = 0;
   fragmentstore = 0;
@@ -319,15 +321,20 @@ Dbdih::Dbdih(Block_context& ctx):
   c_nextNodeGroup = 0;
   c_fragments_per_node = 1;
   bzero(c_node_groups, sizeof(c_node_groups));
+  c_diverify_queue_cnt = 1;
 
 }//Dbdih::Dbdih()
 
-Dbdih::~Dbdih() 
+Dbdih::~Dbdih()
 {
-  deallocRecord((void **)&apiConnectRecord, "ApiConnectRecord", 
-                sizeof(ApiConnectRecord),
-                capiConnectFileSize);
-  
+  for (Uint32 i = 0; i<c_diverify_queue_cnt; i++)
+  {
+    deallocRecord((void **)&c_diverify_queue[i].apiConnectRecord,
+                  "ApiConnectRecord",
+                  sizeof(ApiConnectRecord),
+                  capiConnectFileSize);
+  }
+
   deallocRecord((void **)&connectRecord, "ConnectRecord",
                 sizeof(ConnectRecord), 
                 cconnectFileSize);

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-05-17 11:41:50 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-05-25 09:30:37 +0000
@@ -542,7 +542,7 @@ void Dbdih::execCONTINUEB(Signal* signal
     break;
   case DihContinueB::ZEMPTY_VERIFY_QUEUE:
     jam();
-    emptyverificbuffer(signal, true);
+    emptyverificbuffer(signal, signal->theData[1], true);
     return;
     break;
   case DihContinueB::ZCHECK_GCP_STOP:
@@ -1298,6 +1298,8 @@ void Dbdih::execREAD_CONFIG_REQ(Signal*
   ndbrequireErr(!ndb_mgm_get_int_parameter(p, CFG_DIH_API_CONNECT, 
 					   &capiConnectFileSize),
 		NDBD_EXIT_INVALID_CONFIG);
+  capiConnectFileSize++; // Increase by 1 so that the SRSW queue never becomes full
+
   ndbrequireErr(!ndb_mgm_get_int_parameter(p, CFG_DIH_FRAG_CONNECT, 
 					   &cfragstoreFileSize),
 		NDBD_EXIT_INVALID_CONFIG);
@@ -3469,6 +3471,12 @@ void Dbdih::execEND_TOREQ(Signal* signal
              EndToConf::SignalLength, JBB);
 }//Dbdih::execEND_TOREQ()
 
+#define DIH_TAB_WRITE_LOCK(tabPtrP) \
+  do { assertOwnThread(); tabPtrP->m_lock.write_lock(); } while (0)
+
+#define DIH_TAB_WRITE_UNLOCK(tabPtrP) \
+  do { assertOwnThread(); tabPtrP->m_lock.write_unlock(); } while (0)
+
 /* --------------------------------------------------------------------------*/
 /*       AN ORDER TO START OR COMMIT THE REPLICA CREATION ARRIVED FROM THE   */
 /*       MASTER.                                                             */
@@ -3509,7 +3517,8 @@ void Dbdih::execCREATE_FRAGREQ(Signal* s
     dump_replica_info(fragPtr.p);
   }
   ndbrequire(frReplicaPtr.i != RNIL);
-  
+
+  DIH_TAB_WRITE_LOCK(tabPtr.p);
   switch (replicaType) {
   case CreateFragReq::STORED:
     jam();
@@ -3544,6 +3553,7 @@ void Dbdih::execCREATE_FRAGREQ(Signal* s
     ndbrequire(false);
     break;
   }//switch
+  DIH_TAB_WRITE_UNLOCK(tabPtr.p);
 
   /* ------------------------------------------------------------------------*/
   /*       THE NEW NODE OF THIS REPLICA IS THE STARTING NODE.                */
@@ -8033,7 +8043,9 @@ Dbdih::sendAddFragreq(Signal* signal, Co
     if (AlterTableReq::getReorgFragFlag(connectPtr.p->m_alter.m_changeMask))
     {
       jam();
+      DIH_TAB_WRITE_LOCK(tabPtr.p);
       tabPtr.p->m_new_map_ptr_i = connectPtr.p->m_alter.m_new_map_ptr_i;
+      DIH_TAB_WRITE_UNLOCK(tabPtr.p);
     }
 
     if (AlterTableReq::getAddFragFlag(connectPtr.p->m_alter.m_changeMask))
@@ -8521,6 +8533,7 @@ void Dbdih::execALTER_TAB_REQ(Signal * s
     if (AlterTableReq::getReorgFragFlag(connectPtr.p->m_alter.m_changeMask))
     {
       jam();
+      DIH_TAB_WRITE_LOCK(tabPtr.p);
       Uint32 save = tabPtr.p->m_map_ptr_i;
       tabPtr.p->m_map_ptr_i = tabPtr.p->m_new_map_ptr_i;
       tabPtr.p->m_new_map_ptr_i = save;
@@ -8532,6 +8545,7 @@ void Dbdih::execALTER_TAB_REQ(Signal * s
         getFragstore(tabPtr.p, i, fragPtr);
         fragPtr.p->distributionKey = (fragPtr.p->distributionKey + 1) & 0xFF;
       }
+      DIH_TAB_WRITE_UNLOCK(tabPtr.p);
 
       ndbassert(tabPtr.p->m_scan_count[1] == 0);
       tabPtr.p->m_scan_count[1] = tabPtr.p->m_scan_count[0];
@@ -8556,8 +8570,10 @@ void Dbdih::execALTER_TAB_REQ(Signal * s
 
     send_alter_tab_conf(signal, connectPtr);
 
+    DIH_TAB_WRITE_LOCK(tabPtr.p);
     tabPtr.p->m_new_map_ptr_i = RNIL;
     tabPtr.p->m_scan_reorg_flag = 0;
+    DIH_TAB_WRITE_UNLOCK(tabPtr.p);
 
     ndbrequire(tabPtr.p->connectrec == connectPtr.i);
     tabPtr.p->connectrec = RNIL;
@@ -9004,30 +9020,32 @@ void Dbdih::execDIGETNODESREQ(Signal* si
   Uint32 fragId, newFragId = RNIL;
   DiGetNodesConf * const conf = (DiGetNodesConf *)&signal->theData[0];
   TabRecord* regTabDesc = tabRecord;
-  jamEntry();
+  EmulatedJamBuffer * jambuf = * (EmulatedJamBuffer**)(req->jamBuffer);
+  thrjamEntry(jambuf);
   ptrCheckGuard(tabPtr, ttabFileSize, regTabDesc);
 
   if (DictTabInfo::isOrderedIndex(tabPtr.p->tableType))
   {
-    jam();
+    thrjam(jambuf);
     tabPtr.i = tabPtr.p->primaryTableId;
     ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
   }
 
+loop:
+  Uint32 val = tabPtr.p->m_lock.read_lock();
   Uint32 map_ptr_i = tabPtr.p->m_map_ptr_i;
   Uint32 new_map_ptr_i = tabPtr.p->m_new_map_ptr_i;
 
   /* When distr key indicator is set, regardless
-   * of distribution algorithm in use, hashValue 
+   * of distribution algorithm in use, hashValue
    * IS fragment id.
    */
   if (req->distr_key_indicator)
   {
     fragId = hashValue;
-    
     if (unlikely(fragId >= tabPtr.p->totalfragments))
     {
-      jam();
+      thrjam(jambuf);
       conf->zero= 1; //Indicate error;
       signal->theData[1]= ZUNDEFINED_FRAGMENT_ERROR;
       return;
@@ -9035,40 +9053,40 @@ void Dbdih::execDIGETNODESREQ(Signal* si
   }
   else if (tabPtr.p->method == TabRecord::HASH_MAP)
   {
-    jam();
+    thrjam(jambuf);
     Ptr<Hash2FragmentMap> ptr;
     g_hash_map.getPtr(ptr, map_ptr_i);
     fragId = ptr.p->m_map[hashValue % ptr.p->m_cnt];
 
     if (unlikely(new_map_ptr_i != RNIL))
     {
-      jam();
+      thrjam(jambuf);
       g_hash_map.getPtr(ptr, new_map_ptr_i);
       newFragId = ptr.p->m_map[hashValue % ptr.p->m_cnt];
       if (newFragId == fragId)
       {
-        jam();
+        thrjam(jambuf);
         newFragId = RNIL;
       }
     }
   }
   else if (tabPtr.p->method == TabRecord::LINEAR_HASH)
   {
-    jam();
+    thrjam(jambuf);
     fragId = hashValue & tabPtr.p->mask;
     if (fragId < tabPtr.p->hashpointer) {
-      jam();
+      thrjam(jambuf);
       fragId = hashValue & ((tabPtr.p->mask << 1) + 1);
     }//if
   }
   else if (tabPtr.p->method == TabRecord::NORMAL_HASH)
   {
-    jam();
+    thrjam(jambuf);
     fragId= hashValue % tabPtr.p->totalfragments;
   }
   else
   {
-    jam();
+    thrjam(jambuf);
     ndbassert(tabPtr.p->method == TabRecord::USER_DEFINED);
 
     /* User defined partitioning, but no distribution key passed */
@@ -9087,7 +9105,7 @@ void Dbdih::execDIGETNODESREQ(Signal* si
 
   if (unlikely(newFragId != RNIL))
   {
-    jam();
+    thrjam(jambuf);
     conf->reqinfo |= DiGetNodesConf::REORG_MOVING;
     getFragstore(tabPtr.p, newFragId, fragPtr);
     nodeCount = extractNodeInfo(fragPtr.p, conf->nodes + 2 + MAX_REPLICAS);
@@ -9096,6 +9114,9 @@ void Dbdih::execDIGETNODESREQ(Signal* si
       (fragPtr.p->distributionKey << 16) +
       (dihGetInstanceKey(fragPtr) << 24);
   }
+
+  if (unlikely(!tabPtr.p->m_lock.read_unlock(val)))
+    goto loop;
 }//Dbdih::execDIGETNODESREQ()
 
 Uint32 Dbdih::extractNodeInfo(const Fragmentstore * fragPtr, Uint32 nodes[]) 
@@ -9201,6 +9222,63 @@ void Dbdih::initialiseFragstore()
   }//for    
 }//Dbdih::initialiseFragstore()
 
+#ifndef NDB_HAVE_RMB
+#define rmb() do { } while (0)
+#endif
+
+#ifndef NDB_HAVE_WMB
+#define wmb() do { } while (0)
+#endif
+
+inline
+bool
+Dbdih::isEmpty(const DIVERIFY_queue & q)
+{
+  return q.cfirstVerifyQueue == q.clastVerifyQueue;
+}
+
+inline
+void
+Dbdih::enqueue(DIVERIFY_queue & q, Uint32 senderData, Uint64 gci)
+{
+  Uint32 last = q.clastVerifyQueue;
+  ApiConnectRecord * apiConnectRecord = q.apiConnectRecord;
+
+  apiConnectRecord[last].senderData = senderData;
+  apiConnectRecord[last].apiGci = gci;
+  wmb();
+  if (last + 1 == capiConnectFileSize)
+  {
+    q.clastVerifyQueue = 0;
+  }
+  else
+  {
+    q.clastVerifyQueue = last + 1;
+  }
+  assert(q.clastVerifyQueue != q.cfirstVerifyQueue);
+}
+
+inline
+void
+Dbdih::dequeue(DIVERIFY_queue & q, ApiConnectRecord & conRecord)
+{
+  Uint32 first = q.cfirstVerifyQueue;
+  ApiConnectRecord * apiConnectRecord = q.apiConnectRecord;
+
+  rmb();
+  conRecord.senderData = apiConnectRecord[first].senderData;
+  conRecord.apiGci = apiConnectRecord[first].apiGci;
+
+  if (first + 1 == capiConnectFileSize)
+  {
+    q.cfirstVerifyQueue = 0;
+  }
+  else
+  {
+    q.cfirstVerifyQueue = first + 1;
+  }
+}
+
 /*
   3.9   V E R I F I C A T I O N
   ****************************=
@@ -9212,13 +9290,16 @@ void Dbdih::initialiseFragstore()
   3.9.1     R E C E I V I N G  O F  V E R I F I C A T I O N   R E Q U E S T
   *************************************************************************
   */
-void Dbdih::execDIVERIFYREQ(Signal* signal) 
+void Dbdih::execDIVERIFYREQ(Signal* signal)
 {
-
-  jamEntry();
-  if ((getBlockCommit() == false) &&
-      (cfirstVerifyQueue == RNIL)) {
-    jam();
+  EmulatedJamBuffer * jambuf = * (EmulatedJamBuffer**)(signal->theData+2);
+  thrjamEntry(jambuf);
+loop:
+  Uint32 val = m_micro_gcp.m_lock.read_lock();
+  Uint32 blocked = getBlockCommit() == true ? 1 : 0;
+  if (blocked == 0 && isEmpty(c_diverify_queue[0]))
+  {
+    thrjam(jambuf);
     /*-----------------------------------------------------------------------*/
     // We are not blocked and the verify queue was empty currently so we can
     // simply reply back to TC immediately. The method was called with 
@@ -9229,32 +9310,21 @@ void Dbdih::execDIVERIFYREQ(Signal* sign
     signal->theData[1] = (Uint32)(m_micro_gcp.m_current_gci >> 32);
     signal->theData[2] = (Uint32)(m_micro_gcp.m_current_gci & 0xFFFFFFFF);
     signal->theData[3] = 0;
+    if (unlikely(! m_micro_gcp.m_lock.read_unlock(val)))
+      goto loop;
     return;
   }//if
   /*-------------------------------------------------------------------------*/
   // Since we are blocked we need to put this operation last in the verify
   // queue to ensure that operation starts up in the correct order.
   /*-------------------------------------------------------------------------*/
-  ApiConnectRecordPtr tmpApiConnectptr;
-  ApiConnectRecordPtr localApiConnectptr;
-
-  cverifyQueueCounter++;
-  localApiConnectptr.i = signal->theData[0];
-  tmpApiConnectptr.i = clastVerifyQueue;
-  ptrCheckGuard(localApiConnectptr, capiConnectFileSize, apiConnectRecord);
-  localApiConnectptr.p->apiGci = m_micro_gcp.m_new_gci;
-  localApiConnectptr.p->nextApi = RNIL;
-  clastVerifyQueue = localApiConnectptr.i;
-  if (tmpApiConnectptr.i == RNIL) {
-    jam();
-    cfirstVerifyQueue = localApiConnectptr.i;
-  } else {
-    jam();
-    ptrCheckGuard(tmpApiConnectptr, capiConnectFileSize, apiConnectRecord);
-    tmpApiConnectptr.p->nextApi = localApiConnectptr.i;
-  }//if
-  emptyverificbuffer(signal, false);
-  signal->theData[3] = 1; // Indicate no immediate return
+  DIVERIFY_queue & q = c_diverify_queue[0];
+  enqueue(q, signal->theData[0], m_micro_gcp.m_new_gci);
+  if (blocked == 0 && jambuf == jamBuffer())
+  {
+    emptyverificbuffer(signal, 0, false);
+  }
+  signal->theData[3] = blocked + 1; // Indicate no immediate return
   return;
 }//Dbdih::execDIVERIFYREQ()
 
@@ -9438,15 +9508,18 @@ Dbdih::execUPGRADE_PROTOCOL_ORD(Signal*
 }
 
 void
-Dbdih::startGcpLab(Signal* signal, Uint32 aWaitTime) 
+Dbdih::startGcpLab(Signal* signal, Uint32 aWaitTime)
 {
-  if (cfirstVerifyQueue != RNIL)
+  for (Uint32 i = 0; i < c_diverify_queue_cnt; i++)
   {
-    // Previous global checkpoint is not yet completed.
-    jam();
-    signal->theData[0] = DihContinueB::ZSTART_GCP;
-    sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 10, 1);
-    return;
+    if (c_diverify_queue[i].m_empty_done == 0)
+    {
+      // Previous global checkpoint is not yet completed.
+      jam();
+      signal->theData[0] = DihContinueB::ZSTART_GCP;
+      sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 10, 1);
+      return;
+    }
   }
 
   emptyWaitGCPMasterQueue(signal,
@@ -10006,10 +10079,12 @@ void Dbdih::execGCP_PREPARE(Signal* sign
   
   ndbrequire(m_micro_gcp.m_state == MicroGcp::M_GCP_IDLE);
 
+  m_micro_gcp.m_lock.write_lock();
   cgckptflag = true;
   m_micro_gcp.m_state = MicroGcp::M_GCP_PREPARE;
   m_micro_gcp.m_new_gci = gci;
   m_micro_gcp.m_master_ref = retRef;
+  m_micro_gcp.m_lock.write_unlock();
 
   if (ERROR_INSERTED(7031))
   {
@@ -10123,10 +10198,18 @@ void Dbdih::execGCP_COMMIT(Signal* signa
   m_micro_gcp.m_state = MicroGcp::M_GCP_COMMIT;
   m_micro_gcp.m_master_ref = calcDihBlockRef(masterNodeId);
   
+  m_micro_gcp.m_lock.write_lock();
   m_micro_gcp.m_old_gci = m_micro_gcp.m_current_gci;
   m_micro_gcp.m_current_gci = gci;
   cgckptflag = false;
-  emptyverificbuffer(signal, true);
+  m_micro_gcp.m_lock.write_unlock();
+
+  for (Uint32 i = 0; i < c_diverify_queue_cnt; i++)
+  {
+    jam();
+    c_diverify_queue[i].m_empty_done = 0;
+    emptyverificbuffer(signal, i, true);
+  }
 
   GCPNoMoreTrans* req2 = (GCPNoMoreTrans*)signal->getDataPtrSend();
   req2->senderRef = reference();
@@ -14672,53 +14755,79 @@ void Dbdih::createFileRw(Signal* signal,
   sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
 }//Dbdih::createFileRw()
 
-void Dbdih::emptyverificbuffer(Signal* signal, bool aContinueB) 
+void
+Dbdih::emptyverificbuffer(Signal* signal, Uint32 q, bool aContinueB)
 {
-  if(cfirstVerifyQueue == RNIL){
+  if(unlikely(getBlockCommit() == true))
+  {
     jam();
     return;
-  }//if
-  ApiConnectRecordPtr localApiConnectptr;
-  if(getBlockCommit() == false){
+  }
+
+  if (!isEmpty(c_diverify_queue[q]))
+  {
     jam();
-    ndbrequire(cverifyQueueCounter > 0);
-    cverifyQueueCounter--;
-    localApiConnectptr.i = cfirstVerifyQueue;
-    ptrCheckGuard(localApiConnectptr, capiConnectFileSize, apiConnectRecord);
-    ndbrequire(localApiConnectptr.p->apiGci <= m_micro_gcp.m_current_gci);
-    cfirstVerifyQueue = localApiConnectptr.p->nextApi;
-    if (cfirstVerifyQueue == RNIL) {
-      jam();
-      ndbrequire(cverifyQueueCounter == 0);
-      clastVerifyQueue = RNIL;
-    }//if
-    signal->theData[0] = localApiConnectptr.i;
+
+    ApiConnectRecord localApiConnect;
+    dequeue(c_diverify_queue[q], localApiConnect);
+    ndbrequire(localApiConnect.apiGci <= m_micro_gcp.m_current_gci);
+    signal->theData[0] = localApiConnect.senderData;
     signal->theData[1] = (Uint32)(m_micro_gcp.m_current_gci >> 32);
     signal->theData[2] = (Uint32)(m_micro_gcp.m_current_gci & 0xFFFFFFFF);
     signal->theData[3] = 0;
     sendSignal(clocaltcblockref, GSN_DIVERIFYCONF, signal, 4, JBB);
-    if (aContinueB == true) {
-      jam();
-      //-----------------------------------------------------------------------
-      // This emptying happened as part of a take-out process by continueb signals.
-      // This ensures that we will empty the queue eventually. We will also empty
-      // one item every time we insert one item to ensure that the list doesn't
-      // grow when it is not blocked.
-      //-----------------------------------------------------------------------
-      signal->theData[0] = DihContinueB::ZEMPTY_VERIFY_QUEUE;
-      sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
-    }//if
-  } else {
+  }
+  else if (aContinueB == true)
+  {
+    jam();
+    /**
+     * Make sure that we don't miss any pending transactions
+     *   (transactions that are added to the list by another thread
+     *    while we execute this code)
+     */
+    Uint32 blocks[] = { DBTC, 0 };
+    Callback c = { safe_cast(&Dbdih::emptyverificbuffer_check), q };
+    synchronize_threads_for_blocks(signal, blocks, c);
+    return;
+  }
+
+  if (aContinueB == true)
+  {
     jam();
     //-----------------------------------------------------------------------
-    // We are blocked so it is no use in continuing the emptying of the
-    // verify buffer. Whenever the block is removed the emptying will
-    // restart.
+    // This emptying happened as part of a take-out process by continueb signals.
+    // This ensures that we will empty the queue eventually. We will also empty
+    // one item every time we insert one item to ensure that the list doesn't
+    // grow when it is not blocked.
     //-----------------------------------------------------------------------
-  }  
+    signal->theData[0] = DihContinueB::ZEMPTY_VERIFY_QUEUE;
+    signal->theData[1] = q;
+    sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+  }//if
+
   return;
 }//Dbdih::emptyverificbuffer()
 
+void
+Dbdih::emptyverificbuffer_check(Signal* signal, Uint32 q, Uint32 retVal)
+{
+  ndbrequire(retVal == 0);
+  if (!isEmpty(c_diverify_queue[q]))
+  {
+    jam();
+    signal->theData[0] = DihContinueB::ZEMPTY_VERIFY_QUEUE;
+    signal->theData[1] = q;
+    sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+  }
+  else
+  {
+    /**
+     * Done with emptyverificbuffer
+     */
+    c_diverify_queue[q].m_empty_done = 1;
+  }
+}
+
 /*************************************************************************/
 /*       FIND THE NODES FROM WHICH WE CAN EXECUTE THE LOG TO RESTORE THE */
 /*       DATA NODE IN A SYSTEM RESTART.                                  */
@@ -15065,11 +15174,9 @@ void Dbdih::initCommonData()
   cfailurenr = 1;
   cfirstAliveNode = RNIL;
   cfirstDeadNode = RNIL;
-  cfirstVerifyQueue = RNIL;
   cgckptflag = false;
   cgcpOrderBlocked = 0;
 
-  clastVerifyQueue = RNIL;
   c_lcpMasterTakeOverState.set(LMTOS_IDLE, __LINE__);
 
   c_lcpState.clcpDelay = 0;
@@ -15103,7 +15210,6 @@ void Dbdih::initCommonData()
   cstarttype = (Uint32)-1;
   csystemnodes = 0;
   c_newest_restorable_gci = 0;
-  cverifyQueueCounter = 0;
   cwaitLcpSr = false;
   c_nodeStartMaster.blockGcp = 0;
 
@@ -15416,12 +15522,18 @@ void Dbdih::initialiseRecordsLab(Signal*
   case 1:{
     ApiConnectRecordPtr apiConnectptr;
     jam();
-    /******** INTIALIZING API CONNECT RECORDS ********/
-    for (apiConnectptr.i = 0; apiConnectptr.i < capiConnectFileSize; apiConnectptr.i++) {
-      refresh_watch_dog();
-      ptrAss(apiConnectptr, apiConnectRecord);
-      apiConnectptr.p->nextApi = RNIL;
-    }//for
+    for (Uint32 i = 0; i < c_diverify_queue_cnt; i++)
+    {
+      /******** INITIALIZING API CONNECT RECORDS ********/
+      for (apiConnectptr.i = 0;
+           apiConnectptr.i < capiConnectFileSize; apiConnectptr.i++)
+      {
+        refresh_watch_dog();
+        ptrAss(apiConnectptr, c_diverify_queue[i].apiConnectRecord);
+        apiConnectptr.p->senderData = RNIL;
+        apiConnectptr.p->apiGci = ~(Uint64)0;
+      }//for
+    }
     jam();
     break;
   }
@@ -17208,11 +17320,17 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal
   if (arg == DumpStateOrd::DihDumpNodeRestartInfo) {
     infoEvent("c_nodeStartMaster.blockLcp = %d, c_nodeStartMaster.blockGcp = %d, c_nodeStartMaster.wait = %d",
 	      c_nodeStartMaster.blockLcp, c_nodeStartMaster.blockGcp, c_nodeStartMaster.wait);
-    infoEvent("cfirstVerifyQueue = %d, cverifyQueueCounter = %d",
-              cfirstVerifyQueue, cverifyQueueCounter);
+    for (Uint32 i = 0; i < c_diverify_queue_cnt; i++)
+    {
+      infoEvent("[ %u : cfirstVerifyQueue = %u clastVerifyQueue = %u sz: %u]",
+                i,
+                c_diverify_queue[i].cfirstVerifyQueue,
+                c_diverify_queue[i].clastVerifyQueue,
+                capiConnectFileSize);
+    }
     infoEvent("cgcpOrderBlocked = %d",
               cgcpOrderBlocked);
-  }//if  
+  }//if
   if (arg == DumpStateOrd::DihDumpNodeStatusInfo) {
     NodeRecordPtr localNodePtr;
     infoEvent("Printing nodeStatus of all nodes");
@@ -17892,7 +18010,11 @@ void Dbdih::execUNBLOCK_COMMIT_ORD(Signa
     jam();
     
     c_blockCommit = false;
-    emptyverificbuffer(signal, true);
+    for (Uint32 i = 0; i<c_diverify_queue_cnt; i++)
+    {
+      c_diverify_queue[i].m_empty_done = 0;
+      emptyverificbuffer(signal, i, true);
+    }
   }
 }
 
@@ -18039,11 +18161,15 @@ void Dbdih::execDIH_SWITCH_REPLICA_REQ(S
     sendSignal(senderRef, GSN_DIH_SWITCH_REPLICA_REF, signal,
                DihSwitchReplicaRef::SignalLength, JBB);
   }//if
+
+  DIH_TAB_WRITE_LOCK(tabPtr.p);
   for (Uint32 i = 0; i < noOfReplicas; i++) {
     jam();
     ndbrequire(i < MAX_REPLICAS);
     fragPtr.p->activeNodes[i] = req->newNodeOrder[i];
   }//for
+  DIH_TAB_WRITE_UNLOCK(tabPtr.p);
+
   /**
    * Reply
    */

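The enqueue()/dequeue() pair added above turns the verify queue into a single-reader/single-writer ring over the preallocated ApiConnectRecord array: the writer publishes the payload before advancing clastVerifyQueue (wmb), the reader pairs that with rmb, and one slot is deliberately never used so that head == tail can only mean empty -- which is why execREAD_CONFIG_REQ grows capiConnectFileSize by one. A self-contained sketch of the same discipline:

#include <cassert>

// SRSW ring sketch mirroring the DIVERIFY queue; barriers shown as comments.
struct Item { Uint32 senderData; Uint64 gci; };

struct SrswRing
{
  Item*  buf;
  Uint32 size;        // capacity + 1: head == tail <=> empty, never full
  Uint32 head;        // advanced only by the reader thread
  Uint32 tail;        // advanced only by the writer thread

  bool empty() const { return head == tail; }

  void push(const Item& it)           // writer thread only
  {
    buf[tail] = it;
    // wmb(): payload must be globally visible before the index moves
    tail = (tail + 1 == size) ? 0 : tail + 1;
    assert(tail != head);             // the spare slot keeps this invariant
  }

  Item pop()                          // reader thread only
  {
    // rmb(): observe the writer's payload store before reading it
    Item it = buf[head];
    head = (head + 1 == size) ? 0 : head + 1;
    return it;
  }
};
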
=== modified file 'storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2011-05-19 09:16:32 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2011-05-25 15:03:11 +0000
@@ -3161,6 +3161,7 @@ public:
   bool is_same_trans(Uint32 opId, Uint32 trid1, Uint32 trid2);
   void get_op_info(Uint32 opId, Uint32 *hash, Uint32* gci_hi, Uint32* gci_lo);
   void accminupdate(Signal*, Uint32 opPtrI, const Local_key*);
+  void accremoverow(Signal*, Uint32 opPtrI, const Local_key*);
 
   /**
    *
@@ -3370,6 +3371,16 @@ Dblqh::accminupdate(Signal* signal, Uint
 }
 
 inline
+void
+Dblqh::accremoverow(Signal* signal, Uint32 opId, const Local_key* key)
+{
+  TcConnectionrecPtr regTcPtr;
+  regTcPtr.i= opId;
+  ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec);
+  c_acc->removerow(regTcPtr.p->accConnectrec, key);
+}
+
+inline
 bool
 Dblqh::TRACE_OP_CHECK(const TcConnectionrec* regTcPtr)
 {

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-05-19 09:16:32 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-05-25 15:03:11 +0000
@@ -148,6 +148,7 @@ operator<<(NdbOut& out, Operation_t op)
   case ZDELETE: out << "DELETE"; break;
   case ZWRITE: out << "WRITE"; break;
   case ZUNLOCK: out << "UNLOCK"; break;
+  case ZREFRESH: out << "REFRESH"; break;
   }
   return out;
 }
@@ -4535,6 +4536,7 @@ void Dblqh::execLQHKEYREQ(Signal* signal
     regTcPtr->lockType = 
       op == ZREAD_EX ? ZUPDATE : 
       (Operation_t) op == ZWRITE ? ZINSERT : 
+      (Operation_t) op == ZREFRESH ? ZINSERT :
       (Operation_t) op == ZUNLOCK ? ZREAD : // lockType not relevant for unlock req
       (Operation_t) op;
   }
@@ -4687,8 +4689,11 @@ void Dblqh::execLQHKEYREQ(Signal* signal
 
   sig2 = lqhKeyReq->variableData[nextPos + 0];
   sig3 = cnewestGci;
+  /* If gci_hi is provided, take it and set gci_lo to its max value.
+   * Otherwise both will be decided by TUP at commit time as normal.
+   */
   regTcPtr->gci_hi = LqhKeyReq::getGCIFlag(Treqinfo) ? sig2 : sig3;
-  regTcPtr->gci_lo = 0;
+  regTcPtr->gci_lo = LqhKeyReq::getGCIFlag(Treqinfo) ? ~Uint32(0) : 0;
   nextPos += LqhKeyReq::getGCIFlag(Treqinfo);
   
   if (LqhKeyReq::getRowidFlag(Treqinfo))
@@ -5071,6 +5076,7 @@ void Dblqh::prepareContinueAfterBlockedL
     case ZINSERT: TRACENR("INSERT"); break;
     case ZDELETE: TRACENR("DELETE"); break;
     case ZUNLOCK: TRACENR("UNLOCK"); break;
+    case ZREFRESH: TRACENR("REFRESH"); break;
     default: TRACENR("<Unknown: " << regTcPtr->operation << ">"); break;
     }
     
@@ -5120,7 +5126,6 @@ Dblqh::exec_acckeyreq(Signal* signal, Tc
   Uint32 taccreq;
   regTcPtr.p->transactionState = TcConnectionrec::WAIT_ACC;
   taccreq = regTcPtr.p->operation;
-  taccreq = taccreq + (regTcPtr.p->opSimple << 3);
   taccreq = taccreq + (regTcPtr.p->lockType << 4);
   taccreq = taccreq + (regTcPtr.p->dirtyOp << 6);
   taccreq = taccreq + (regTcPtr.p->replicaType << 7);
@@ -5285,15 +5290,17 @@ Dblqh::handle_nr_copy(Signal* signal, Pt
     if (match)
     {
       jam();
-      if (op != ZDELETE)
+      if (op != ZDELETE && op != ZREFRESH)
       {
 	if (TRACENR_FLAG)
-	  TRACENR(" Changing from to ZWRITE" << endl);
+	  TRACENR(" Changing from INSERT/UPDATE to ZWRITE" << endl);
 	regTcPtr.p->operation = ZWRITE;
       }
       goto run;
     }
-    
+
+    ndbassert(!match && op == ZINSERT);
+
     /**
      * 1) Delete row at specified rowid (if len > 0)
      * 2) Delete specified row at different rowid (if exists)
@@ -6005,7 +6012,7 @@ Dblqh::acckeyconf_tupkeyreq(Signal* sign
   
   TRACE_OP(regTcPtr, "TUPKEYREQ");
   
-  regTcPtr->m_use_rowid |= (op == ZINSERT);
+  regTcPtr->m_use_rowid |= (op == ZINSERT || op == ZREFRESH);
   regTcPtr->m_row_id.m_page_no = page_no;
   regTcPtr->m_row_id.m_page_idx = page_idx;
   

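The gci_lo handling above relies on the 64-bit GCI being carried as two 32-bit words: when LQHKEYREQ supplies only gci_hi, gci_lo is parked at its maximum value and TUP fills in the real value at commit time. For reference, the composition used throughout these blocks (see the >> 32 / & 0xFFFFFFFF pairs in DbdihMain.cpp above):

// 64-bit GCI split across two signal words.
static inline Uint64 make_gci(Uint32 gci_hi, Uint32 gci_lo)
{
  return (Uint64(gci_hi) << 32) | gci_lo;
}

static inline void split_gci(Uint64 gci, Uint32& gci_hi, Uint32& gci_lo)
{
  gci_hi = Uint32(gci >> 32);
  gci_lo = Uint32(gci & 0xFFFFFFFF);
}
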
=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-05-13 08:38:01 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-05-25 09:31:27 +0000
@@ -3682,10 +3682,11 @@ Dbspj::getNodes(Signal* signal, BuildKey
   req->tableId = tableId;
   req->hashValue = dst.hashInfo[1];
   req->distr_key_indicator = 0; // userDefinedPartitioning not supported!
+  * (EmulatedJamBuffer**)req->jamBuffer = jamBuffer();
 
 #if 1
   EXECUTE_DIRECT(DBDIH, GSN_DIGETNODESREQ, signal,
-                 DiGetNodesReq::SignalLength);
+                 DiGetNodesReq::SignalLength, 0);
 #else
   sendSignal(DBDIH_REF, GSN_DIGETNODESREQ, signal,
              DiGetNodesReq::SignalLength, JBB);

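DIGETNODESREQ is now EXECUTE_DIRECTed from threads other than the DIH thread, so each caller hands DIH its own jam buffer inside the request and DIH traces with thrjam() instead of jam(). The pointer is smuggled through the Uint32 payload; on a 64-bit build it spans two words, which is also why Dbtc sizes DIVERIFYREQ as 2 + sizeof(void*)/sizeof(Uint32) below. A sketch of the store/load pair:

// Passing a pointer through Uint32 signal words (sketch).
static inline void store_jambuf(Uint32* dst, EmulatedJamBuffer* buf)
{
  *(EmulatedJamBuffer**)dst = buf;      // occupies 2 words on 64-bit builds
}

static inline EmulatedJamBuffer* load_jambuf(const Uint32* src)
{
  return *(EmulatedJamBuffer**)src;
}
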
=== modified file 'storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2011-05-25 13:19:02 +0000
@@ -1694,6 +1694,7 @@ private:
   void checkNodeFailComplete(Signal* signal, Uint32 failedNodeId, Uint32 bit);
 
   void apiFailBlockCleanupCallback(Signal* signal, Uint32 failedNodeId, Uint32 ignoredRc);
+  bool isRefreshSupported() const;
   
   // Initialisation
   void initData();

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2011-05-19 09:16:32 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2011-05-25 15:03:11 +0000
@@ -3061,6 +3061,7 @@ void Dbtc::execTCKEYREQ(Signal* signal)
     case ZINSERT:
     case ZDELETE:
     case ZWRITE:
+    case ZREFRESH:
       jam();
       break;
     default:
@@ -3142,6 +3143,34 @@ handle_reorg_trigger(DiGetNodesConf * co
   }
 }
 
+bool
+Dbtc::isRefreshSupported() const
+{
+  const NodeVersionInfo& nvi = getNodeVersionInfo();
+  const Uint32 minVer = nvi.m_type[NodeInfo::DB].m_min_version;
+  const Uint32 maxVer = nvi.m_type[NodeInfo::DB].m_max_version;
+
+  if (likely (minVer == maxVer))
+  {
+    /* Normal case, use function */
+    return ndb_refresh_tuple(minVer);
+  }
+
+  /* As the refresh feature was introduced across three minor versions,
+   * we check that all data nodes support it.  This slow path should
+   * only be hit during upgrades between versions.
+   */
+  for (Uint32 i=1; i < MAX_NODES; i++)
+  {
+    const NodeInfo& nodeInfo = getNodeInfo(i);
+    if ((nodeInfo.m_type == NODE_TYPE_DB) &&
+        (nodeInfo.m_connected) &&
+        (! ndb_refresh_tuple(nodeInfo.m_version)))
+      return false;
+  }
+  return true;
+}
+
 /**
  * tckeyreq050Lab
  * This method is executed once all KeyInfo has been obtained for
@@ -3188,6 +3217,7 @@ void Dbtc::tckeyreq050Lab(Signal* signal
   req->tableId = Ttableref;
   req->hashValue = TdistrHashValue;
   req->distr_key_indicator = regCachePtr->distributionKeyIndicator;
+  * (EmulatedJamBuffer**)req->jamBuffer = jamBuffer();
 
   /*-------------------------------------------------------------*/
   /* FOR EFFICIENCY REASONS WE AVOID THE SIGNAL SENDING HERE AND */
@@ -3198,7 +3228,7 @@ void Dbtc::tckeyreq050Lab(Signal* signal
   /* IS SPENT IN DIH AND EVEN LESS IN REPLICATED NDB.            */
   /*-------------------------------------------------------------*/
   EXECUTE_DIRECT(DBDIH, GSN_DIGETNODESREQ, signal,
-                 DiGetNodesReq::SignalLength);
+                 DiGetNodesReq::SignalLength, 0);
   DiGetNodesConf * conf = (DiGetNodesConf *)&signal->theData[0];
   UintR Tdata2 = conf->reqinfo;
   UintR TerrorIndicator = signal->theData[0];
@@ -3367,6 +3397,14 @@ void Dbtc::tckeyreq050Lab(Signal* signal
     TlastReplicaNo = tnoOfBackup + tnoOfStandby;
     regTcPtr->lastReplicaNo = (Uint8)TlastReplicaNo;
     regTcPtr->noOfNodes = (Uint8)(TlastReplicaNo + 1);
+
+    if (unlikely((Toperation == ZREFRESH) &&
+                 (! isRefreshSupported())))
+    {
+      /* Not all data nodes support refresh yet -> "function not implemented" */
+      TCKEY_abort(signal,63);
+      return;
+    }
   }//if
 
   if (regCachePtr->isLongTcKeyReq || 
@@ -4878,7 +4916,9 @@ void Dbtc::diverify010Lab(Signal* signal
        * CONNECTIONS AND THEN WHEN ALL DIVERIFYCONF HAVE BEEN RECEIVED THE 
        * COMMIT MESSAGE CAN BE SENT TO ALL INVOLVED PARTS.
        *---------------------------------------------------------------------*/
-      EXECUTE_DIRECT(DBDIH, GSN_DIVERIFYREQ, signal, 1);
+      * (EmulatedJamBuffer**)(signal->theData+2) = jamBuffer();
+      EXECUTE_DIRECT(DBDIH, GSN_DIVERIFYREQ, signal,
+                     2 + sizeof(void*)/sizeof(Uint32), 0);
       if (signal->theData[3] == 0) {
         execDIVERIFYCONF(signal);
       }
@@ -10872,9 +10912,9 @@ void Dbtc::execDIH_SCAN_TAB_CONF(Signal*
     req->tableId = tabPtr.i;
     req->hashValue = cachePtr.p->distributionKey;
     req->distr_key_indicator = tabPtr.p->get_user_defined_partitioning();
-
+    * (EmulatedJamBuffer**)req->jamBuffer = jamBuffer();
     EXECUTE_DIRECT(DBDIH, GSN_DIGETNODESREQ, signal,
-                   DiGetNodesReq::SignalLength);
+                   DiGetNodesReq::SignalLength, 0);
     UintR TerrorIndicator = signal->theData[0];
     jamEntry();
     if (TerrorIndicator != 0)

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-05-17 12:47:21 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-05-25 13:19:02 +0000
@@ -268,6 +268,7 @@ inline const Uint32* ALIGN_WORD(const vo
 #define ZMUST_BE_ABORTED_ERROR 898
 #define ZTUPLE_DELETED_ERROR 626
 #define ZINSERT_ERROR 630
+#define ZOP_AFTER_REFRESH_ERROR 920
 
 #define ZINVALID_CHAR_FORMAT 744
 #define ZROWID_ALLOCATED 899
@@ -420,6 +421,8 @@ struct Fragoperrec {
   Uint32 attributeCount;
   Uint32 charsetIndex;
   Uint32 m_null_bits[2];
+  Uint32 m_extra_row_gci_bits;
+  Uint32 m_extra_row_author_bits;
   union {
     BlockReference lqhBlockrefFrag;
     Uint32 m_senderRef;
@@ -827,10 +830,11 @@ struct Operationrec {
     unsigned int m_disk_preallocated : 1;
     unsigned int m_load_diskpage_on_commit : 1;
     unsigned int m_wait_log_buffer : 1;
+    unsigned int m_gci_written : 1;
   };
   union {
     OpBitFields op_struct;
-    Uint16 op_bit_fields;
+    Uint32 op_bit_fields;
   };
 
   /*
@@ -840,6 +844,19 @@ struct Operationrec {
    * version even if in the same transaction.
    */
   Uint16 tupVersion;
+
+  /*
+   * When refreshing a row, there are four scenarios.
+   * The actual scenario is encoded in the 'copy tuple location'
+   * to enable special handling at commit time.
+   */
+  enum RefreshScenario
+  {
+    RF_SINGLE_NOT_EXIST = 1,    /* Refresh op first in trans, no row */
+    RF_SINGLE_EXIST     = 2,    /* Refresh op first in trans, row exists */
+    RF_MULTI_NOT_EXIST  = 3,    /* Refresh op !first in trans, row deleted */
+    RF_MULTI_EXIST      = 4     /* Refresh op !first in trans, row exists */
+  };
 };
 typedef Ptr<Operationrec> OperationrecPtr;
 
@@ -1019,7 +1036,9 @@ ArrayPool<TupTriggerData> c_triggerPool;
       TR_Checksum = 0x1, // Need to be 1
       TR_RowGCI   = 0x2,
       TR_ForceVarPart = 0x4,
-      TR_DiskPart  = 0x8
+      TR_DiskPart  = 0x8,
+      TR_ExtraRowGCIBits = 0x10,
+      TR_ExtraRowAuthorBits = 0x20
     };
     Uint16 m_bits;
     Uint16 total_rec_size; // Max total size for entire tuple in words
@@ -1032,6 +1051,7 @@ ArrayPool<TupTriggerData> c_triggerPool;
     Uint16 noOfKeyAttr;
     Uint16 noOfCharsets;
     Uint16 m_dyn_null_bits[2];
+    Uint16 m_no_of_extra_columns; // "Hidden" columns
 
     bool need_expand() const { 
       return m_no_of_attributes > m_attributes[MM].m_no_of_fixsize;
@@ -1057,6 +1077,17 @@ ArrayPool<TupTriggerData> c_triggerPool;
         (disk && m_attributes[DD].m_no_of_varsize > 0);
     }
 
+    template <Uint32 bit> Uint32 getExtraAttrId() const {
+      if (bit == TR_ExtraRowGCIBits)
+        return 0;
+      Uint32 no = 0;
+      if (m_bits & TR_ExtraRowGCIBits)
+        no++;
+      assert(bit == TR_ExtraRowAuthorBits);
+      //if (bit == TR_ExtraRowAuthorBits)
+      return no;
+    }
+
     /**
      * Descriptors for MM and DD part
      */
@@ -2063,6 +2094,13 @@ private:
                       KeyReqStruct* req_struct,
 		      bool disk);
 
+  int handleRefreshReq(Signal* signal,
+                       Ptr<Operationrec>,
+                       Ptr<Fragrecord>,
+                       Tablerec*,
+                       KeyReqStruct*,
+                       bool disk);
+
 //------------------------------------------------------------------
 //------------------------------------------------------------------
   int  updateStartLab(Signal* signal,
@@ -2951,10 +2989,15 @@ private:
   void initData();
   void initRecords();
 
+  // 2 words for optional GCI64 + AUTHOR info
+#define EXTRA_COPY_PROC_WORDS 2
+#define MAX_COPY_PROC_LEN (MAX_ATTRIBUTES_IN_TABLE + EXTRA_COPY_PROC_WORDS)
+
   void deleteScanProcedure(Signal* signal, Operationrec* regOperPtr);
   void allocCopyProcedure();
   void freeCopyProcedure();
-  void prepareCopyProcedure(Uint32 numAttrs);
+  void prepareCopyProcedure(Uint32 numAttrs, Uint16 tableBits);
   void releaseCopyProcedure();
   void copyProcedure(Signal* signal,
                      TablerecPtr regTabPtr,
@@ -2973,7 +3016,7 @@ private:
 //-----------------------------------------------------------------------------
 
 // Public methods
-  Uint32 getTabDescrOffsets(Uint32, Uint32, Uint32, Uint32*);
+  Uint32 getTabDescrOffsets(Uint32, Uint32, Uint32, Uint32, Uint32*);
   Uint32 getDynTabDescrOffsets(Uint32 MaskSize, Uint32* offset);
   Uint32 allocTabDescr(Uint32 allocSize);
   void releaseTabDescr(Uint32 desc);
@@ -3174,6 +3217,8 @@ private:
   Uint32 czero;
   Uint32 cCopyProcedure;
   Uint32 cCopyLastSeg;
+  Uint32 cCopyOverwrite;
+  Uint32 cCopyOverwriteLen;
 
  // A little bit bigger to cover overwrites in copy algorithms (16384 real size).
 #define ZATTR_BUFFER_SIZE 16384
@@ -3380,15 +3425,21 @@ private:
   void findFirstOp(OperationrecPtr&);
   bool is_rowid_lcp_scanned(const Local_key& key1,
                            const Dbtup::ScanOp& op);
-  void commit_operation(Signal*, Uint32, Tuple_header*, PagePtr,
+  void commit_operation(Signal*, Uint32, Uint32, Tuple_header*, PagePtr,
 			Operationrec*, Fragrecord*, Tablerec*);
+  void commit_refresh(Signal*, Uint32, Uint32, Tuple_header*, PagePtr,
+                      KeyReqStruct*, Operationrec*, Fragrecord*, Tablerec*);
   int retrieve_data_page(Signal*,
                          Page_cache_client::Request,
                          OperationrecPtr);
   int retrieve_log_page(Signal*, FragrecordPtr, OperationrecPtr);
 
-  void dealloc_tuple(Signal* signal, Uint32, Page*, Tuple_header*,
+  void dealloc_tuple(Signal* signal, Uint32, Uint32, Page*, Tuple_header*,
 		     KeyReqStruct*, Operationrec*, Fragrecord*, Tablerec*);
+  bool store_extra_row_bits(Uint32, const Tablerec*, Tuple_header*, Uint32,
+                            bool);
+  void read_extra_row_bits(Uint32, const Tablerec*, Tuple_header*, Uint32 *,
+                           bool);
 
   int handle_size_change_after_update(KeyReqStruct* req_struct,
 				      Tuple_header* org,

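getExtraAttrId<>() pins down the ordering of the hidden columns: the extra-GCI-bits column, when present, always takes extra-attribute id 0, and the author-bits column follows it. A standalone mirror of that computation, using the TR_* values from the hunk above:

#include <cassert>

// Mirror of Tablerec::getExtraAttrId<>() as a plain function.
enum { TR_ExtraRowGCIBits = 0x10, TR_ExtraRowAuthorBits = 0x20 };

static Uint32 extra_attr_id(Uint16 m_bits, Uint32 bit)
{
  if (bit == TR_ExtraRowGCIBits)
    return 0;                                    // GCI bits always come first
  assert(bit == TR_ExtraRowAuthorBits);
  return (m_bits & TR_ExtraRowGCIBits) ? 1 : 0;  // shifted down if no GCI bits
}
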
=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp	2011-05-07 06:17:02 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp	2011-05-25 13:19:02 +0000
@@ -147,7 +147,8 @@ Dbtup::is_rowid_lcp_scanned(const Local_
 
 void
 Dbtup::dealloc_tuple(Signal* signal,
-		     Uint32 gci,
+		     Uint32 gci_hi,
+                     Uint32 gci_lo,
 		     Page* page,
 		     Tuple_header* ptr, 
                      KeyReqStruct * req_struct,
@@ -168,7 +169,7 @@ Dbtup::dealloc_tuple(Signal* signal,
     tmpptr.i = m_pgman_ptr.i;
     tmpptr.p = reinterpret_cast<Page*>(m_pgman_ptr.p);
     disk_page_free(signal, regTabPtr, regFragPtr, 
-		   &disk, tmpptr, gci);
+		   &disk, tmpptr, gci_hi);
   }
   
   if (! (bits & (Tuple_header::LCP_SKIP | Tuple_header::ALLOC)) && 
@@ -199,7 +200,12 @@ Dbtup::dealloc_tuple(Signal* signal,
   if (regTabPtr->m_bits & Tablerec::TR_RowGCI)
   {
     jam();
-    * ptr->get_mm_gci(regTabPtr) = gci;
+    * ptr->get_mm_gci(regTabPtr) = gci_hi;
+    if (regTabPtr->m_bits & Tablerec::TR_ExtraRowGCIBits)
+    {
+      Uint32 attrId = regTabPtr->getExtraAttrId<Tablerec::TR_ExtraRowGCIBits>();
+      store_extra_row_bits(attrId, regTabPtr, ptr, gci_lo, /* truncate */true);
+    }
   }
 }
 
@@ -288,7 +294,8 @@ static void dump_buf_hex(unsigned char *
 
 void
 Dbtup::commit_operation(Signal* signal,
-			Uint32 gci,
+			Uint32 gci_hi,
+                        Uint32 gci_lo,
 			Tuple_header* tuple_ptr, 
 			PagePtr pagePtr,
 			Operationrec* regOperPtr, 
@@ -309,6 +316,7 @@ Dbtup::commit_operation(Signal* signal,
   Uint32 fixsize= regTabPtr->m_offsets[MM].m_fix_header_size;
   Uint32 mm_vars= regTabPtr->m_attributes[MM].m_no_of_varsize;
   Uint32 mm_dyns= regTabPtr->m_attributes[MM].m_no_of_dynamic;
+  bool update_gci_at_commit = ! regOperPtr->op_struct.m_gci_written;
   if((mm_vars+mm_dyns) == 0)
   {
     jam();
@@ -399,7 +407,7 @@ Dbtup::commit_operation(Signal* signal,
     if(copy_bits & Tuple_header::DISK_ALLOC)
     {
       jam();
-      disk_page_alloc(signal, regTabPtr, regFragPtr, &key, diskPagePtr, gci);
+      disk_page_alloc(signal, regTabPtr, regFragPtr, &key, diskPagePtr, gci_hi);
     }
     
     if(regTabPtr->m_attributes[DD].m_no_of_varsize == 0)
@@ -419,7 +427,7 @@ Dbtup::commit_operation(Signal* signal,
     {
       jam();
       disk_page_undo_update(diskPagePtr.p, 
-			    &key, dst, sz, gci, logfile_group_id);
+			    &key, dst, sz, gci_hi, logfile_group_id);
     }
     
     memcpy(dst, disk_ptr, 4*sz);
@@ -452,10 +460,17 @@ Dbtup::commit_operation(Signal* signal,
   tuple_ptr->m_header_bits= copy_bits;
   tuple_ptr->m_operation_ptr_i= save;
   
-  if (regTabPtr->m_bits & Tablerec::TR_RowGCI)
+  if (regTabPtr->m_bits & Tablerec::TR_RowGCI  &&
+      update_gci_at_commit)
   {
     jam();
-    * tuple_ptr->get_mm_gci(regTabPtr) = gci;
+    * tuple_ptr->get_mm_gci(regTabPtr) = gci_hi;
+    if (regTabPtr->m_bits & Tablerec::TR_ExtraRowGCIBits)
+    {
+      Uint32 attrId = regTabPtr->getExtraAttrId<Tablerec::TR_ExtraRowGCIBits>();
+      store_extra_row_bits(attrId, regTabPtr, tuple_ptr, gci_lo,
+                           /* truncate */true);
+    }
   }
   
   if (regTabPtr->m_bits & Tablerec::TR_Checksum) {
@@ -834,23 +849,27 @@ skip_disk:
     
     tuple_ptr->m_operation_ptr_i = RNIL;
     
-    if(regOperPtr.p->op_struct.op_type != ZDELETE)
+    if (regOperPtr.p->op_struct.op_type == ZDELETE)
     {
       jam();
-      commit_operation(signal, gci_hi, tuple_ptr, page,
+      if (get_page)
+        ndbassert(tuple_ptr->m_header_bits & Tuple_header::DISK_PART);
+      dealloc_tuple(signal, gci_hi, gci_lo, page.p, tuple_ptr,
+                    &req_struct, regOperPtr.p, regFragPtr.p, regTabPtr.p);
+    }
+    else if(regOperPtr.p->op_struct.op_type != ZREFRESH)
+    {
+      jam();
+      commit_operation(signal, gci_hi, gci_lo, tuple_ptr, page,
 		       regOperPtr.p, regFragPtr.p, regTabPtr.p); 
     }
     else
     {
       jam();
-      if (get_page)
-      {
-	ndbassert(tuple_ptr->m_header_bits & Tuple_header::DISK_PART);
-      }
-      dealloc_tuple(signal, gci_hi, page.p, tuple_ptr,
-		    &req_struct, regOperPtr.p, regFragPtr.p, regTabPtr.p);
+      commit_refresh(signal, gci_hi, gci_lo, tuple_ptr, page,
+                     &req_struct, regOperPtr.p, regFragPtr.p, regTabPtr.p);
     }
-  } 
+  }
 
   if (nextOp != RNIL)
   {
@@ -902,3 +921,48 @@ Dbtup::set_commit_change_mask_info(const
     }
   }
 }
+
+void
+Dbtup::commit_refresh(Signal* signal,
+                      Uint32 gci_hi,
+                      Uint32 gci_lo,
+                      Tuple_header* tuple_ptr,
+                      PagePtr pagePtr,
+                      KeyReqStruct * req_struct,
+                      Operationrec* regOperPtr,
+                      Fragrecord* regFragPtr,
+                      Tablerec* regTabPtr)
+{
+  /* Committing a refresh operation.
+   * Refresh of an existing row looks like an update
+   * and can commit normally.
+   * Refresh of a non-existing row looks like an insert which
+   * is 'undone' at commit time.
+   * This is achieved by making special calls to ACC to get
+   * it to forget the element, before deallocating the tuple locally.
+   */
+  switch(regOperPtr->m_copy_tuple_location.m_file_no){
+  case Operationrec::RF_SINGLE_NOT_EXIST:
+  case Operationrec::RF_MULTI_NOT_EXIST:
+    break;
+  case Operationrec::RF_SINGLE_EXIST:
+  case Operationrec::RF_MULTI_EXIST:
+    // "Normal" update
+    commit_operation(signal, gci_hi, gci_lo, tuple_ptr, pagePtr,
+                     regOperPtr, regFragPtr, regTabPtr);
+    return;
+
+  default:
+    ndbrequire(false);
+  }
+
+  Local_key key = regOperPtr->m_tuple_location;
+  key.m_page_no = pagePtr.p->frag_page_id;
+
+  /**
+   * Tell ACC to delete
+   */
+  c_lqh->accremoverow(signal, regOperPtr->userpointer, &key);
+  dealloc_tuple(signal, gci_hi, gci_lo, pagePtr.p, tuple_ptr,
+                req_struct, regOperPtr, regFragPtr, regTabPtr);
+}

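dealloc_tuple(), commit_operation() and commit_refresh() above all persist gci_lo through store_extra_row_bits() with truncate == true, since a full 32-bit gci_lo generally does not fit in the configured ExtraRowGCIBits width. A hedged sketch of that packing; only the argument shape comes from the Dbtup.hpp hunk, the saturation policy is an assumption for illustration:

// Assumed semantics of store_extra_row_bits(..., value, truncate): pack
// 'value' into a 'bits'-wide hidden column, saturating when truncate is set.
static bool pack_extra_bits(Uint32& word, Uint32 bits, Uint32 value, bool truncate)
{
  const Uint32 mask = (bits >= 32) ? 0xFFFFFFFFu : ((1u << bits) - 1);
  if (value > mask)
  {
    if (!truncate)
      return false;   // caller must treat the overflow as an error
    value = mask;     // e.g. an oversized gci_lo saturates to the column max
  }
  word = (word & ~mask) | value;
  return true;
}
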
=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2011-05-17 12:47:21 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2011-05-25 13:19:02 +0000
@@ -192,6 +192,8 @@ Dbtup::insertActiveOpList(OperationrecPt
       prevOpPtr.p->op_struct.m_wait_log_buffer;
     regOperPtr.p->op_struct.m_load_diskpage_on_commit= 
       prevOpPtr.p->op_struct.m_load_diskpage_on_commit;
+    regOperPtr.p->op_struct.m_gci_written=
+      prevOpPtr.p->op_struct.m_gci_written;
     regOperPtr.p->m_undo_buffer_space= prevOpPtr.p->m_undo_buffer_space;
     // start with prev mask (matters only for UPD o UPD)
 
@@ -212,7 +214,14 @@ Dbtup::insertActiveOpList(OperationrecPt
 	  prevOpPtr.p->op_struct.delete_insert_flag= true;
 	  regOperPtr.p->op_struct.delete_insert_flag= true;
 	  return true;
-	} else {
+	}
+        else if (op == ZREFRESH)
+        {
+          /* ZREFRESH after Delete - ok */
+          return true;
+        }
+        else
+        {
 	  terrorCode= ZTUPLE_DELETED_ERROR;
 	  return false;
 	}
@@ -222,6 +231,12 @@ Dbtup::insertActiveOpList(OperationrecPt
 	terrorCode= ZINSERT_ERROR;
 	return false;
       }
+      else if (prevOp == ZREFRESH)
+      {
+        /* No operation after a ZREFRESH */
+        terrorCode= ZOP_AFTER_REFRESH_ERROR;
+        return false;
+      }
       return true;
     }
     else
@@ -281,21 +296,39 @@ Dbtup::setup_read(KeyReqStruct *req_stru
       dirty= false;
     }
 
+    /* found == true indicates that the savepoint falls within the
+     * tuple's current transaction's uncommitted operations
+     */
     bool found= find_savepoint(currOpPtr, savepointId);
     
     Uint32 currOp= currOpPtr.p->op_struct.op_type;
     
+    /* is_insert == true if the tuple did not exist before its
+     * current transaction
+     */
     bool is_insert = (bits & Tuple_header::ALLOC);
+
+    /* If savepoint is in transaction, and post-delete-op
+     *   OR
+     * Tuple didn't exist before
+     *      AND
+     *   Read is dirty
+     *           OR
+     *   Savepoint is before-transaction
+     *
+     * Tuple does not exist in read's view
+     */
     if((found && currOp == ZDELETE) || 
        ((dirty || !found) && is_insert))
     {
+      /* Tuple not visible to this read operation */
       terrorCode= ZTUPLE_DELETED_ERROR;
       break;
     }
     
     if(dirty || !found)
     {
-      
+      /* Read existing committed tuple */
     }
     else
     {
@@ -349,6 +382,17 @@ Dbtup::load_diskpage(Signal* signal,
     jam();
     regOperPtr->op_struct.m_wait_log_buffer= 1;
     regOperPtr->op_struct.m_load_diskpage_on_commit= 1;
+    if (unlikely((flags & 7) == ZREFRESH))
+    {
+      jam();
+      /* Refresh of a previously nonexistent DD tuple.
+       * No disk page to load at commit time
+       */
+      regOperPtr->op_struct.m_wait_log_buffer= 0;
+      regOperPtr->op_struct.m_load_diskpage_on_commit= 0;
+    }
+
+    /* In either case return 1 for 'proceed' */
     return 1;
   }
   
@@ -408,6 +452,7 @@ Dbtup::load_diskpage(Signal* signal,
   case ZUPDATE:
   case ZINSERT:
   case ZWRITE:
+  case ZREFRESH:
     regOperPtr->op_struct.m_wait_log_buffer= 1;
     regOperPtr->op_struct.m_load_diskpage_on_commit= 1;
   }
@@ -554,7 +599,7 @@ void Dbtup::execTUPKEYREQ(Signal* signal
    Uint32 Rstoredid= tupKeyReq->storedProcedure;
 
    regOperPtr->fragmentPtr= Rfragptr;
-   regOperPtr->op_struct.op_type= (TrequestInfo >> 6) & 0xf;
+   regOperPtr->op_struct.op_type= (TrequestInfo >> 6) & 0x7;
    regOperPtr->op_struct.delete_insert_flag = false;
    regOperPtr->op_struct.m_reorg = (TrequestInfo >> 12) & 3;
 
@@ -629,12 +674,20 @@ void Dbtup::execTUPKEYREQ(Signal* signal
                 req_struct.attrinfo_len,
                 attrInfoIVal);
    
+   regOperPtr->op_struct.m_gci_written = 0;
+
    if (Roptype == ZINSERT && Local_key::isInvalid(pageid, pageidx))
    {
-     // No tuple allocatated yet
+     // No tuple allocated yet
      goto do_insert;
    }
 
+   if (Roptype == ZREFRESH && Local_key::isInvalid(pageid, pageidx))
+   {
+     // No tuple allocated yet
+     goto do_refresh;
+   }
+
    if (unlikely(isCopyTuple(pageid, pageidx)))
    {
      /**
@@ -828,6 +881,23 @@ void Dbtup::execTUPKEYREQ(Signal* signal
        sendTUPKEYCONF(signal, &req_struct, regOperPtr);
        return;
      }
+     else if (Roptype == ZREFRESH)
+     {
+       /**
+        * No TUX or immediate triggers, just detached triggers
+        */
+   do_refresh:
+       if (unlikely(handleRefreshReq(signal, operPtr,
+                                     fragptr, regTabPtr,
+                                     &req_struct, disk_page != RNIL) == -1))
+       {
+         return;
+       }
+
+       sendTUPKEYCONF(signal, &req_struct, regOperPtr);
+       return;
+
+     }
      else
      {
        ndbrequire(false); // Invalid op type
@@ -1114,6 +1184,15 @@ int Dbtup::handleUpdateReq(Signal* signa
   
   if (!req_struct->interpreted_exec) {
     jam();
+
+    if (regTabPtr->m_bits & Tablerec::TR_ExtraRowAuthorBits)
+    {
+      jam();
+      Uint32 attrId =
+        regTabPtr->getExtraAttrId<Tablerec::TR_ExtraRowAuthorBits>();
+
+      store_extra_row_bits(attrId, regTabPtr, dst, /* default */ 0, false);
+    }
     int retValue = updateAttributes(req_struct,
 				    &cinBuffer[0],
 				    req_struct->attrinfo_len);
@@ -1660,6 +1739,14 @@ int Dbtup::handleInsertReq(Signal* signa
     terrorCode = ZAI_INCONSISTENCY_ERROR;
     goto update_error;
   }
+
+  if (regTabPtr->m_bits & Tablerec::TR_ExtraRowAuthorBits)
+  {
+    Uint32 attrId =
+      regTabPtr->getExtraAttrId<Tablerec::TR_ExtraRowAuthorBits>();
+
+    store_extra_row_bits(attrId, regTabPtr, tuple_ptr, /* default */ 0, false);
+  }
   
   if (!regTabPtr->m_default_value_location.isNull())
   {
@@ -2034,6 +2121,197 @@ error:
   return -1;
 }
 
+int
+Dbtup::handleRefreshReq(Signal* signal,
+                        Ptr<Operationrec> regOperPtr,
+                        Ptr<Fragrecord>  regFragPtr,
+                        Tablerec* regTabPtr,
+                        KeyReqStruct *req_struct,
+                        bool disk)
+{
+  /* Here we set up the tuple so that a transition to its current
+   * state can be observed by SUMA's detached triggers.
+   *
+   * If the tuple does not exist then we fabricate a tuple
+   * so that it can appear to be 'deleted'.
+   *   The fabricated tuple may have invalid NULL values etc.
+   * If the tuple does exist then we fabricate a null-change
+   * update to the tuple.
+   *
+   * The logic differs depending on whether there are already
+   * other operations on the tuple in this transaction.
+   * No other operations (including Refresh) are allowed after
+   * a refresh.
+   */
+  Uint32 refresh_case;
+  if (regOperPtr.p->is_first_operation())
+  {
+    jam();
+    if (Local_key::isInvalid(req_struct->frag_page_id,
+                             regOperPtr.p->m_tuple_location.m_page_idx))
+    {
+      jam();
+      refresh_case = Operationrec::RF_SINGLE_NOT_EXIST;
+      //ndbout_c("case 1");
+      /**
+       * This is refresh of non-existing tuple...
+       *   i.e "delete", reuse initial insert
+       */
+       Local_key accminupdate;
+       Local_key * accminupdateptr = &accminupdate;
+
+       /**
+        * In this scenario we don't need:
+        * - disk
+        * - default values
+        */
+       Uint32 save_disk = regTabPtr->m_no_of_disk_attributes;
+       Local_key save_defaults = regTabPtr->m_default_value_location;
+       Bitmask<MAXNROFATTRIBUTESINWORDS> save_mask =
+         regTabPtr->notNullAttributeMask;
+
+       regTabPtr->m_no_of_disk_attributes = 0;
+       regTabPtr->m_default_value_location.setNull();
+       regOperPtr.p->op_struct.op_type = ZINSERT;
+
+       /**
+       * Update notNullAttributeMask to only include primary keys
+        */
+       regTabPtr->notNullAttributeMask.clear();
+       const Uint32 * primarykeys =
+         (Uint32*)&tableDescriptor[regTabPtr->readKeyArray].tabDescr;
+       for (Uint32 i = 0; i<regTabPtr->noOfKeyAttr; i++)
+         regTabPtr->notNullAttributeMask.set(primarykeys[i] >> 16);
+
+       int res = handleInsertReq(signal, regOperPtr,
+                                 regFragPtr, regTabPtr, req_struct,
+                                 &accminupdateptr);
+
+       regTabPtr->m_no_of_disk_attributes = save_disk;
+       regTabPtr->m_default_value_location = save_defaults;
+       regTabPtr->notNullAttributeMask = save_mask;
+
+       if (unlikely(res == -1))
+       {
+         return -1;
+       }
+
+       regOperPtr.p->op_struct.op_type = ZREFRESH;
+
+       if (accminupdateptr)
+       {
+         /**
+          * Update ACC local-key, once *everything* has completed successfully
+          */
+         c_lqh->accminupdate(signal,
+                             regOperPtr.p->userpointer,
+                             accminupdateptr);
+       }
+    }
+    else
+    {
+      refresh_case = Operationrec::RF_SINGLE_EXIST;
+      //ndbout_c("case 2");
+      jam();
+
+      Uint32 tup_version_save = req_struct->m_tuple_ptr->get_tuple_version();
+      Uint32 new_tup_version = decr_tup_version(tup_version_save);
+      Tuple_header* origTuple = req_struct->m_tuple_ptr;
+      origTuple->set_tuple_version(new_tup_version);
+      int res = handleUpdateReq(signal, regOperPtr.p, regFragPtr.p,
+                                regTabPtr, req_struct, disk);
+      /* Now we must reset the original tuple header back
+       * to the original version.
+       * The copy tuple will have the correct version due to
+       * the update incrementing it.
+       * On commit, the tuple becomes the copy tuple.
+       * On abort, the original tuple remains.  If we don't
+       * reset it here, then aborts cause the version to
+       * decrease
+       */
+      origTuple->set_tuple_version(tup_version_save);
+      if (res == -1)
+        return -1;
+    }
+  }
+  else
+  {
+    /* Not first operation on tuple in transaction */
+    jam();
+
+    Uint32 tup_version_save = req_struct->prevOpPtr.p->tupVersion;
+    Uint32 new_tup_version = decr_tup_version(tup_version_save);
+    req_struct->prevOpPtr.p->tupVersion = new_tup_version;
+
+    int res;
+    if (req_struct->prevOpPtr.p->op_struct.op_type == ZDELETE)
+    {
+      refresh_case = Operationrec::RF_MULTI_NOT_EXIST;
+      //ndbout_c("case 3");
+
+      jam();
+      /**
+       * In this scenario we don't need:
+       * - default values
+       *
+       * We keep disk attributes to avoid issues with 'insert'
+       */
+      Local_key save_defaults = regTabPtr->m_default_value_location;
+      Bitmask<MAXNROFATTRIBUTESINWORDS> save_mask =
+        regTabPtr->notNullAttributeMask;
+
+      regTabPtr->m_default_value_location.setNull();
+      regOperPtr.p->op_struct.op_type = ZINSERT;
+
+      /**
+       * Update notNullAttributeMask to only include primary keys
+       */
+      regTabPtr->notNullAttributeMask.clear();
+      const Uint32 * primarykeys =
+        (Uint32*)&tableDescriptor[regTabPtr->readKeyArray].tabDescr;
+      for (Uint32 i = 0; i<regTabPtr->noOfKeyAttr; i++)
+        regTabPtr->notNullAttributeMask.set(primarykeys[i] >> 16);
+
+      /**
+       * This is multi-update + DELETE + REFRESH
+       */
+      Local_key * accminupdateptr = 0;
+      res = handleInsertReq(signal, regOperPtr,
+                            regFragPtr, regTabPtr, req_struct,
+                            &accminupdateptr);
+
+      regTabPtr->m_default_value_location = save_defaults;
+      regTabPtr->notNullAttributeMask = save_mask;
+
+      if (unlikely(res == -1))
+      {
+        return -1;
+      }
+
+      regOperPtr.p->op_struct.op_type = ZREFRESH;
+    }
+    else
+    {
+      jam();
+      refresh_case = Operationrec::RF_MULTI_EXIST;
+      //ndbout_c("case 4");
+      /**
+       * This is multi-update + INSERT/UPDATE + REFRESH
+       */
+      res = handleUpdateReq(signal, regOperPtr.p, regFragPtr.p,
+                            regTabPtr, req_struct, disk);
+    }
+    req_struct->prevOpPtr.p->tupVersion = tup_version_save;
+    if (res == -1)
+      return -1;
+  }
+
+  /* Store the refresh scenario in the copy tuple location */
+  // TODO : Verify this is never used as a copy tuple location!
+  regOperPtr.p->m_copy_tuple_location.m_file_no = refresh_case;
+  return 0;
+}
+
 bool
 Dbtup::checkNullAttributes(KeyReqStruct * req_struct,
                            Tablerec* regTabPtr)
@@ -2209,6 +2487,28 @@ int Dbtup::interpreterStartLab(Signal* s
 	return -1;
       }
     }
+
+    if ((RlogSize > 0) ||
+        (RfinalUpdateLen > 0))
+    {
+      /* Operation updates row,
+       * reset author pseudo-col before update takes effect
+       * This should probably occur only if the interpreted program
+       * did not explicitly write the value, but that requires a bit
+       * to record whether the value has been written.
+       */
+      Tablerec* regTabPtr = req_struct->tablePtrP;
+      Tuple_header* dst = req_struct->m_tuple_ptr;
+
+      if (regTabPtr->m_bits & Tablerec::TR_ExtraRowAuthorBits)
+      {
+        Uint32 attrId =
+          regTabPtr->getExtraAttrId<Tablerec::TR_ExtraRowAuthorBits>();
+
+        store_extra_row_bits(attrId, regTabPtr, dst, /* default */ 0, false);
+      }
+    }
+
     if (RfinalUpdateLen > 0) {
       jam();
       /* ---------------------------------------------------------------- */
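
The refresh-of-existing-row path above leans on tuple-version arithmetic:
handleRefreshReq pre-decrements the stored version so that the fabricated
update's increment lands the copy tuple back on the original version, and
an abort (which keeps the original header) sees it restored explicitly.
A minimal sketch of that invariant, assuming a 15-bit wrapping version
field and hypothetical helper names:

  #include <cassert>
  #include <cstdint>

  static const uint32_t VERSION_MASK = 0x7fff;  // assumed field width

  static uint32_t decr_tup_version(uint32_t v) { return (v - 1) & VERSION_MASK; }
  static uint32_t incr_tup_version(uint32_t v) { return (v + 1) & VERSION_MASK; }

  int main() {
    uint32_t orig = 42;
    // pre-decrement, then the fabricated update increments the copy tuple
    uint32_t copy = incr_tup_version(decr_tup_version(orig));
    assert(copy == orig);                         // commit keeps the version stable
    assert(decr_tup_version(0) == VERSION_MASK);  // wrap-around stays in range
    return 0;
  }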

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp	2011-05-07 06:17:02 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp	2011-05-17 23:29:55 +0000
@@ -51,6 +51,8 @@ void Dbtup::initData()
 
   cCopyProcedure = RNIL;
   cCopyLastSeg = RNIL;
+  cCopyOverwrite = 0;
+  cCopyOverwriteLen = 0;
 
   // Records with constant sizes
   init_list_sizes();

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp	2011-05-07 06:17:02 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp	2011-05-17 23:29:55 +0000
@@ -76,6 +76,9 @@ Dbtup::execCREATE_TAB_REQ(Signal* signal
   memset(fragOperPtr.p->m_null_bits, 0, sizeof(fragOperPtr.p->m_null_bits));
   fragOperPtr.p->charsetIndex = 0;
   fragOperPtr.p->lqhBlockrefFrag = req->senderRef;
+  fragOperPtr.p->m_extra_row_gci_bits =
+    req->GCPIndicator > 1 ? req->GCPIndicator - 1 : 0;
+  fragOperPtr.p->m_extra_row_author_bits = req->extraRowAuthorBits;
 
   regTabPtr.p->m_createTable.m_fragOpPtrI = fragOperPtr.i;
   regTabPtr.p->m_createTable.defValSectionI = RNIL;
@@ -84,6 +87,9 @@ Dbtup::execCREATE_TAB_REQ(Signal* signal
   regTabPtr.p->m_bits |= (req->checksumIndicator ? Tablerec::TR_Checksum : 0);
   regTabPtr.p->m_bits |= (req->GCPIndicator ? Tablerec::TR_RowGCI : 0);
   regTabPtr.p->m_bits |= (req->forceVarPartFlag? Tablerec::TR_ForceVarPart : 0);
+  regTabPtr.p->m_bits |=
+    (req->GCPIndicator > 1 ? Tablerec::TR_ExtraRowGCIBits : 0);
+  regTabPtr.p->m_bits |= (req->extraRowAuthorBits ? Tablerec::TR_ExtraRowAuthorBits : 0);
 
   regTabPtr.p->m_offsets[MM].m_disk_ref_offset= 0;
   regTabPtr.p->m_offsets[MM].m_null_words= 0;
@@ -118,12 +124,26 @@ Dbtup::execCREATE_TAB_REQ(Signal* signal
   regTabPtr.p->m_no_of_attributes= req->noOfAttributes;
   regTabPtr.p->dynTabDescriptor[MM]= RNIL;
   regTabPtr.p->dynTabDescriptor[DD]= RNIL;
+  regTabPtr.p->m_no_of_extra_columns = 0;
+
+  if (regTabPtr.p->m_bits & Tablerec::TR_ExtraRowGCIBits)
+  {
+    jam();
+    regTabPtr.p->m_no_of_extra_columns++;
+  }
+
+  if (regTabPtr.p->m_bits & Tablerec::TR_ExtraRowAuthorBits)
+  {
+    jam();
+    regTabPtr.p->m_no_of_extra_columns++;
+  }
 
   {
     Uint32 offset[10];
     Uint32 allocSize= getTabDescrOffsets(req->noOfAttributes,
                                          req->noOfCharsets,
                                          req->noOfKeyAttr,
+                                         regTabPtr.p->m_no_of_extra_columns,
                                          offset);
     Uint32 tableDescriptorRef= allocTabDescr(allocSize);
     if (tableDescriptorRef == RNIL)
@@ -178,6 +198,8 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal*
   fragOperPtr.p->attributeCount--;
   const bool lastAttr = (fragOperPtr.p->attributeCount == 0);
 
+  Uint32 extraAttrId = 0;
+
   Uint32 firstTabDesIndex= regTabPtr.p->tabDescriptor + (attrId * ZAD_SIZE);
   setTabDescrWord(firstTabDesIndex, attrDescriptor);
   Uint32 attrLen = AttributeDescriptor::getSize(attrDescriptor);
@@ -321,7 +343,71 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal*
 	       signal, 2, JBB);
     return;
   }
-  
+
+  if (fragOperPtr.p->m_extra_row_gci_bits)
+  {
+    jam();
+
+    const Uint32 bits = fragOperPtr.p->m_extra_row_gci_bits;
+
+    /**
+     * Create attribute descriptor for extra row gci bits...
+     */
+    Uint32 desc = 0;
+    Uint32 off = 0;
+
+    AttributeDescriptor::setSize(desc, 0); // bit
+    AttributeDescriptor::setArraySize(desc, bits);
+    AttributeOffset::setNullFlagPos(off, fragOperPtr.p->m_null_bits[MM]);
+    fragOperPtr.p->m_null_bits[MM] += bits;
+
+    if (fragOperPtr.p->m_null_bits[MM] > AO_NULL_FLAG_POS_MASK)
+    {
+      jam();
+      terrorCode = ZTOO_MANY_BITS_ERROR;
+      goto error;
+    }
+
+    Uint32 idx = regTabPtr.p->tabDescriptor;
+    idx += ZAD_SIZE * (regTabPtr.p->m_no_of_attributes + extraAttrId);
+    setTabDescrWord(idx, desc);
+    setTabDescrWord(idx + 1, off);
+
+    extraAttrId++;
+  }
+
+  if (fragOperPtr.p->m_extra_row_author_bits)
+  {
+    jam();
+
+    const Uint32 bits = fragOperPtr.p->m_extra_row_author_bits;
+
+    /**
+     * Create attribute descriptor for extra row author bits...
+     */
+    Uint32 desc = 0;
+    Uint32 off = 0;
+
+    AttributeDescriptor::setSize(desc, 0); // bit
+    AttributeDescriptor::setArraySize(desc, bits);
+    AttributeOffset::setNullFlagPos(off, fragOperPtr.p->m_null_bits[MM]);
+    fragOperPtr.p->m_null_bits[MM] += bits;
+
+    if (fragOperPtr.p->m_null_bits[MM] > AO_NULL_FLAG_POS_MASK)
+    {
+      jam();
+      terrorCode = ZTOO_MANY_BITS_ERROR;
+      goto error;
+    }
+
+    Uint32 idx = regTabPtr.p->tabDescriptor;
+    idx += ZAD_SIZE * (regTabPtr.p->m_no_of_attributes + extraAttrId);
+    setTabDescrWord(idx, desc);
+    setTabDescrWord(idx + 1, off);
+
+    extraAttrId++;
+  }
+
 #define BTW(x) ((x+31) >> 5)
   regTabPtr.p->m_offsets[MM].m_null_words= BTW(fragOperPtr.p->m_null_bits[MM]);
   regTabPtr.p->m_offsets[DD].m_null_words= BTW(fragOperPtr.p->m_null_bits[DD]);
@@ -1027,6 +1113,7 @@ Dbtup::handleAlterTablePrepare(Signal *s
     /* Allocate a new (possibly larger) table descriptor buffer. */
     Uint32 allocSize= getTabDescrOffsets(newNoOfAttr, newNoOfCharsets,
                                          newNoOfKeyAttrs,
+                                         regTabPtr->m_no_of_extra_columns,
                                          regAlterTabOpPtr.p->tabDesOffset);
     Uint32 tableDescriptorRef= allocTabDescr(allocSize);
     if (tableDescriptorRef == RNIL) {
@@ -1047,11 +1134,24 @@ Dbtup::handleAlterTablePrepare(Signal *s
       (CHARSET_INFO**)(desc + regAlterTabOpPtr.p->tabDesOffset[2]);
     memcpy(CharsetArray, regTabPtr->charsetArray,
            sizeof(*CharsetArray)*regTabPtr->noOfCharsets);
-    Uint32 *attrDesPtr= desc + regAlterTabOpPtr.p->tabDesOffset[4];
+    Uint32 * const attrDesPtrStart = desc + regAlterTabOpPtr.p->tabDesOffset[4];
+    Uint32 * attrDesPtr = attrDesPtrStart;
     memcpy(attrDesPtr,
            &tableDescriptor[regTabPtr->tabDescriptor].tabDescr,
-           (ZAD_SIZE<<2)*oldNoOfAttr);
-    attrDesPtr+= ZAD_SIZE*oldNoOfAttr;
+           4 * ZAD_SIZE * oldNoOfAttr);
+
+    /**
+     * Copy extra columns descriptors to end of attrDesPtr
+     */
+    {
+      const Uint32 * src = &tableDescriptor[regTabPtr->tabDescriptor].tabDescr;
+      src += ZAD_SIZE * oldNoOfAttr;
+
+      Uint32 * dst = attrDesPtr + (ZAD_SIZE * newNoOfAttr);
+      memcpy(dst, src, 4 * ZAD_SIZE * regTabPtr->m_no_of_extra_columns);
+    }
+
+    attrDesPtr+= ZAD_SIZE * oldNoOfAttr;
 
     /*
       Loop over the new attributes to add.
@@ -1122,6 +1222,7 @@ Dbtup::handleAlterTablePrepare(Signal *s
       *attrDesPtr++= attrDes2;
     }
     ndbassert(newNoOfCharsets==charsetIndex);
+    ndbrequire(attrDesPtr == attrDesPtrStart + (ZAD_SIZE * newNoOfAttr));
 
     regAlterTabOpPtr.p->noOfDynNullBits= dyn_nullbits;
     ndbassert(noDynamic ==
@@ -1841,6 +1942,7 @@ void Dbtup::releaseTabDescr(Tablerec* co
     getTabDescrOffsets(regTabPtr->m_no_of_attributes,
                        regTabPtr->noOfCharsets,
                        regTabPtr->noOfKeyAttr,
+                       regTabPtr->m_no_of_extra_columns,
                        offset);
 
     regTabPtr->tabDescriptor= RNIL;
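
The extra GCI/author columns above are ordinary attribute descriptors
placed directly after the table's real attributes: the descriptor area
grows by extraColumns * ZAD_SIZE words and is indexed past
m_no_of_attributes. A small placement sketch (ZAD_SIZE == 2, one
descriptor word plus one offset word per attribute, is an assumption
here):

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint32_t ZAD_SIZE = 2;      // assumed: words per attribute entry
    uint32_t tabDescriptor = 1000;    // hypothetical descriptor area start
    uint32_t noOfAttributes = 5;      // real columns
    for (uint32_t extraAttrId = 0; extraAttrId < 2; extraAttrId++) {
      uint32_t idx = tabDescriptor + ZAD_SIZE * (noOfAttributes + extraAttrId);
      printf("extra col %u: descriptor word %u, offset word %u\n",
             extraAttrId, idx, idx + 1);
    }
    return 0;
  }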

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp	2011-05-19 09:16:32 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp	2011-05-25 15:03:11 +0000
@@ -25,6 +25,7 @@
 #include <AttributeDescriptor.hpp>
 #include "AttributeOffset.hpp"
 #include <AttributeHeader.hpp>
+#include <dblqh/Dblqh.hpp>
 
 void
 Dbtup::setUpQueryRoutines(Tablerec *regTabPtr)
@@ -1743,6 +1744,63 @@ int Dbtup::updateAttributes(KeyReqStruct
       inBufIndex += 1 + sz;
       req_struct->in_buf_index = inBufIndex;
     }
+    else if (attributeId == AttributeHeader::ROW_AUTHOR)
+    {
+      jam();
+      Uint32 sz= ahIn.getDataSize();
+      ndbrequire(sz == 1);
+
+      Uint32 value = * (inBuffer + inBufIndex + 1);
+      Uint32 attrId =
+        regTabPtr->getExtraAttrId<Tablerec::TR_ExtraRowAuthorBits>();
+
+      if (unlikely(!(regTabPtr->m_bits & Tablerec::TR_ExtraRowAuthorBits)))
+      {
+        return -ZATTRIBUTE_ID_ERROR;
+      }
+
+      if (unlikely(store_extra_row_bits(attrId, regTabPtr,
+                                        req_struct->m_tuple_ptr,
+                                        value, /* truncate */ false) == false))
+      {
+        return -ZAI_INCONSISTENCY_ERROR;
+      }
+      inBufIndex += 1 + sz;
+      req_struct->in_buf_index = inBufIndex;
+    }
+    else if (attributeId == AttributeHeader::ROW_GCI64)
+    {
+      jam();
+      Uint32 sz= ahIn.getDataSize();
+      ndbrequire(sz == 2);
+      Uint32 attrId =
+        regTabPtr->getExtraAttrId<Tablerec::TR_ExtraRowGCIBits>();
+      Uint32 gciLo = * (inBuffer + inBufIndex + 1);
+      Uint32 gciHi = * (inBuffer + inBufIndex + 2);
+
+      if (unlikely(!(regTabPtr->m_bits & Tablerec::TR_RowGCI)))
+      {
+        return -ZATTRIBUTE_ID_ERROR;
+      }
+
+      /* Record that GCI has been set explicitly */
+      regOperPtr->op_struct.m_gci_written = 1;
+
+      *req_struct->m_tuple_ptr->get_mm_gci(regTabPtr) = gciHi;
+
+      if (regTabPtr->m_bits & Tablerec::TR_ExtraRowGCIBits)
+      {
+        if (unlikely(store_extra_row_bits(attrId, regTabPtr,
+                                          req_struct->m_tuple_ptr,
+                                          gciLo, /*truncate*/ true) == false))
+        {
+          return -ZAI_INCONSISTENCY_ERROR;
+        }
+      }
+
+      inBufIndex+= 1 + sz;
+      req_struct->in_buf_index = inBufIndex;
+    }
     else
     {
       jam();
@@ -2520,6 +2578,48 @@ Dbtup::read_pseudo(const Uint32 * inBuff
       sz = 2;
     }
     break;
+  case AttributeHeader::ROW_GCI64:
+  {
+    sz = 0;
+    if (req_struct->tablePtrP->m_bits & Tablerec::TR_RowGCI)
+    {
+      Uint32 tmp0 = *req_struct->m_tuple_ptr->get_mm_gci(req_struct->tablePtrP);
+      Uint32 tmp1 = ~Uint32(0);
+      if (req_struct->tablePtrP->m_bits & Tablerec::TR_ExtraRowGCIBits)
+      {
+        Uint32 attrId =
+          req_struct->tablePtrP->getExtraAttrId<Tablerec::TR_ExtraRowGCIBits>();
+        read_extra_row_bits(attrId,
+                            req_struct->tablePtrP,
+                            req_struct->m_tuple_ptr,
+                            &tmp1,
+                            /* extend */ true);
+      }
+      Uint64 tmp = Uint64(tmp0) << 32 | tmp1;
+      memcpy(outBuffer + 1, &tmp, sizeof(tmp));
+      sz = 2;
+    }
+    break;
+  }
+  case AttributeHeader::ROW_AUTHOR:
+  {
+    sz = 0;
+    if (req_struct->tablePtrP->m_bits & Tablerec::TR_ExtraRowAuthorBits)
+    {
+      Uint32 attrId = req_struct->tablePtrP
+        ->getExtraAttrId<Tablerec::TR_ExtraRowAuthorBits>();
+
+      Uint32 tmp;
+      read_extra_row_bits(attrId,
+                          req_struct->tablePtrP,
+                          req_struct->m_tuple_ptr,
+                          &tmp,
+                          /* extend */ false);
+      outBuffer[1] = tmp;
+      sz = 1;
+    }
+    break;
+  }
   case AttributeHeader::ANY_VALUE:
   {
     /**
@@ -3444,3 +3544,85 @@ Dbtup::read_lcp_keys(Uint32 tableId,
 
   return ret;
 }
+
+bool
+Dbtup::store_extra_row_bits(Uint32 extra_no,
+                            const Tablerec* regTabPtr,
+                            Tuple_header* ptr,
+                            Uint32 value,
+                            bool truncate)
+{
+  jam();
+  if (unlikely(extra_no >= regTabPtr->m_no_of_extra_columns))
+    return false;
+  /**
+   * Extra columns use attribute id regTabPtr->m_no_of_attributes + extra_no
+   */
+  Uint32 num_attr= regTabPtr->m_no_of_attributes;
+  Uint32 attrId = num_attr + extra_no;
+  Uint32 descr_start = regTabPtr->tabDescriptor;
+  TableDescriptor *tab_descr = &tableDescriptor[descr_start];
+  ndbrequire(descr_start + (attrId << ZAD_LOG_SIZE) <= cnoOfTabDescrRec);
+
+  Uint32 attrDescriptorIndex = attrId << ZAD_LOG_SIZE;
+  Uint32 attrDescriptor = tab_descr[attrDescriptorIndex].tabDescr;
+  Uint32 attrOffset = tab_descr[attrDescriptorIndex + 1].tabDescr;
+
+  Uint32 pos = AttributeOffset::getNullFlagPos(attrOffset);
+  Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
+  Uint32 maxVal = (1 << bitCount) - 1;
+  Uint32 *bits= ptr->get_null_bits(regTabPtr);
+
+  if (value > maxVal)
+  {
+    if (truncate)
+    {
+      value = maxVal;
+    }
+    else
+    {
+      return false;
+    }
+  }
+
+  Uint32 check = regTabPtr->m_offsets[MM].m_null_words;
+  BitmaskImpl::setField(check, bits, pos, bitCount, &value);
+  return true;
+}
+
+void
+Dbtup::read_extra_row_bits(Uint32 extra_no,
+                           const Tablerec* regTabPtr,
+                           Tuple_header* ptr,
+                           Uint32 * value,
+                           bool extend)
+{
+  /**
+   * Extra columns use attribute id regTabPtr->m_no_of_attributes + extra_no
+   */
+  ndbrequire(extra_no < regTabPtr->m_no_of_extra_columns);
+  Uint32 num_attr= regTabPtr->m_no_of_attributes;
+  Uint32 attrId = num_attr + extra_no;
+  Uint32 descr_start = regTabPtr->tabDescriptor;
+  TableDescriptor *tab_descr = &tableDescriptor[descr_start];
+  ndbrequire(descr_start + (attrId << ZAD_LOG_SIZE) <= cnoOfTabDescrRec);
+
+  Uint32 attrDescriptorIndex = attrId << ZAD_LOG_SIZE;
+  Uint32 attrDescriptor = tab_descr[attrDescriptorIndex].tabDescr;
+  Uint32 attrOffset = tab_descr[attrDescriptorIndex + 1].tabDescr;
+
+  Uint32 pos = AttributeOffset::getNullFlagPos(attrOffset);
+  Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
+  Uint32 maxVal = (1 << bitCount) - 1;
+  Uint32 *bits= ptr->get_null_bits(regTabPtr);
+
+  Uint32 tmp;
+  Uint32 check = regTabPtr->m_offsets[MM].m_null_words;
+  BitmaskImpl::getField(check, bits, pos, bitCount, &tmp);
+
+  if (tmp == maxVal && extend)
+  {
+    tmp = ~Uint32(0);
+  }
+  * value = tmp;
+}
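
Taken together with the updateAttributes and read_pseudo changes, ROW_GCI64
is the 32-bit row GCI in the high half and a narrow saturating field in the
low half: store_extra_row_bits with truncate=true clamps an oversized gciLo
to the field maximum, and read_extra_row_bits with extend=true turns a
saturated field back into all-ones. A self-contained sketch of those
semantics (the 7-bit width is an arbitrary example):

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t bitCount = 7;                  // hypothetical field width
    const uint32_t maxVal = (1u << bitCount) - 1; // 127

    // store with truncate=true: oversized values saturate to maxVal
    uint32_t gciLo = 200;
    uint32_t stored = gciLo > maxVal ? maxVal : gciLo;
    assert(stored == maxVal);

    // read with extend=true: a saturated field reads back as all-ones
    uint32_t readBack = (stored == maxVal) ? ~uint32_t(0) : stored;

    uint32_t gciHi = 3;
    uint64_t gci64 = (uint64_t(gciHi) << 32) | readBack;  // ROW_GCI64 layout
    assert((gci64 >> 32) == gciHi);
    return 0;
  }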

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp	2011-02-08 14:45:27 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp	2011-05-17 23:29:55 +0000
@@ -189,8 +189,15 @@ void Dbtup::allocCopyProcedure()
     ndbrequire(appendToSection(iVal, &ahWord, 1));
   }
 
+  /* Add space for extra attrs */
+  ahWord = 0;
+  for (Uint32 extra=0; extra < EXTRA_COPY_PROC_WORDS; extra++)
+    ndbrequire(appendToSection(iVal, &ahWord, 1));
+
   cCopyProcedure= iVal;
   cCopyLastSeg= RNIL;
+  cCopyOverwrite= 0;
+  cCopyOverwriteLen= 0;
 }
 
 void Dbtup::freeCopyProcedure()
@@ -201,7 +208,8 @@ void Dbtup::freeCopyProcedure()
   cCopyProcedure=RNIL;
 }
 
-void Dbtup::prepareCopyProcedure(Uint32 numAttrs)
+void Dbtup::prepareCopyProcedure(Uint32 numAttrs,
+                                 Uint16 tableBits)
 {
   /* Set length of copy procedure section to the
    * number of attributes supplied
@@ -209,22 +217,49 @@ void Dbtup::prepareCopyProcedure(Uint32
   ndbassert(numAttrs <= MAX_ATTRIBUTES_IN_TABLE);
   ndbassert(cCopyProcedure != RNIL);
   ndbassert(cCopyLastSeg == RNIL);
+  ndbassert(cCopyOverwrite == 0);
+  ndbassert(cCopyOverwriteLen == 0);
   Ptr<SectionSegment> first;
   g_sectionSegmentPool.getPtr(first, cCopyProcedure);
 
   /* Record original 'last segment' of section */
   cCopyLastSeg= first.p->m_lastSegment;
 
+  /* Check table bits to see if we need to do extra reads */
+  Uint32 extraAttrIds[EXTRA_COPY_PROC_WORDS];
+  Uint32 extraReads = 0;
+
+  if (tableBits & Tablerec::TR_ExtraRowGCIBits)
+  {
+    AttributeHeader ah(AttributeHeader::ROW_GCI64,0);
+    extraAttrIds[extraReads++] = ah.m_value;
+  }
+  if (tableBits & Tablerec::TR_ExtraRowAuthorBits)
+  {
+    AttributeHeader ah(AttributeHeader::ROW_AUTHOR,0);
+    extraAttrIds[extraReads++] = ah.m_value;
+  }
+
   /* Modify section to represent relevant prefix 
    * of code by modifying size and lastSegment
    */
-  first.p->m_sz= numAttrs;
+  Uint32 newSize = numAttrs + extraReads;
+  first.p->m_sz= newSize;
 
+  if (extraReads)
+  {
+    cCopyOverwrite= numAttrs;
+    cCopyOverwriteLen = extraReads;
+
+    ndbrequire(writeToSection(first.i, numAttrs, extraAttrIds, extraReads));
+  }
+
+  /* Trim section size and lastSegment */
   Ptr<SectionSegment> curr= first;  
-  while(numAttrs > SectionSegment::DataLength)
+  while(newSize > SectionSegment::DataLength)
   {
     g_sectionSegmentPool.getPtr(curr, curr.p->m_nextSegment);
-    numAttrs-= SectionSegment::DataLength;
+    newSize-= SectionSegment::DataLength;
   }
   first.p->m_lastSegment= curr.i;
 }
@@ -238,10 +273,24 @@ void Dbtup::releaseCopyProcedure()
   Ptr<SectionSegment> first;
   g_sectionSegmentPool.getPtr(first, cCopyProcedure);
   
-  ndbassert(first.p->m_sz <= MAX_ATTRIBUTES_IN_TABLE);
-  first.p->m_sz= MAX_ATTRIBUTES_IN_TABLE;
+  ndbassert(first.p->m_sz <= MAX_COPY_PROC_LEN);
+  first.p->m_sz= MAX_COPY_PROC_LEN;
   first.p->m_lastSegment= cCopyLastSeg;
   
+  if (cCopyOverwriteLen)
+  {
+    ndbassert(cCopyOverwriteLen <= EXTRA_COPY_PROC_WORDS);
+    Uint32 attrids[EXTRA_COPY_PROC_WORDS];
+    for (Uint32 i=0; i < cCopyOverwriteLen; i++)
+    {
+      AttributeHeader ah(cCopyOverwrite + i, 0);
+      attrids[i] = ah.m_value;
+    }
+    ndbrequire(writeToSection(first.i, cCopyOverwrite, attrids, cCopyOverwriteLen));
+    cCopyOverwriteLen= 0;
+    cCopyOverwrite= 0;
+  }
+
   cCopyLastSeg= RNIL;
 }
   
@@ -257,8 +306,13 @@ void Dbtup::copyProcedure(Signal* signal
    * This assumes that there is only one fragment copy going
    * on at any time, which is verified by checking 
    * cCopyLastSeg == RNIL before starting each copy
+   *
+   * If the table has extra per-row metadata that
+   * needs to be copied then we add that to the copy procedure
+   * as well.
    */
-  prepareCopyProcedure(regTabPtr.p->m_no_of_attributes);
+  prepareCopyProcedure(regTabPtr.p->m_no_of_attributes,
+                       regTabPtr.p->m_bits);
 
   SectionHandle handle(this);
   handle.m_cnt=1;
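
prepareCopyProcedure resizes the shared copy-procedure section in place and
then walks the segment chain to find the new last segment;
releaseCopyProcedure later restores the full MAX_COPY_PROC_LEN and rewrites
the borrowed slots as plain attribute reads. The walk is just repeated
subtraction of the per-segment capacity, sketched here (the concrete value
of SectionSegment::DataLength is an assumption):

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint32_t DataLength = 60;  // assumed words per SectionSegment
    uint32_t newSize = 150;          // e.g. numAttrs + extraReads
    uint32_t segment = 0;            // index of the first segment
    while (newSize > DataLength) {   // mirrors the loop in the patch
      segment++;                     // follow m_nextSegment
      newSize -= DataLength;
    }
    printf("last segment #%u holds %u words\n", segment, newSize);
    return 0;
  }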

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp	2011-04-19 15:59:06 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp	2011-05-17 23:29:55 +0000
@@ -38,8 +38,11 @@
  */
 
 Uint32
-Dbtup::getTabDescrOffsets(Uint32 noOfAttrs, Uint32 noOfCharsets,
-                          Uint32 noOfKeyAttr, Uint32* offset)
+Dbtup::getTabDescrOffsets(Uint32 noOfAttrs,
+                          Uint32 noOfCharsets,
+                          Uint32 noOfKeyAttr,
+                          Uint32 extraColumns,
+                          Uint32* offset)
 {
   // belongs to configure.in
   unsigned sizeOfPointer = sizeof(CHARSET_INFO*);
@@ -53,7 +56,7 @@ Dbtup::getTabDescrOffsets(Uint32 noOfAtt
   offset[2] = allocSize += noOfAttrs * sizeOfReadFunction();
   offset[3] = allocSize += noOfCharsets * sizeOfPointer;
   offset[4] = allocSize += noOfKeyAttr;
-  offset[5] = allocSize += noOfAttrs * ZAD_SIZE;
+  offset[5] = allocSize += (noOfAttrs + extraColumns) * ZAD_SIZE;
   offset[6] = allocSize += (noOfAttrs+1) >> 1;  // real order
   allocSize += ZTD_TRAILER_SIZE;
   // return number of words
@@ -322,6 +325,7 @@ Dbtup::verifytabdes()
         const Uint32 alloc = getTabDescrOffsets(ptr.p->m_no_of_attributes,
                                                 ptr.p->noOfCharsets,
                                                 ptr.p->noOfKeyAttr,
+                                                ptr.p->m_no_of_extra_columns,
                                                 offset);
         const Uint32 desc = ptr.p->readKeyArray - offset[3];
         Uint32 size = alloc;
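
The only layout change in getTabDescrOffsets is that the attribute
descriptor region ending at offset[5] now also reserves room for the extra
columns, which is why every caller, including verifytabdes(), must pass
m_no_of_extra_columns consistently. The changed term in isolation (word
counts illustrative):

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint32_t ZAD_SIZE = 2;             // assumed words per descriptor
    uint32_t noOfAttrs = 10, extraColumns = 2;
    uint32_t oldWords = noOfAttrs * ZAD_SIZE;
    uint32_t newWords = (noOfAttrs + extraColumns) * ZAD_SIZE;
    printf("descriptor area grows by %u words\n", newWords - oldWords);
    return 0;
  }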

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp	2011-05-25 13:19:02 +0000
@@ -863,6 +863,7 @@ void Dbtup::checkDetachedTriggers(KeyReq
   switch (save_type) {
   case ZUPDATE:
   case ZINSERT:
+  case ZREFRESH:
     req_struct->m_tuple_ptr =get_copy_tuple(&regOperPtr->m_copy_tuple_location);
     break;
   }
@@ -878,7 +879,10 @@ void Dbtup::checkDetachedTriggers(KeyReq
       return;
       goto end;
     }
-    regOperPtr->op_struct.op_type = ZINSERT;
+    else if (save_type != ZREFRESH)
+    {
+      regOperPtr->op_struct.op_type = ZINSERT;
+    }
   }
   else if (save_type == ZINSERT) {
     /**
@@ -930,6 +934,29 @@ void Dbtup::checkDetachedTriggers(KeyReq
                          regTablePtr->subscriptionUpdateTriggers, 
                          regOperPtr, disk);
     break;
+  case ZREFRESH:
+    jam();
+    /* Depending on the Refresh scenario, fire Delete or Insert
+     * triggers to simulate the effect of arriving at the tuple's
+     * current state.
+     */
+    switch(regOperPtr->m_copy_tuple_location.m_file_no){
+    case Operationrec::RF_SINGLE_NOT_EXIST:
+    case Operationrec::RF_MULTI_NOT_EXIST:
+      fireDetachedTriggers(req_struct,
+                           regTablePtr->subscriptionDeleteTriggers,
+                           regOperPtr, disk);
+      break;
+    case Operationrec::RF_SINGLE_EXIST:
+    case Operationrec::RF_MULTI_EXIST:
+      fireDetachedTriggers(req_struct,
+                           regTablePtr->subscriptionInsertTriggers,
+                           regOperPtr, disk);
+      break;
+    default:
+      ndbrequire(false);
+    }
+    break;
   default:
     ndbrequire(false);
     break;
@@ -1375,12 +1402,14 @@ out:
 
     switch(regOperPtr->op_struct.op_type) {
     case(ZINSERT):
+    is_insert:
       jam();
       // Send AttrInfo signals with new attribute values
       trigAttrInfo->setAttrInfoType(TrigAttrInfo::AFTER_VALUES);
       sendTrigAttrInfo(signal, afterBuffer, noAfterWords, executeDirect, ref);
       break;
     case(ZDELETE):
+    is_delete:
       if (trigPtr->sendBeforeValues) {
         jam();
         trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES);
@@ -1397,6 +1426,23 @@ out:
       trigAttrInfo->setAttrInfoType(TrigAttrInfo::AFTER_VALUES);
       sendTrigAttrInfo(signal, afterBuffer, noAfterWords, executeDirect, ref);
       break;
+    case ZREFRESH:
+      jam();
+      /* Reuse Insert/Delete trigger firing code as necessary */
+      switch(regOperPtr->m_copy_tuple_location.m_file_no){
+      case Operationrec::RF_SINGLE_NOT_EXIST:
+        jam();
+      case Operationrec::RF_MULTI_NOT_EXIST:
+        jam();
+        goto is_delete;
+      case Operationrec::RF_SINGLE_EXIST:
+        jam();
+      case Operationrec::RF_MULTI_EXIST:
+        jam();
+        goto is_insert;
+      default:
+        ndbrequire(false);
+      }
     default:
       ndbrequire(false);
     }
@@ -1424,6 +1470,25 @@ out:
     jam();
     fireTrigOrd->m_triggerEvent = TriggerEvent::TE_DELETE;
     break;
+  case ZREFRESH:
+    jam();
+    switch(regOperPtr->m_copy_tuple_location.m_file_no){
+    case Operationrec::RF_SINGLE_NOT_EXIST:
+      jam();
+    case Operationrec::RF_MULTI_NOT_EXIST:
+      jam();
+      fireTrigOrd->m_triggerEvent = TriggerEvent::TE_DELETE;
+      break;
+    case Operationrec::RF_SINGLE_EXIST:
+      jam();
+    case Operationrec::RF_MULTI_EXIST:
+      jam();
+      fireTrigOrd->m_triggerEvent = TriggerEvent::TE_INSERT;
+      break;
+    default:
+      ndbrequire(false);
+    }
+    break;
   default:
     ndbrequire(false);
     break;
@@ -1615,7 +1680,7 @@ bool Dbtup::readTriggerInfo(TupTriggerDa
 // Delete without sending before values only read Primary Key
 //--------------------------------------------------------------------
     return true;
-  } else {
+  } else if (regOperPtr->op_struct.op_type != ZREFRESH){
     jam();
 //--------------------------------------------------------------------
 // All others send all attributes that are monitored, except:
@@ -1632,6 +1697,27 @@ bool Dbtup::readTriggerInfo(TupTriggerDa
     numAttrsToRead = setAttrIds(attributeMask, regTabPtr->m_no_of_attributes,
                                 &readBuffer[0]);
   }
+  else
+  {
+    jam();
+    ndbassert(regOperPtr->op_struct.op_type == ZREFRESH);
+    /* Refresh specific before/after value hacks */
+    switch(regOperPtr->m_copy_tuple_location.m_file_no){
+    case Operationrec::RF_SINGLE_NOT_EXIST:
+    case Operationrec::RF_MULTI_NOT_EXIST:
+      return true; // generate ZDELETE...no before values
+    case Operationrec::RF_SINGLE_EXIST:
+    case Operationrec::RF_MULTI_EXIST:
+      // generate ZINSERT...all after values
+      numAttrsToRead = setAttrIds(trigPtr->attributeMask,
+                                  regTabPtr->m_no_of_attributes,
+                                  &readBuffer[0]);
+      break;
+    default:
+      ndbrequire(false);
+    }
+  }
+
   ndbrequire(numAttrsToRead <= MAX_ATTRIBUTES_IN_TABLE);
 //--------------------------------------------------------------------
 // Read Main tuple values
@@ -1875,6 +1961,9 @@ Dbtup::executeTuxCommitTriggers(Signal*
       return;
     jam();
     tupVersion= regOperPtr->tupVersion;
+  } else if (regOperPtr->op_struct.op_type == ZREFRESH) {
+    /* Refresh should not affect TUX */
+    return;
   } else {
     ndbrequire(false);
     tupVersion= 0; // remove warning
@@ -1907,6 +1996,10 @@ Dbtup::executeTuxAbortTriggers(Signal* s
   } else if (regOperPtr->op_struct.op_type == ZDELETE) {
     jam();
     return;
+  } else if (regOperPtr->op_struct.op_type == ZREFRESH) {
+    jam();
+    /* Refresh should not affect TUX */
+    return;
   } else {
     ndbrequire(false);
     tupVersion= 0; // remove warning
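
All three trigger paths above apply the same mapping from the stored
refresh case to the event that observers see: a refresh of an absent row
is reported as a delete, a refresh of a present row as an insert.
Condensed into one hypothetical helper:

  enum RefreshCase { RF_SINGLE_NOT_EXIST, RF_SINGLE_EXIST,
                     RF_MULTI_NOT_EXIST, RF_MULTI_EXIST };
  enum TriggerEvent { TE_INSERT, TE_DELETE };

  static TriggerEvent refreshEvent(RefreshCase rc) {
    switch (rc) {
    case RF_SINGLE_NOT_EXIST:
    case RF_MULTI_NOT_EXIST:
      return TE_DELETE;   // row absent: subscribers see a delete
    case RF_SINGLE_EXIST:
    case RF_MULTI_EXIST:
    default:
      return TE_INSERT;   // row present: subscribers see an insert
    }
  }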

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.cpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.cpp	2011-05-23 10:38:41 +0000
@@ -30,17 +30,17 @@
 #include <EventLogger.hpp>
 extern EventLogger * g_eventLogger;
 
-AsyncIoThread::AsyncIoThread(class Ndbfs& fs, AsyncFile* file)
+AsyncIoThread::AsyncIoThread(class Ndbfs& fs, bool bound)
   : m_fs(fs)
 {
-  m_current_file = file;
-  if (file)
+  m_current_file = 0;
+  if (bound)
   {
-    theMemoryChannelPtr = &theMemoryChannel;
+    theMemoryChannelPtr = &m_fs.theToBoundThreads;
   }
   else
   {
-    theMemoryChannelPtr = &m_fs.theToThreads;
+    theMemoryChannelPtr = &m_fs.theToUnboundThreads;
   }
   theReportTo = &m_fs.theFromThreads;
 }
@@ -149,13 +149,17 @@ AsyncIoThread::run()
     switch (request->action) {
     case Request::open:
       file->openReq(request);
+      if (request->error == 0 && request->m_do_bind)
+        attach(file);
       break;
     case Request::close:
       file->closeReq(request);
+      detach(file);
       break;
     case Request::closeRemove:
       file->closeReq(request);
       file->removeReq(request);
+      detach(file);
       break;
     case Request::readPartial:
     case Request::read:
@@ -265,3 +269,32 @@ AsyncIoThread::buildIndxReq(Request* req
   req.buffer_size = request->file->m_page_cnt * sizeof(GlobalPage);
   request->error = (* req.func_ptr)(&req);
 }
+
+void
+AsyncIoThread::attach(AsyncFile* file)
+{
+  assert(m_current_file == 0);
+  assert(theMemoryChannelPtr == &m_fs.theToBoundThreads);
+  m_current_file = file;
+  theMemoryChannelPtr = &theMemoryChannel;
+  file->attach(this);
+  m_fs.cnt_active_bound(1);
+}
+
+void
+AsyncIoThread::detach(AsyncFile* file)
+{
+  if (m_current_file == 0)
+  {
+    assert(file->getThread() == 0);
+  }
+  else
+  {
+    assert(m_current_file == file);
+    assert(theMemoryChannelPtr == &theMemoryChannel);
+    m_current_file = 0;
+    theMemoryChannelPtr = &m_fs.theToBoundThreads;
+    file->detach(this);
+    m_fs.cnt_active_bound(-1);
+  }
+}
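
attach/detach switch which channel a bound thread polls: idle bound
threads all read the shared theToBoundThreads channel, and attaching to a
file retargets the thread at its private channel so all I/O for that file
is serialized on one thread. A stripped-down sketch of the switching
(types are stand-ins, not the real MemoryChannel API):

  struct Channel {};                // stand-in for MemoryChannel<Request>
  struct File;

  struct Thread {
    Channel  privateChannel;
    Channel* sharedBoundChannel;    // i.e. &m_fs.theToBoundThreads
    Channel* current;               // what the run() loop polls
    File*    file = nullptr;

    void attach(File* f) {          // mirrors AsyncIoThread::attach
      file = f;
      current = &privateChannel;    // now serves only this file
    }
    void detach() {                 // mirrors AsyncIoThread::detach
      file = nullptr;
      current = sharedBoundChannel; // back to serving any bound open
    }
  };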

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.hpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.hpp	2011-04-21 09:21:18 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.hpp	2011-05-23 10:38:41 +0000
@@ -43,6 +43,8 @@ class Request
 public:
   Request() {}
 
+  void atGet() { m_do_bind = false; }
+
   enum Action {
     open,
     close,
@@ -113,6 +115,7 @@ public:
    // Information for open, needed if the first open action fails.
   AsyncFile* file;
   Uint32 theTrace;
+  bool m_do_bind;
 
   MemoryChannel<Request>::ListMember m_mem_channel;
 };
@@ -134,7 +137,7 @@ class AsyncIoThread
   friend class Ndbfs;
   friend class AsyncFile;
 public:
-  AsyncIoThread(class Ndbfs&, AsyncFile* file);
+  AsyncIoThread(class Ndbfs&, bool bound);
   virtual ~AsyncIoThread() {};
 
   struct NdbThread* doStart();
@@ -174,6 +177,8 @@ private:
    */
   void buildIndxReq(Request*);
 
+  void attach(AsyncFile*);
+  void detach(AsyncFile*);
 };
 
 #endif

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp	2011-04-21 09:21:18 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp	2011-05-23 10:38:41 +0000
@@ -45,6 +45,8 @@
 #include <EventLogger.hpp>
 extern EventLogger * g_eventLogger;
 
+NdbMutex g_active_bound_threads_mutex;
+
 inline
 int pageSize( const NewVARIABLE* baseAddrRef )
 {
@@ -62,10 +64,15 @@ Ndbfs::Ndbfs(Block_context& ctx) :
   scanningInProgress(false),
   theLastId(0),
   theRequestPool(0),
-  m_maxOpenedFiles(0)
+  m_maxOpenedFiles(0),
+  m_bound_threads_cnt(0),
+  m_unbound_threads_cnt(0),
+  m_active_bound_threads_cnt(0)
 {
   BLOCK_CONSTRUCTOR(Ndbfs);
 
+  NdbMutex_Init(&g_active_bound_threads_mutex);
+
   // Set received signals
   addRecSignal(GSN_READ_CONFIG_REQ, &Ndbfs::execREAD_CONFIG_REQ);
   addRecSignal(GSN_DUMP_STATE_ORD,  &Ndbfs::execDUMP_STATE_ORD);
@@ -100,7 +107,8 @@ Ndbfs::~Ndbfs()
   request.action = Request::end;
   for (unsigned i = 0; i < theThreads.size(); i++)
   {
-    theToThreads.writeChannel(&request);
+    theToBoundThreads.writeChannel(&request);
+    theToUnboundThreads.writeChannel(&request);
   }
 
   for (unsigned i = 0; i < theThreads.size(); i++)
@@ -274,7 +282,12 @@ Ndbfs::execREAD_CONFIG_REQ(Signal* signa
   // Create idle AsyncFiles
   for (Uint32 i = 0; i < noIdleFiles; i++)
   {
-    theIdleBoundFiles.push_back(createAsyncFile(true /* bound */));
+    theIdleFiles.push_back(createAsyncFile());
+    AsyncIoThread * thr = createIoThread(/* bound */ true);
+    if (thr)
+    {
+      theThreads.push_back(thr);
+    }
   }
 
   Uint32 threadpool = 2;
@@ -283,7 +296,7 @@ Ndbfs::execREAD_CONFIG_REQ(Signal* signa
   // Create IoThreads
   for (Uint32 i = 0; i < threadpool; i++)
   {
-    AsyncIoThread * thr = createIoThread(0);
+    AsyncIoThread * thr = createIoThread(/* bound */ false);
     if (thr)
     {
       jam();
@@ -339,7 +352,7 @@ Ndbfs::execSTTOR(Signal* signal)
   ndbrequire(0);
 }
 
-int 
+int
 Ndbfs::forward( AsyncFile * file, Request* request)
 {
   jam();
@@ -348,9 +361,13 @@ Ndbfs::forward( AsyncFile * file, Reques
   {
     thr->dispatch(request);
   }
+  else if (request->m_do_bind)
+  {
+    theToBoundThreads.writeChannel(request);
+  }
   else
   {
-    theToThreads.writeChannel(request);
+    theToUnboundThreads.writeChannel(request);
   }
   return 1;
 }
@@ -444,7 +461,8 @@ Ndbfs::execFSOPENREQ(Signal* signal)
   request->par.open.file_size <<= 32;
   request->par.open.file_size |= fsOpenReq->file_size_lo;
   request->par.open.auto_sync_size = fsOpenReq->auto_sync_size;
-  
+  request->m_do_bind = bound;
+
   ndbrequire(forward(file, request));
 }
 
@@ -454,7 +472,8 @@ Ndbfs::execFSREMOVEREQ(Signal* signal)
   jamEntry();
   const FsRemoveReq * const req = (FsRemoveReq *)signal->getDataPtr();
   const BlockReference userRef = req->userReference;
-  AsyncFile* file = getIdleFile(true);
+  bool bound = true;
+  AsyncFile* file = getIdleFile(bound);
   ndbrequire(file != NULL);
 
   SectionHandle handle(this, signal);
@@ -479,7 +498,8 @@ Ndbfs::execFSREMOVEREQ(Signal* signal)
   request->set(userRef, req->userPointer, newId() );
   request->file = file;
   request->theTrace = signal->getTrace();
-  
+  request->m_do_bind = bound;
+
   if (version == 6)
   {
     ndbrequire(bp < NDB_ARRAY_SIZE(m_base_path));
@@ -541,6 +561,7 @@ Ndbfs::execFSCLOSEREQ(Signal * signal)
   request->file = openFile;
   request->error = 0;
   request->theTrace = signal->getTrace();
+  request->m_do_bind = false;
 
   ndbrequire(forward(openFile, request));
 }
@@ -584,6 +605,7 @@ Ndbfs::readWriteRequest(int action, Sign
   request->file = openFile;
   request->action = (Request::Action) action;
   request->theTrace = signal->getTrace();
+  request->m_do_bind = false;
 
   Uint32 format = fsRWReq->getFormatFlag(fsRWReq->operationFlag);
 
@@ -804,7 +826,8 @@ Ndbfs::execFSSYNCREQ(Signal * signal)
   request->set(userRef, userPointer, filePointer);
   request->file = openFile;
   request->theTrace = signal->getTrace();
-  
+  request->m_do_bind = false;
+
   ndbrequire(forward(openFile,request));
 }
 
@@ -832,6 +855,7 @@ Ndbfs::execFSSUSPENDORD(Signal * signal)
   request->file = openFile;
   request->theTrace = signal->getTrace();
   request->par.suspend.milliseconds = millis;
+  request->m_do_bind = false;
 
   ndbrequire(forward(openFile,request));
 }
@@ -895,6 +919,7 @@ Ndbfs::execFSAPPENDREQ(Signal * signal)
     request->action = Request::append;
   else
     request->action = Request::append_synch;
+  request->m_do_bind = false;
   ndbrequire(forward(openFile, request));
   return;
   
@@ -918,7 +943,8 @@ Ndbfs::execALLOC_MEM_REQ(Signal* signal)
 
   AllocMemReq* req = (AllocMemReq*)signal->getDataPtr();
 
-  AsyncFile* file = getIdleFile(true);
+  bool bound = true;
+  AsyncFile* file = getIdleFile(bound);
   ndbrequire(file != NULL);
 
   Request *request = theRequestPool->get();
@@ -932,6 +958,7 @@ Ndbfs::execALLOC_MEM_REQ(Signal* signal)
   request->par.alloc.requestInfo = req->requestInfo;
   request->par.alloc.bytes = (Uint64(req->bytes_hi) << 32) + req->bytes_lo;
   request->action = Request::allocmem;
+  request->m_do_bind = bound;
   ndbrequire(forward(file, request));
 }
 
@@ -943,7 +970,8 @@ Ndbfs::execBUILD_INDX_IMPL_REQ(Signal* s
   jamEntry();
   mt_BuildIndxReq * req = (mt_BuildIndxReq*)signal->getDataPtr();
 
-  AsyncFile* file = getIdleFile(true);
+  bool bound = true;
+  AsyncFile* file = getIdleFile(bound);
   ndbrequire(file != NULL);
 
   Request *request = theRequestPool->get();
@@ -972,6 +1000,7 @@ Ndbfs::execBUILD_INDX_IMPL_REQ(Signal* s
 
   memcpy(&request->par.build.m_req, req, sizeof(* req));
   request->action = Request::buildindx;
+  request->m_do_bind = bound;
   ndbrequire(forward(file, request));
 }
 
@@ -1000,8 +1029,8 @@ Ndbfs::newId()
 }
 
 AsyncFile*
-Ndbfs::createAsyncFile(bool bound){
-
+Ndbfs::createAsyncFile()
+{
   // Check limit of open files
   if (m_maxFiles !=0 && theFiles.size() ==  m_maxFiles)
   {
@@ -1024,42 +1053,35 @@ Ndbfs::createAsyncFile(bool bound){
     ERROR_SET(fatal, NDBD_EXIT_AFS_MAXOPEN,""," Ndbfs::createAsyncFile");
   }
 
-  if (bound)
-  {
-    AsyncIoThread * thr = createIoThread(file);
-    theThreads.push_back(thr);
-    file->attach(thr);
-
-#ifdef VM_TRACE
-    ndbout_c("NDBFS: Created new file thread %d", theFiles.size());
-#endif
-  }
-
   theFiles.push_back(file);
-  
   return file;
 }
 
 void
 Ndbfs::pushIdleFile(AsyncFile* file)
 {
-  if (file->getThread())
-  {
-    theIdleBoundFiles.push_back(file);
-  }
-  else
-  {
-    theIdleUnboundFiles.push_back(file);
-  }
+  assert(file->getThread() == 0);
+  theIdleFiles.push_back(file);
 }
 
 AsyncIoThread*
-Ndbfs::createIoThread(AsyncFile* file)
+Ndbfs::createIoThread(bool bound)
 {
-  AsyncIoThread* thr = new AsyncIoThread(*this, file);
+  AsyncIoThread* thr = new AsyncIoThread(*this, bound);
+  if (thr)
+  {
+#ifdef VM_TRACE
+    ndbout_c("NDBFS: Created new file thread %d", theThreads.size());
+#endif
 
-  struct NdbThread* thrptr = thr->doStart();
-  globalEmulatorData.theConfiguration->addThread(thrptr, NdbfsThread);
+    struct NdbThread* thrptr = thr->doStart();
+    globalEmulatorData.theConfiguration->addThread(thrptr, NdbfsThread);
+
+    if (bound)
+      m_bound_threads_cnt++;
+    else
+      m_unbound_threads_cnt++;
+  }
 
   return thr;
 }
@@ -1067,31 +1089,50 @@ Ndbfs::createIoThread(AsyncFile* file)
 AsyncFile*
 Ndbfs::getIdleFile(bool bound)
 {
-  if (bound)
+  AsyncFile* file = 0;
+  Uint32 sz = theIdleFiles.size();
+  if (sz)
   {
-    Uint32 sz = theIdleBoundFiles.size();
-    if (sz)
-    {
-      AsyncFile* file = theIdleBoundFiles[sz - 1];
-      theIdleBoundFiles.erase(sz - 1);
-      return file;
-    }
+    file = theIdleFiles[sz - 1];
+    theIdleFiles.erase(sz - 1);
   }
   else
   {
-    Uint32 sz = theIdleUnboundFiles.size();
-    if (sz)
+    file = createAsyncFile();
+  }
+
+  if (bound)
+  {
+    /**
+     * Check if we should create thread
+     */
+    if (m_active_bound_threads_cnt == m_bound_threads_cnt)
     {
-      AsyncFile* file = theIdleUnboundFiles[sz - 1];
-      theIdleUnboundFiles.erase(sz - 1);
-      return file;
+      AsyncIoThread * thr = createIoThread(true);
+      if (thr)
+      {
+        theThreads.push_back(thr);
+      }
     }
   }
-
-  return createAsyncFile(bound);
+  return file;
 }
 
-
+void
+Ndbfs::cnt_active_bound(int val)
+{
+  Guard g(&g_active_bound_threads_mutex);
+  if (val < 0)
+  {
+    val = -val;
+    assert(m_active_bound_threads_cnt >= (Uint32)val);
+    m_active_bound_threads_cnt -= val;
+  }
+  else
+  {
+    m_active_bound_threads_cnt += val;
+  }
+}
 
 void
 Ndbfs::report(Request * request, Signal* signal)
@@ -1506,10 +1547,13 @@ Ndbfs::execDUMP_STATE_ORD(Signal* signal
     infoEvent("NDBFS: Files: %d Open files: %d",
 	      theFiles.size(),
 	      theOpenFiles.size());
-    infoEvent(" Idle files: (bound: %u unbound: %u) Max opened files: %d",
-              theIdleBoundFiles.size(),
-              theIdleUnboundFiles.size(),
+    infoEvent(" Idle files: %u Max opened files: %d",
+              theIdleFiles.size(),
               m_maxOpenedFiles);
+    infoEvent(" Bound Threads: %u (active %u) Unbound threads: %u",
+              m_bound_threads_cnt,
+              m_active_bound_threads_cnt,
+              m_unbound_threads_cnt);
     infoEvent(" Max files: %d",
 	      m_maxFiles);
     infoEvent(" Requests: %d",
@@ -1522,7 +1566,10 @@ Ndbfs::execDUMP_STATE_ORD(Signal* signal
     
     for (unsigned i = 0; i < theOpenFiles.size(); i++){
       AsyncFile* file = theOpenFiles.getFile(i);
-      infoEvent("%2d (0x%lx): %s", i, (long)file, file->theFileName.c_str());
+      infoEvent("%2d (0x%lx): %s thr: %lx", i,
+                (long)file,
+                file->theFileName.c_str(),
+                (long)file->getThread());
     }
     return;
   }
@@ -1536,18 +1583,14 @@ Ndbfs::execDUMP_STATE_ORD(Signal* signal
     return;
   }
   if(signal->theData[0] == DumpStateOrd::NdbfsDumpIdleFiles){
-    infoEvent("NDBFS: Dump idle files: %d %u",
-              theIdleBoundFiles.size(), theIdleUnboundFiles.size());
-    
-    for (unsigned i = 0; i < theIdleBoundFiles.size(); i++){
-      AsyncFile* file = theIdleBoundFiles[i];
-      infoEvent("%2d (0x%lx): %s", i, (long)file, file->isOpen()?"OPEN":"CLOSED");
-    }
+    infoEvent("NDBFS: Dump idle files: %u",
+              theIdleFiles.size());
 
-    for (unsigned i = 0; i < theIdleUnboundFiles.size(); i++){
-      AsyncFile* file = theIdleUnboundFiles[i];
+    for (unsigned i = 0; i < theIdleFiles.size(); i++){
+      AsyncFile* file = theIdleFiles[i];
       infoEvent("%2d (0x%lx): %s", i, (long)file, file->isOpen()?"OPEN":"CLOSED");
     }
+
     return;
   }
 
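cnt_active_bound keeps a global count of bound threads currently attached
to a file, so getIdleFile can grow the bound pool exactly when every bound
thread is busy. A sketch of the guarded signed-delta counter, with
std::mutex standing in for NdbMutex:

  #include <cassert>
  #include <cstdint>
  #include <mutex>

  static std::mutex g_mutex;
  static uint32_t g_active = 0;

  void cnt_active_bound(int val) {  // mirrors Ndbfs::cnt_active_bound
    std::lock_guard<std::mutex> g(g_mutex);
    if (val < 0) {
      assert(g_active >= uint32_t(-val));
      g_active -= uint32_t(-val);
    } else {
      g_active += uint32_t(val);
    }
  }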

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp	2011-05-23 10:38:41 +0000
@@ -79,19 +79,19 @@ private:
 
   // Communication from/to files
   MemoryChannel<Request> theFromThreads;
-  MemoryChannel<Request> theToThreads;
+  MemoryChannel<Request> theToBoundThreads;
+  MemoryChannel<Request> theToUnboundThreads;
 
   Pool<Request>* theRequestPool;
 
-  AsyncIoThread* createIoThread(AsyncFile* file);
-  AsyncFile* createAsyncFile(bool bound);
+  AsyncIoThread* createIoThread(bool bound);
+  AsyncFile* createAsyncFile();
   AsyncFile* getIdleFile(bool bound);
   void pushIdleFile(AsyncFile*);
 
   Vector<AsyncIoThread*> theThreads;// List of all created threads
   Vector<AsyncFile*> theFiles;      // List all created AsyncFiles
-  Vector<AsyncFile*> theIdleBoundFiles;   // List of idle AsyncFiles
-  Vector<AsyncFile*> theIdleUnboundFiles; // List of idle AsyncFiles
+  Vector<AsyncFile*> theIdleFiles;  // List of idle AsyncFiles
   OpenFiles theOpenFiles;           // List of open AsyncFiles
 
   BaseString m_base_path[FsOpenReq::BP_MAX];
@@ -105,6 +105,11 @@ private:
   void readWriteRequest(  int action, Signal * signal );
 
   static Uint32 translateErrno(int aErrno);
+
+  Uint32 m_bound_threads_cnt;
+  Uint32 m_unbound_threads_cnt;
+  Uint32 m_active_bound_threads_cnt;
+  void cnt_active_bound(int val);
 public:
   const BaseString& get_base_path(Uint32 no) const;
 };

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp	2011-05-23 10:38:41 +0000
@@ -249,6 +249,7 @@ template <class T> inline T* Pool<T>::ge
    }
    --theTop;
    tmp = theList[theTop];
+   tmp->atGet();
    return tmp;
 }
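
The atGet() hook gives pooled objects a reset point at hand-out time; for
Request it clears m_do_bind so a recycled object can never carry a stale
bind flag into a new request. The pattern in isolation (simplified,
hypothetical pool):

  struct Request {
    bool m_do_bind;
    void atGet() { m_do_bind = false; }  // reset per-use state on acquire
  };

  template <class T>
  T* pool_get(T** list, unsigned& top) { // simplified Pool<T>::get
    T* tmp = list[--top];
    tmp->atGet();                        // always hand out a clean object
    return tmp;
  }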
 

=== modified file 'storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp'
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp	2011-04-09 15:48:21 +0000
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp	2011-05-24 15:06:09 +0000
@@ -73,6 +73,24 @@ void Qmgr::initData()
 #ifdef ERROR_INSERT
   nodeFailCount = 0;
 #endif
+
+  cfailureNr = 1;
+  ccommitFailureNr = 1;
+  cprepareFailureNr = 1;
+  cnoFailedNodes = 0;
+  cnoPrepFailedNodes = 0;
+  creadyDistCom = ZFALSE;
+  cpresident = ZNIL;
+  c_start.m_president_candidate = ZNIL;
+  c_start.m_president_candidate_gci = 0;
+  cpdistref = 0;
+  cneighbourh = ZNIL;
+  cneighbourl = ZNIL;
+  cdelayRegreq = ZDELAY_REGREQ;
+  cactivateApiCheck = 0;
+  c_allow_api_connect = 0;
+  ctoStatus = Q_NOT_ACTIVE;
+  clatestTransactionCheck = 0;
 }//Qmgr::initData()
 
 void Qmgr::initRecords() 

=== modified file 'storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-05-02 13:36:19 +0000
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-05-24 15:06:09 +0000
@@ -2481,27 +2481,9 @@ void Qmgr::findNeighbours(Signal* signal
 /*---------------------------------------------------------------------------*/
 void Qmgr::initData(Signal* signal) 
 {
-  cfailureNr = 1;
-  ccommitFailureNr = 1;
-  cprepareFailureNr = 1;
-  cnoFailedNodes = 0;
-  cnoPrepFailedNodes = 0;
-  creadyDistCom = ZFALSE;
-  cpresident = ZNIL;
-  c_start.m_president_candidate = ZNIL;
-  c_start.m_president_candidate_gci = 0;
-  cpdistref = 0;
-  cneighbourh = ZNIL;
-  cneighbourl = ZNIL;
-  cdelayRegreq = ZDELAY_REGREQ;
-  cactivateApiCheck = 0;
-  c_allow_api_connect = 0;
-  ctoStatus = Q_NOT_ACTIVE;
-
   NDB_TICKS now = NdbTick_CurrentMillisecond();
   interface_check_timer.setDelay(1000);
   interface_check_timer.reset(now);
-  clatestTransactionCheck = 0;
 
   // catch-all for missing initializations
   memset(&arbitRec, 0, sizeof(arbitRec));

=== modified file 'storage/ndb/src/kernel/blocks/trix/Trix.cpp'
--- a/storage/ndb/src/kernel/blocks/trix/Trix.cpp	2011-05-24 14:26:59 +0000
+++ b/storage/ndb/src/kernel/blocks/trix/Trix.cpp	2011-05-25 15:03:11 +0000
@@ -1584,7 +1584,7 @@ bool
 Trix::statOpSeize(Uint32& statPtrI)
 {
   StatOpPtr statPtr;
-  if (ERROR_INSERTED(17001) ||
+  if (ERROR_INSERTED(18001) ||
       !c_statOpPool.seize(statPtr))
   {
     jam();
@@ -1600,7 +1600,7 @@ Trix::statOpSeize(Uint32& statPtrI)
   stat.m_ownPtrI = statPtrI;
 
   SubscriptionRecPtr subRecPtr;
-  if (ERROR_INSERTED(17002) ||
+  if (ERROR_INSERTED(18002) ||
       !c_theSubscriptions.seize(subRecPtr))
   {
     jam();
@@ -1987,8 +1987,8 @@ Trix::statUtilPrepareConf(Signal* signal
   util.m_prepareId = utilConf->prepareId;
 
   const Uint32 ot = send.m_operationType;
-  if (ERROR_INSERTED(17011) && ot == UtilPrepareReq::Read ||
-      ERROR_INSERTED(17012) && ot != UtilPrepareReq::Read)
+  if (ERROR_INSERTED(18011) && ot == UtilPrepareReq::Read ||
+      ERROR_INSERTED(18012) && ot != UtilPrepareReq::Read)
   {
     jam();
     CLEAR_ERROR_INSERT_VALUE;
@@ -2400,9 +2400,9 @@ Trix::statCleanExecute(Signal* signal, S
   releaseSections(handle);
 
   const Uint32 rt = stat.m_requestType;
-  if (ERROR_INSERTED(17021) && rt == IndexStatReq::RT_CLEAN_NEW ||
-      ERROR_INSERTED(17022) && rt == IndexStatReq::RT_CLEAN_OLD ||
-      ERROR_INSERTED(17023) && rt == IndexStatReq::RT_CLEAN_ALL)
+  if (ERROR_INSERTED(18021) && rt == IndexStatReq::RT_CLEAN_NEW ||
+      ERROR_INSERTED(18022) && rt == IndexStatReq::RT_CLEAN_OLD ||
+      ERROR_INSERTED(18023) && rt == IndexStatReq::RT_CLEAN_ALL)
   {
     jam();
     CLEAR_ERROR_INSERT_VALUE;
@@ -2574,7 +2574,7 @@ Trix::statScanExecute(Signal* signal, St
   scan.m_keyBytes += kb;
   releaseSections(handle);
 
-  if (ERROR_INSERTED(17024))
+  if (ERROR_INSERTED(18024))
   {
     jam();
     CLEAR_ERROR_INSERT_VALUE;

=== modified file 'storage/ndb/src/kernel/vm/LongSignal.cpp'
--- a/storage/ndb/src/kernel/vm/LongSignal.cpp	2011-02-22 06:57:40 +0000
+++ b/storage/ndb/src/kernel/vm/LongSignal.cpp	2011-05-17 23:29:55 +0000
@@ -414,3 +414,58 @@ releaseSection(SPC_ARG Uint32 firstSegme
                                      p->m_lastSegment);
   }
 }
+
+bool
+writeToSection(Uint32 firstSegmentIVal, Uint32 offset,
+               const Uint32* src,
+               Uint32 len)
+{
+  Ptr<SectionSegment> segPtr;
+
+  if (len == 0)
+    return true;
+
+  if (firstSegmentIVal == RNIL)
+  {
+    return false;
+  }
+  else
+  {
+    /* Section has at least one segment with data already */
+    g_sectionSegmentPool.getPtr(segPtr, firstSegmentIVal);
+
+    Uint32 existingLen= segPtr.p->m_sz;
+
+    assert(existingLen > 0);
+    if (offset >= existingLen)
+      return false;         /* No sparse sections or extension */
+    if (len > (existingLen - offset))
+      return false;         /* Would be extending beyond current length */
+
+    /* Advance through segments to the one containing the start offset */
+    while (offset >= SectionSegment::DataLength)
+    {
+      g_sectionSegmentPool.getPtr(segPtr, segPtr.p->m_nextSegment);
+      offset-= SectionSegment::DataLength;
+    }
+
+    /* Now overwrite the words */
+    while (true)
+    {
+      Uint32 wordsToCopy = MIN(len,
+                               SectionSegment::DataLength - offset);
+      memcpy(&segPtr.p->theData[offset], src, (wordsToCopy << 2));
+      src+= wordsToCopy;
+      len-= wordsToCopy;
+
+      if (!len)
+      {
+        return true;
+      }
+
+      offset = 0;
+      g_sectionSegmentPool.getPtr(segPtr, segPtr.p->m_nextSegment);
+    }
+  }
+}
+
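
writeToSection deliberately refuses to grow or sparsify a section: the
whole [offset, offset+len) range must already exist. Usage sketch against
a hypothetical 100-word section (sectionIVal assumed valid):

  Uint32 patch[2] = { 0xdead, 0xbeef };
  bool a = writeToSection(sectionIVal, 40, patch, 2);   // true: fits inside
  bool b = writeToSection(sectionIVal, 99, patch, 2);   // false: would extend
  bool c = writeToSection(sectionIVal, 100, patch, 1);  // false: offset past end
  bool d = writeToSection(RNIL, 0, patch, 0);           // true: len 0 is a no-op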

=== modified file 'storage/ndb/src/kernel/vm/LongSignalImpl.hpp'
--- a/storage/ndb/src/kernel/vm/LongSignalImpl.hpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/src/kernel/vm/LongSignalImpl.hpp	2011-05-17 23:29:55 +0000
@@ -48,6 +48,8 @@ bool import(SPC_ARG Ptr<SectionSegment>
 bool appendToSection(SPC_ARG Uint32& firstSegmentIVal, const Uint32* src, Uint32 len);
 /* dupSection : Create new section as copy of src section */
 bool dupSection(SPC_ARG Uint32& copyFirstIVal, Uint32 srcFirstIVal);
+/* writeToSection : Overwrite section from offset with data.  */
+bool writeToSection(Uint32 firstSegmentIVal, Uint32 offset, const Uint32* src, Uint32 len);
 
 void release(SPC_ARG SegmentedSectionPtr & ptr);
 void releaseSection(SPC_ARG Uint32 firstSegmentIVal);

=== modified file 'storage/ndb/src/kernel/vm/SimulatedBlock.cpp'
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp	2011-05-16 12:24:55 +0000
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp	2011-05-17 23:29:55 +0000
@@ -1578,6 +1578,13 @@ SimulatedBlock::dupSection(Uint32& copyF
   return ::dupSection(SB_SP_ARG copyFirstIVal, srcFirstIVal);
 }
 
+bool
+SimulatedBlock::writeToSection(Uint32 firstSegmentIVal, Uint32 offset,
+                               const Uint32* src, Uint32 len)
+{
+  return ::writeToSection(firstSegmentIVal, offset, src, len);
+}
+
 class SectionSegmentPool& 
 SimulatedBlock::getSectionSegmentPool(){
   return g_sectionSegmentPool;

=== modified file 'storage/ndb/src/kernel/vm/SimulatedBlock.hpp'
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp	2011-05-16 12:24:55 +0000
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp	2011-05-17 23:29:55 +0000
@@ -350,6 +350,7 @@ protected:
   bool import(SegmentedSectionPtr& ptr, const Uint32* src, Uint32 len);
   bool appendToSection(Uint32& firstSegmentIVal, const Uint32* src, Uint32 len);
   bool dupSection(Uint32& copyFirstIVal, Uint32 srcFirstIVal);
+  bool writeToSection(Uint32 firstSegmentIVal, Uint32 offset, const Uint32* src, Uint32 len);
 
   void handle_invalid_sections_in_send_signal(Signal*) const;
   void handle_lingering_sections_after_execute(Signal*) const;

=== modified file 'storage/ndb/src/mgmsrv/Config.hpp'
--- a/storage/ndb/src/mgmsrv/Config.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/mgmsrv/Config.hpp	2011-05-24 11:51:39 +0000
@@ -22,7 +22,7 @@
 #include <kernel/NodeBitmask.hpp>
 #include "ConfigInfo.hpp"
 #include <mgmapi.h>
-#include <mgmapi_configuration.hpp>
+#include "../mgmapi/mgmapi_configuration.hpp"
 
 
 /**

=== modified file 'storage/ndb/src/ndbapi/NdbDictionary.cpp'
--- a/storage/ndb/src/ndbapi/NdbDictionary.cpp	2011-02-16 14:53:53 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp	2011-05-17 23:29:55 +0000
@@ -934,6 +934,36 @@ NdbDictionary::Table::getRowGCIIndicator
 }
 
 void
+NdbDictionary::Table::setExtraRowGciBits(Uint32 val)
+{
+  if (val <= 31)
+  {
+    m_impl.m_extra_row_gci_bits = val;
+  }
+}
+
+Uint32
+NdbDictionary::Table::getExtraRowGciBits() const
+{
+  return m_impl.m_extra_row_gci_bits;
+}
+
+void
+NdbDictionary::Table::setExtraRowAuthorBits(Uint32 val)
+{
+  if (val <= 31)
+  {
+    m_impl.m_extra_row_author_bits = val;
+  }
+}
+
+Uint32
+NdbDictionary::Table::getExtraRowAuthorBits() const
+{
+  return m_impl.m_extra_row_author_bits;
+}
+
+void
 NdbDictionary::Table::setForceVarPart(bool val){
   m_impl.m_force_var_part = val;
 }

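The new accessors are plain get/set pairs on the table object; requesting
more than 31 bits is silently ignored, as the setters above show. A sketch
(hypothetical table name, for illustration only):

  NdbDictionary::Table tab("t1");
  /* ... column definitions as usual ... */
  tab.setExtraRowGciBits(8);
  tab.setExtraRowAuthorBits(4);
  assert(tab.getExtraRowGciBits() == 8);
  assert(tab.getExtraRowAuthorBits() == 4);

  tab.setExtraRowGciBits(32);   /* > 31 : ignored, value unchanged */
  assert(tab.getExtraRowGciBits() == 8);
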
=== modified file 'storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2011-05-19 09:16:32 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2011-05-25 15:03:11 +0000
@@ -378,6 +378,10 @@ NdbColumnImpl::create_pseudo_columns()
     NdbColumnImpl::create_pseudo("NDB$ROWID");
   NdbDictionary::Column::ROW_GCI=
     NdbColumnImpl::create_pseudo("NDB$ROW_GCI");
+  NdbDictionary::Column::ROW_GCI64 =
+    NdbColumnImpl::create_pseudo("NDB$ROW_GCI64");
+  NdbDictionary::Column::ROW_AUTHOR =
+    NdbColumnImpl::create_pseudo("NDB$ROW_AUTHOR");
   NdbDictionary::Column::ANY_VALUE=
     NdbColumnImpl::create_pseudo("NDB$ANY_VALUE");
   NdbDictionary::Column::COPY_ROWID=
@@ -412,6 +416,8 @@ NdbColumnImpl::destory_pseudo_columns()
   delete NdbDictionary::Column::RECORDS_IN_RANGE;
   delete NdbDictionary::Column::ROWID;
   delete NdbDictionary::Column::ROW_GCI;
+  delete NdbDictionary::Column::ROW_GCI64;
+  delete NdbDictionary::Column::ROW_AUTHOR;
   delete NdbDictionary::Column::ANY_VALUE;
   delete NdbDictionary::Column::OPTIMIZE;
   NdbDictionary::Column::FRAGMENT= 0;
@@ -425,6 +431,8 @@ NdbColumnImpl::destory_pseudo_columns()
   NdbDictionary::Column::RECORDS_IN_RANGE= 0;
   NdbDictionary::Column::ROWID= 0;
   NdbDictionary::Column::ROW_GCI= 0;
+  NdbDictionary::Column::ROW_GCI64= 0;
+  NdbDictionary::Column::ROW_AUTHOR= 0;
   NdbDictionary::Column::ANY_VALUE= 0;
   NdbDictionary::Column::OPTIMIZE= 0;
 
@@ -508,6 +516,18 @@ NdbColumnImpl::create_pseudo(const char
     col->m_impl.m_attrSize = 8;
     col->m_impl.m_arraySize = 1;
     col->m_impl.m_nullable = true;
+  } else if(!strcmp(name, "NDB$ROW_GCI64")){
+    col->setType(NdbDictionary::Column::Bigunsigned);
+    col->m_impl.m_attrId = AttributeHeader::ROW_GCI64;
+    col->m_impl.m_attrSize = 8;
+    col->m_impl.m_arraySize = 1;
+    col->m_impl.m_nullable = true;
+  } else if(!strcmp(name, "NDB$ROW_AUTHOR")){
+    col->setType(NdbDictionary::Column::Unsigned);
+    col->m_impl.m_attrId = AttributeHeader::ROW_AUTHOR;
+    col->m_impl.m_attrSize = 4;
+    col->m_impl.m_arraySize = 1;
+    col->m_impl.m_nullable = true;
   } else if(!strcmp(name, "NDB$ANY_VALUE")){
     col->setType(NdbDictionary::Column::Unsigned);
     col->m_impl.m_attrId = AttributeHeader::ANY_VALUE;
@@ -657,6 +677,8 @@ NdbTableImpl::init(){
   m_hash_map_id = RNIL;
   m_hash_map_version = ~0;
   m_storageType = NDB_STORAGETYPE_DEFAULT;
+  m_extra_row_gci_bits = 0;
+  m_extra_row_author_bits = 0;
 }
 
 bool
@@ -851,6 +873,22 @@ NdbTableImpl::equal(const NdbTableImpl&
     DBUG_RETURN(false);
   }
 
+  if (m_extra_row_gci_bits != obj.m_extra_row_gci_bits)
+  {
+    DBUG_PRINT("info",("m_extra_row_gci_bits %d != %d",
+                       (int32)m_extra_row_gci_bits,
+                       (int32)obj.m_extra_row_gci_bits));
+    DBUG_RETURN(false);
+  }
+
+  if (m_extra_row_author_bits != obj.m_extra_row_author_bits)
+  {
+    DBUG_PRINT("info",("m_extra_row_author_bits %d != %d",
+                       (int32)m_extra_row_author_bits,
+                       (int32)obj.m_extra_row_author_bits));
+    DBUG_RETURN(false);
+  }
+
   DBUG_RETURN(true);
 }
 
@@ -916,6 +954,8 @@ NdbTableImpl::assign(const NdbTableImpl&
   m_fragmentCount = org.m_fragmentCount;
   
   m_single_user_mode = org.m_single_user_mode;
+  m_extra_row_gci_bits = org.m_extra_row_gci_bits;
+  m_extra_row_author_bits = org.m_extra_row_author_bits;
 
   if (m_index != 0)
     delete m_index;
@@ -2770,6 +2810,8 @@ NdbDictInterface::parseTableInfo(NdbTabl
   impl->m_maxLoadFactor = tableDesc->MaxLoadFactor;
   impl->m_single_user_mode = tableDesc->SingleUserMode;
   impl->m_storageType = tableDesc->TableStorageType;
+  impl->m_extra_row_gci_bits = tableDesc->ExtraRowGCIBits;
+  impl->m_extra_row_author_bits = tableDesc->ExtraRowAuthorBits;
 
   impl->m_indexType = (NdbDictionary::Object::Type)
     getApiConstant(tableDesc->TableType,
@@ -3365,7 +3407,9 @@ NdbDictInterface::compChangeMask(const N
      impl.m_tablespace_version != old_impl.m_tablespace_version ||
      impl.m_id != old_impl.m_id ||
      impl.m_version != old_impl.m_version ||
-     sz < old_sz)
+     sz < old_sz ||
+     impl.m_extra_row_gci_bits != old_impl.m_extra_row_gci_bits ||
+     impl.m_extra_row_author_bits != old_impl.m_extra_row_author_bits)
   {
     DBUG_PRINT("info", ("Old and new table not compatible"));
     goto invalid_alter_table;
@@ -3553,6 +3597,8 @@ NdbDictInterface::serializeTableDesc(Ndb
   tmpTab->LinearHashFlag = impl.m_linear_flag;
   tmpTab->SingleUserMode = impl.m_single_user_mode;
   tmpTab->ForceVarPartFlag = impl.m_force_var_part;
+  tmpTab->ExtraRowGCIBits = impl.m_extra_row_gci_bits;
+  tmpTab->ExtraRowAuthorBits = impl.m_extra_row_author_bits;
 
   tmpTab->FragmentType = getKernelConstant(impl.m_fragmentType,
  					   fragmentTypeMapping,
@@ -8444,6 +8490,8 @@ const NdbDictionary::Column * NdbDiction
 const NdbDictionary::Column * NdbDictionary::Column::RECORDS_IN_RANGE = 0;
 const NdbDictionary::Column * NdbDictionary::Column::ROWID = 0;
 const NdbDictionary::Column * NdbDictionary::Column::ROW_GCI = 0;
+const NdbDictionary::Column * NdbDictionary::Column::ROW_GCI64 = 0;
+const NdbDictionary::Column * NdbDictionary::Column::ROW_AUTHOR = 0;
 const NdbDictionary::Column * NdbDictionary::Column::ANY_VALUE = 0;
 const NdbDictionary::Column * NdbDictionary::Column::COPY_ROWID = 0;
 const NdbDictionary::Column * NdbDictionary::Column::OPTIMIZE = 0;

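The two new pseudo columns read like the existing ROW_GCI, e.g. piggybacked
on a primary-key read (the testRestartGci changes below do exactly this).
A sketch, with the transaction and read operation assumed already defined:

  static int readRowGciAndAuthor(NdbTransaction* trans,
                                 NdbOperation* readOp,
                                 Uint64* gci, Uint32* author)
  {
    /* ROW_GCI64 is Bigunsigned, ROW_AUTHOR is Unsigned (see above) */
    NdbRecAttr* gciRA = readOp->getValue(NdbDictionary::Column::ROW_GCI64);
    NdbRecAttr* authorRA = readOp->getValue(NdbDictionary::Column::ROW_AUTHOR);
    if (gciRA == NULL || authorRA == NULL)
      return -1;
    if (trans->execute(NdbTransaction::Commit) != 0)
      return -1;
    *gci = gciRA->u_64_value();
    *author = authorRA->u_32_value();
    return 0;
  }
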
=== modified file 'storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp	2011-05-12 09:01:21 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp	2011-05-17 23:29:55 +0000
@@ -238,6 +238,8 @@ public:
   Uint16 m_fragmentCount;
   Uint8 m_single_user_mode;
   Uint8 m_storageType;  // NDB_STORAGETYPE_MEMORY or _DISK or DEFAULT
+  Uint8 m_extra_row_gci_bits;
+  Uint8 m_extra_row_author_bits;
 
   NdbIndexImpl * m_index;
   NdbColumnImpl * getColumn(unsigned attrId);

=== modified file 'storage/ndb/src/ndbapi/NdbInfo.cpp'
--- a/storage/ndb/src/ndbapi/NdbInfo.cpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/src/ndbapi/NdbInfo.cpp	2011-05-23 14:05:08 +0000
@@ -23,6 +23,7 @@ NdbInfo::NdbInfo(class Ndb_cluster_conne
                  const char* prefix, const char* dbname,
                  const char* table_prefix) :
   m_connect_count(connection->get_connect_count()),
+  m_min_db_version(0),
   m_connection(connection),
   m_tables_table(NULL), m_columns_table(NULL),
   m_prefix(prefix),
@@ -270,7 +271,9 @@ bool NdbInfo::load_tables()
   }
 
  // After successful load of the tables, set connect count
+  // and the min db version of cluster
   m_connect_count = m_connection->get_connect_count();
+  m_min_db_version = m_connection->get_min_db_version();
   return true;
 }
 
@@ -328,12 +331,14 @@ void NdbInfo::flush_tables()
 bool
 NdbInfo::check_tables()
 {
-  if (m_connection->get_connect_count() != m_connect_count)
+  if (unlikely(m_connection->get_connect_count() != m_connect_count ||
+               m_connection->get_min_db_version() != m_min_db_version))
   {
-    // Connect count has changed -> flush the cached table definitions
+    // Connect count or min db version of cluster has changed
+    //  -> flush the cached table definitions
     flush_tables();
   }
-  if (m_tables.entries() <= NUM_HARDCODED_TABLES)
+  if (unlikely(m_tables.entries() <= NUM_HARDCODED_TABLES))
   {
     // Global table cache is not loaded yet or has been
     // flushed, try to load it

=== modified file 'storage/ndb/src/ndbapi/NdbInfo.hpp'
--- a/storage/ndb/src/ndbapi/NdbInfo.hpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/src/ndbapi/NdbInfo.hpp	2011-05-23 14:05:08 +0000
@@ -89,8 +89,6 @@ public:
   bool init(void);
   ~NdbInfo();
 
-  void flush_tables();
-
   int openTable(const char* table_name, const Table**);
   int openTable(Uint32 tableId, const Table**);
   void closeTable(const Table* table);
@@ -103,6 +101,7 @@ public:
 private:
   static const size_t NUM_HARDCODED_TABLES = 2;
   unsigned m_connect_count;
+  unsigned m_min_db_version;
   class Ndb_cluster_connection* m_connection;
   pthread_mutex_t m_mutex;
   HashMap<BaseString, Table, BaseString_get_key> m_tables;
@@ -119,6 +118,7 @@ private:
   bool load_hardcoded_tables(void);
   bool load_tables();
   bool check_tables();
+  void flush_tables();
 
   BaseString mysql_table_name(const char* table_name) const;
 

=== modified file 'storage/ndb/src/ndbapi/NdbInfoRecAttr.hpp'
--- a/storage/ndb/src/ndbapi/NdbInfoRecAttr.hpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/src/ndbapi/NdbInfoRecAttr.hpp	2011-05-23 13:45:57 +0000
@@ -46,13 +46,18 @@ public:
     return m_len;
   }
 
+  bool isNULL() const {
+    return !m_defined;
+  }
+
 protected:
   friend class NdbInfoScanOperation;
-  NdbInfoRecAttr() : m_data(NULL), m_len(0) {};
+  NdbInfoRecAttr() : m_data(NULL), m_len(0), m_defined(false) {};
   ~NdbInfoRecAttr() {};
 private:
   const char* m_data;
   Uint32 m_len;
+  bool m_defined;
 };
 
 #endif

=== modified file 'storage/ndb/src/ndbapi/NdbInfoScanOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbInfoScanOperation.cpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/src/ndbapi/NdbInfoScanOperation.cpp	2011-05-23 13:45:57 +0000
@@ -418,34 +418,35 @@ NdbInfoScanOperation::execDBINFO_TRANSID
   m_rows_received++;
   DBUG_PRINT("info", ("rows received: %d", m_rows_received));
 
-  const Uint32* start = signal->ptr[0].p;
-  const Uint32* end = start + signal->ptr[0].sz;
-
-  DBUG_PRINT("info", ("start: %p, end: %p", start, end));
-  for (unsigned col = 0; col < m_table->columns(); col++)
+  // Reset all recattr values before reading the new row
+  for (unsigned i = 0; i < m_recAttrs.size(); i++)
   {
+    if (m_recAttrs[i])
+      m_recAttrs[i]->m_defined = false;
+  }
 
-    // Read attribute header
-    const AttributeHeader ah(*start);
-    const Uint32 len = ah.getByteSize();
+  // Read attributes from long signal section
+  AttributeHeader* attr = (AttributeHeader*)signal->ptr[0].p;
+  AttributeHeader* last = (AttributeHeader*)(signal->ptr[0].p +
+                                            signal->ptr[0].sz);
+  while (attr < last)
+  {
+    const Uint32 col = attr->getAttributeId();
+    const Uint32 len = attr->getByteSize();
     DBUG_PRINT("info", ("col: %u, len: %u", col, len));
-
-    // Step past attribute header
-    start += ah.getHeaderSize();
-
-    NdbInfoRecAttr* attr = m_recAttrs[col];
-    if (attr)
+    if (col < m_recAttrs.size())
     {
-      // Update NdbInfoRecAttr pointer and length
-      attr->m_data = (const char*)start;
-      attr->m_len = len;
+      NdbInfoRecAttr* rec_attr = m_recAttrs[col];
+      if (rec_attr)
+      {
+        // Update NdbInfoRecAttr pointer, length and defined flag
+        rec_attr->m_data = (const char*)attr->getDataPtr();
+        rec_attr->m_len = len;
+        rec_attr->m_defined = true;
+      }
     }
 
-    // Step to next attribute header
-    start += ah.getDataSize();
-
-    // No reading beyond end of signal size
-    assert(start <= end);
+    attr = attr->getNext();
   }
 
   DBUG_RETURN(false); // Don't wait more, process this row

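The rewritten loop walks the standard packed attribute stream : each
AttributeHeader word is immediately followed by its data words, and
getNext() steps over both. The same walk in isolation (illustrative
sketch only):

  static void dumpAttributes(const Uint32* sectionData, Uint32 sectionLen)
  {
    AttributeHeader* attr = (AttributeHeader*)sectionData;
    AttributeHeader* last = (AttributeHeader*)(sectionData + sectionLen);
    while (attr < last)
    {
      ndbout_c("attrId : %u, bytes : %u",
               attr->getAttributeId(), attr->getByteSize());
      attr = attr->getNext();   /* step over header word + data words */
    }
  }
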
=== modified file 'storage/ndb/src/ndbapi/NdbOperationExec.cpp'
--- a/storage/ndb/src/ndbapi/NdbOperationExec.cpp	2011-05-11 13:31:44 +0000
+++ b/storage/ndb/src/ndbapi/NdbOperationExec.cpp	2011-05-25 13:19:02 +0000
@@ -1120,7 +1120,8 @@ NdbOperation::buildSignalsNdbRecord(Uint
   /* Final update signal words */
   if ((tOpType == InsertRequest) || 
       (tOpType == WriteRequest) ||
-      (tOpType == UpdateRequest))
+      (tOpType == UpdateRequest) ||
+      (tOpType == RefreshRequest))
   {
     updRow= m_attribute_row;
     NdbBlob *currentBlob= theBlobList;
@@ -1333,7 +1334,8 @@ NdbOperation::buildSignalsNdbRecord(Uint
 
   if ((tOpType == InsertRequest) ||
       (tOpType == WriteRequest) ||
-      (tOpType == UpdateRequest))
+      (tOpType == UpdateRequest) ||
+      (tOpType == RefreshRequest))
   {
     /* Handle setAnyValue() for all cases except delete */
     if ((m_flags & OF_USE_ANY_VALUE) != 0)

=== modified file 'storage/ndb/src/ndbapi/NdbTransaction.cpp'
--- a/storage/ndb/src/ndbapi/NdbTransaction.cpp	2011-04-27 10:48:16 +0000
+++ b/storage/ndb/src/ndbapi/NdbTransaction.cpp	2011-05-25 13:19:02 +0000
@@ -2209,6 +2209,7 @@ NdbTransaction::receiveTCKEY_FAILCONF(co
       case NdbOperation::DeleteRequest:
       case NdbOperation::WriteRequest:
       case NdbOperation::UnlockRequest:
+      case NdbOperation::RefreshRequest:
 	tOp = tOp->next();
 	break;
       case NdbOperation::ReadRequest:
@@ -2713,6 +2714,52 @@ NdbTransaction::writeTuple(const NdbReco
   return op;
 }
 
+const NdbOperation *
+NdbTransaction::refreshTuple(const NdbRecord *key_rec, const char *key_row,
+                             const NdbOperation::OperationOptions *opts,
+                             Uint32 sizeOfOptions)
+{
+  /* Check TC node version lockless */
+  {
+    Uint32 tcVer = theNdb->theImpl->getNodeInfo(theDBnode).m_info.m_version;
+    if (unlikely(! ndb_refresh_tuple(tcVer)))
+    {
+      /* Function not implemented yet */
+      setOperationErrorCodeAbort(4003);
+      return NULL;
+    }
+  }
+
+  /* Check that the NdbRecord specifies the full primary key. */
+  if (!(key_rec->flags & NdbRecord::RecHasAllKeys))
+  {
+    setOperationErrorCodeAbort(4292);
+    return NULL;
+  }
+
+  Uint8 keymask[NDB_MAX_ATTRIBUTES_IN_TABLE/8];
+  bzero(keymask, sizeof(keymask));
+  for (Uint32 i = 0; i<key_rec->key_index_length; i++)
+  {
+    Uint32 id = key_rec->columns[key_rec->key_indexes[i]].attrId;
+    keymask[(id / 8)] |= (1 << (id & 7));
+  }
+
+  NdbOperation *op= setupRecordOp(NdbOperation::RefreshRequest,
+                                  NdbOperation::LM_Exclusive,
+                                  NdbOperation::AbortOnError,
+                                  key_rec, key_row,
+                                  key_rec, key_row,
+                                  keymask /* mask */,
+                                  opts,
+                                  sizeOfOptions);
+  if(!op)
+    return op;
+
+  theSimpleState= 0;
+
+  return op;
+}
 
 NdbScanOperation *
 NdbTransaction::scanTable(const NdbRecord *result_record,

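Driving the new operation from application code is analogous to
writeTuple(); a sketch (illustrative only, options omitted; the NdbRecord
must cover the full primary key or error 4292 results, as above):

  static int refreshOneRow(NdbTransaction* trans,
                           const NdbRecord* keyRecord,
                           const char* keyRow)
  {
    const NdbOperation* op =
      trans->refreshTuple(keyRecord, keyRow, NULL, 0);
    if (op == NULL)
      return trans->getNdbError().code;  /* e.g. 4003 from an old TC */
    return trans->execute(NdbTransaction::Commit);
  }
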
=== modified file 'storage/ndb/src/ndbapi/TransporterFacade.hpp'
--- a/storage/ndb/src/ndbapi/TransporterFacade.hpp	2011-02-24 07:47:22 +0000
+++ b/storage/ndb/src/ndbapi/TransporterFacade.hpp	2011-05-23 14:05:08 +0000
@@ -303,6 +303,12 @@ unsigned Ndb_cluster_connection_impl::ge
 }
 
 inline
+unsigned Ndb_cluster_connection_impl::get_min_db_version() const
+{
+  return m_transporter_facade->getMinDbNodeVersion();
+}
+
+inline
 bool
 TransporterFacade::get_node_alive(NodeId n) const {
   if (theClusterMgr)

=== modified file 'storage/ndb/src/ndbapi/ndb_cluster_connection.cpp'
--- a/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp	2011-04-15 06:29:59 +0000
+++ b/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp	2011-05-23 14:05:08 +0000
@@ -332,6 +332,11 @@ unsigned Ndb_cluster_connection::get_con
   return m_impl.get_connect_count();
 }
 
+unsigned Ndb_cluster_connection::get_min_db_version() const
+{
+  return m_impl.get_min_db_version();
+}
+
 int Ndb_cluster_connection::get_latest_error() const
 {
   return m_impl.m_latest_error;

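This also lets applications gate new features on the connected data node
versions; a sketch, assuming ndb_refresh_tuple() (used in
NdbTransaction.cpp above) is visible to the caller:

  static bool clusterSupportsRefresh(Ndb_cluster_connection* con)
  {
    /* True only when all connected data nodes implement refreshTuple */
    return ndb_refresh_tuple(con->get_min_db_version());
  }
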
=== modified file 'storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp'
--- a/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp	2011-02-04 17:52:38 +0000
+++ b/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp	2011-05-23 14:05:08 +0000
@@ -67,6 +67,7 @@ class Ndb_cluster_connection_impl : publ
   Uint32 get_next_alive_node(Ndb_cluster_connection_node_iter &iter);
 
   inline unsigned get_connect_count() const;
+  inline unsigned get_min_db_version() const;
 public:
   inline Uint64 *get_latest_trans_gci() { return &m_latest_trans_gci; }
 

=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2011-05-19 09:16:32 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2011-05-25 15:03:11 +0000
@@ -768,6 +768,7 @@ ErrorBundle ErrorCodes[] = {
   { 2810, DMEC, TR, "No space left on the device" },
   { 2811, DMEC, TR, "Error with file permissions, please check file system" },
   { 2815, DMEC, TR, "Error in reading files, please check file system" },
+  {  920, DMEC, AE, "Row operation defined after refreshTuple()" },
 
   /**
    * NdbQueryBuilder API errors

=== modified file 'storage/ndb/test/include/HugoOperations.hpp'
--- a/storage/ndb/test/include/HugoOperations.hpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/test/include/HugoOperations.hpp	2011-05-25 13:19:02 +0000
@@ -87,6 +87,11 @@ public:
 		     int recordNo,
 		     int numRecords = 1);
   
+  int pkRefreshRecord(Ndb*,
+                      int recordNo,
+                      int numRecords = 1,
+                      int anyValueInfo = 0); /* 0 - none, 1+ Val | record */
+
   int execute_Commit(Ndb*, 
 		     AbortOption ao = AbortOnError);
   int execute_NoCommit(Ndb*,

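The anyValueInfo parameter ties in with the event checks in testBasic.cpp
below, which decode scenario from anyValue bits 24-31 (minus one), op type
from bits 16-23 and record number from bits 0-15. That implies
pkRefreshRecord() shifts anyValueInfo into the top 16 bits and ORs in the
record number (an inference; the HugoOperations implementation is not part
of this diff). Worked example:

  anyValueInfo = (1 << 8) | 3   = 0x0103      (block 1, op type 3)
  anyValue for record 10        = 0x0103000a
  decoded : scenario = 0x01 - 1 = 0, op type = 0x03, record = 10
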
=== modified file 'storage/ndb/test/include/HugoTransactions.hpp'
--- a/storage/ndb/test/include/HugoTransactions.hpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/test/include/HugoTransactions.hpp	2011-05-25 13:19:02 +0000
@@ -110,6 +110,9 @@ public:
 		   int batch = 1,
 		   bool allowConstraintViolation = true,
 		   int doSleep = 0);
+
+  int pkRefreshRecords(Ndb*, int startFrom, int count = 1, int batch = 1);
+
   int lockRecords(Ndb*,
 		  int records,
 		  int percentToLock = 1,

=== modified file 'storage/ndb/test/ndbapi/testBasic.cpp'
--- a/storage/ndb/test/ndbapi/testBasic.cpp	2011-05-07 06:17:02 +0000
+++ b/storage/ndb/test/ndbapi/testBasic.cpp	2011-05-25 13:19:02 +0000
@@ -2415,6 +2415,811 @@ runEnd899(NDBT_Context* ctx, NDBT_Step*
 }
 
 
+int initSubscription(NDBT_Context* ctx, NDBT_Step* step){
+  /* Subscribe to events on the table, and put access
+   * to the subscription somewhere handy
+   */
+  Ndb* pNdb = GETNDB(step);
+  const NdbDictionary::Table& tab = *ctx->getTab();
+  bool merge_events = false;
+  bool report = false;
+
+  char eventName[1024];
+  sprintf(eventName,"%s_EVENT",tab.getName());
+
+  NdbDictionary::Dictionary *myDict = pNdb->getDictionary();
+
+  if (!myDict) {
+    g_err << "Dictionary not found "
+	  << pNdb->getNdbError().code << " "
+	  << pNdb->getNdbError().message << endl;
+    return NDBT_FAILED;
+  }
+
+  myDict->dropEvent(eventName);
+
+  NdbDictionary::Event myEvent(eventName);
+  myEvent.setTable(tab.getName());
+  myEvent.addTableEvent(NdbDictionary::Event::TE_ALL);
+  for(int a = 0; a < tab.getNoOfColumns(); a++){
+    myEvent.addEventColumn(a);
+  }
+  myEvent.mergeEvents(merge_events);
+
+  if (report)
+    myEvent.setReport(NdbDictionary::Event::ER_SUBSCRIBE);
+
+  int res = myDict->createEvent(myEvent); // Add event to database
+
+  if (res == 0)
+    myEvent.print();
+  else if (myDict->getNdbError().classification ==
+	   NdbError::SchemaObjectExists)
+  {
+    g_info << "Event creation failed event exists\n";
+    res = myDict->dropEvent(eventName);
+    if (res) {
+      g_err << "Failed to drop event: "
+	    << myDict->getNdbError().code << " : "
+	    << myDict->getNdbError().message << endl;
+      return NDBT_FAILED;
+    }
+    // try again
+    res = myDict->createEvent(myEvent); // Add event to database
+    if (res) {
+      g_err << "Failed to create event (1): "
+	    << myDict->getNdbError().code << " : "
+	    << myDict->getNdbError().message << endl;
+      return NDBT_FAILED;
+    }
+  }
+  else
+  {
+    g_err << "Failed to create event (2): "
+	  << myDict->getNdbError().code << " : "
+	  << myDict->getNdbError().message << endl;
+    return NDBT_FAILED;
+  }
+
+  return NDBT_OK;
+}
+
+int removeSubscription(NDBT_Context* ctx, NDBT_Step* step){
+  /* Remove subscription created above */
+  Ndb* pNdb = GETNDB(step);
+  const NdbDictionary::Table& tab = *ctx->getTab();
+
+  char eventName[1024];
+  sprintf(eventName,"%s_EVENT",tab.getName());
+
+  NdbDictionary::Dictionary *myDict = pNdb->getDictionary();
+
+  if (!myDict) {
+    g_err << "Dictionary not found "
+	  << pNdb->getNdbError().code << " "
+	  << pNdb->getNdbError().message << endl;
+    return NDBT_FAILED;
+  }
+
+  myDict->dropEvent(eventName);
+
+  return NDBT_OK;
+}
+
+int runVerifyRowCount(NDBT_Context* ctx, NDBT_Step* step)
+{
+  Ndb* ndb = GETNDB(step);
+
+  /* Check that number of results returned by a normal scan
+   * and per-fragment rowcount sum are equal
+   */
+  Uint32 rowCountSum = 0;
+  Uint32 rowScanCount = 0;
+
+  int result = NDBT_OK;
+  do
+  {
+    NdbTransaction* trans = ndb->startTransaction();
+    CHECK(trans != NULL);
+
+    NdbScanOperation* scan = trans->getNdbScanOperation(ctx->getTab());
+    CHECK(scan != NULL);
+
+    CHECK(scan->readTuples(NdbScanOperation::LM_CommittedRead) == 0);
+
+    NdbInterpretedCode code;
+
+    CHECK(code.interpret_exit_last_row() == 0);
+    CHECK(code.finalise() == 0);
+
+    NdbRecAttr* rowCountRA = scan->getValue(NdbDictionary::Column::ROW_COUNT);
+    CHECK(rowCountRA != NULL);
+    CHECK(scan->setInterpretedCode(&code) == 0);
+
+    CHECK(trans->execute(NoCommit) == 0);
+
+    while (scan->nextResult() == 0)
+      rowCountSum+= rowCountRA->u_32_value();
+
+    trans->close();
+
+    trans = ndb->startTransaction();
+    CHECK(trans != NULL);
+
+    scan = trans->getNdbScanOperation(ctx->getTab());
+    CHECK(scan != NULL);
+
+    CHECK(scan->readTuples(NdbScanOperation::LM_CommittedRead) == 0);
+
+    rowCountRA = scan->getValue(NdbDictionary::Column::ROW_COUNT);
+    CHECK(rowCountRA != NULL);
+
+    CHECK(trans->execute(NoCommit) == 0);
+
+    while (scan->nextResult() == 0)
+      rowScanCount++;
+
+    trans->close();
+  }
+  while(0);
+
+  if (result == NDBT_OK)
+  {
+    ndbout_c("Sum of fragment row counts : %u  Number rows scanned : %u",
+             rowCountSum,
+             rowScanCount);
+
+    if (rowCountSum != rowScanCount)
+    {
+      ndbout_c("MISMATCH");
+      result = NDBT_FAILED;
+    }
+  }
+
+  return result;
+}
+
+enum ApiEventType { Insert, Update, Delete };
+
+template class Vector<ApiEventType>;
+
+struct EventInfo
+{
+  ApiEventType type;
+  int id;
+  Uint64 gci;
+};
+template class Vector<EventInfo>;
+
+int collectEvents(Ndb* ndb,
+                  HugoCalculator& calc,
+                  const NdbDictionary::Table& tab,
+                  Vector<EventInfo>& receivedEvents,
+                  int idCol,
+                  int updateCol,
+                  Vector<NdbRecAttr*>* beforeAttrs,
+                  Vector<NdbRecAttr*>* afterAttrs)
+{
+  int MaxTimeouts = 5;
+  while (true)
+  {
+    int res = ndb->pollEvents(1000);
+
+    if (res > 0)
+    {
+      NdbEventOperation* pOp;
+      while ((pOp = ndb->nextEvent()))
+      {
+        bool isDelete = (pOp->getEventType() == NdbDictionary::Event::TE_DELETE);
+        Vector<NdbRecAttr*>* whichVersion =
+          isDelete?
+          beforeAttrs :
+          afterAttrs;
+        int id = (*whichVersion)[idCol]->u_32_value();
+        Uint64 gci = pOp->getGCI();
+        Uint32 anyValue = pOp->getAnyValue();
+        Uint32 scenario = ((anyValue >> 24) & 0xff) -1;
+        Uint32 optype = ((anyValue >> 16) & 0xff);
+        Uint32 recNum = (anyValue & 0xffff);
+
+        g_err << "# " << receivedEvents.size()
+              << " GCI : " << (gci >> 32)
+              << "/"
+              << (gci & 0xffffffff)
+              << " id : "
+              << id
+              << " scenario : " << scenario
+              << " optype : " << optype
+              << " record : " << recNum
+              << "  ";
+
+        /* Check event has self-consistent data */
+        int updatesValue = (*whichVersion)[updateCol]->u_32_value();
+
+        if ((*whichVersion)[updateCol]->isNULL() ||
+            (*whichVersion)[idCol]->isNULL())
+        {
+          g_err << "Null update/id cols : REFRESH of !EXISTS  ";
+        }
+
+        g_err << "(Updates val = " << updatesValue << ")";
+
+        for (int i=0; i < (int) whichVersion->size(); i++)
+        {
+          /* Check PK columns and also other columns for non-delete */
+          if (!isDelete ||
+              tab.getColumn(i)->getPrimaryKey())
+          {
+            NdbRecAttr* ra = (*whichVersion)[i];
+            if (calc.verifyRecAttr(recNum, updatesValue, ra) != 0)
+            {
+              g_err << "Verify failed on recNum : " << recNum << " with updates value "
+                    << updatesValue << " for column " << ra->getColumn()->getAttrId()
+                    << endl;
+              return NDBT_FAILED;
+            }
+          }
+        }
+
+        EventInfo ei;
+
+        switch (pOp->getEventType())
+        {
+        case NdbDictionary::Event::TE_INSERT:
+          g_err << " Insert event" << endl;
+          ei.type = Insert;
+          break;
+        case NdbDictionary::Event::TE_DELETE:
+          ei.type = Delete;
+          g_err << " Delete event" << endl;
+          break;
+        case NdbDictionary::Event::TE_UPDATE:
+          ei.type = Update;
+          g_err << " Update event" << endl;
+          break;
+        default:
+          g_err << " Event type : " << pOp->getEventType() << endl;
+          abort();
+          break;
+        }
+
+        ei.id = recNum;
+        ei.gci = gci;
+
+        receivedEvents.push_back(ei);
+      }
+    }
+    else
+    {
+      if (--MaxTimeouts == 0)
+      {
+        break;
+      }
+    }
+  }
+
+  return NDBT_OK;
+}
+
+int verifyEvents(const Vector<EventInfo>& receivedEvents,
+                 const Vector<ApiEventType>& expectedEvents,
+                 int records)
+{
+  /* Now verify received events against expected
+   * This is messy as events occurring in the same epoch are unordered
+   * except via id, so we use id-duplicates to determine which event
+   * sequence we're looking at.
+   */
+  g_err << "Received total of " << receivedEvents.size() << " events" << endl;
+  Vector<Uint32> keys;
+  Vector<Uint64> gcis;
+  Uint32 z = 0;
+  Uint64 z2 = 0;
+  keys.fill(records, z);
+  gcis.fill(records, z2);
+  Uint64 currGci = 0;
+
+  for (Uint32 e=0; e < receivedEvents.size(); e++)
+  {
+    EventInfo ei = receivedEvents[e];
+
+    if (ei.gci != currGci)
+    {
+      if (ei.gci < currGci)
+        abort();
+
+      /* Epoch boundary */
+      /* At this point, all id counts must be equal */
+      for (int i=0; i < records; i++)
+      {
+        if (keys[i] != keys[0])
+        {
+          g_err << "Count for id " << i
+                << " is " << keys[i]
+                << " but should be " << keys[0] << endl;
+          return NDBT_FAILED;
+        }
+      }
+
+      currGci = ei.gci;
+    }
+
+    Uint32 eventIndex = keys[ei.id];
+    keys[ei.id]++;
+
+    ApiEventType et = expectedEvents[eventIndex];
+
+    if (ei.type != et)
+    {
+      g_err << "Expected event of type " << et
+            << " but found " << ei.type
+            << " at expectedEvent " << eventIndex
+            << " and event num " << e << endl;
+      return NDBT_FAILED;
+    }
+  }
+
+  return NDBT_OK;
+}
+
+int runRefreshTuple(NDBT_Context* ctx, NDBT_Step* step){
+  int records = ctx->getNumRecords();
+  Ndb* ndb = GETNDB(step);
+
+  /* Now attempt to create EventOperation */
+  NdbEventOperation* pOp;
+  const NdbDictionary::Table& tab = *ctx->getTab();
+
+  char eventName[1024];
+  sprintf(eventName,"%s_EVENT",tab.getName());
+
+  pOp = ndb->createEventOperation(eventName);
+  if (pOp == NULL)
+  {
+    g_err << "Failed to create event operation\n";
+    return NDBT_FAILED;
+  }
+
+  HugoCalculator calc(tab);
+  Vector<NdbRecAttr*> eventAfterRecAttr;
+  Vector<NdbRecAttr*> eventBeforeRecAttr;
+  int updateCol = -1;
+  int idCol = -1;
+
+  /* Now request all attributes */
+  for (int a = 0; a < tab.getNoOfColumns(); a++)
+  {
+    eventAfterRecAttr.push_back(pOp->getValue(tab.getColumn(a)->getName()));
+    eventBeforeRecAttr.push_back(pOp->getPreValue(tab.getColumn(a)->getName()));
+    if (calc.isIdCol(a))
+      idCol = a;
+    if (calc.isUpdateCol(a))
+      updateCol = a;
+  }
+
+  /* Now execute the event */
+  if (pOp->execute())
+  {
+    g_err << "Event operation execution failed : " << pOp->getNdbError() << endl;
+    return NDBT_FAILED;
+  }
+
+  HugoOperations hugoOps(*ctx->getTab());
+  int scenario = 0;
+
+  Vector<ApiEventType> expectedEvents;
+
+  for (scenario = 0; scenario < 2; scenario++)
+  {
+    g_err << "Scenario = " << scenario
+          << " ( Refresh "
+          << ((scenario == 0)? "before":"after")
+          << " operations )" << endl;
+    int optype = 0;
+    bool done = false;
+    int expectedError = 0;
+    do
+    {
+      check(hugoOps.startTransaction(ndb) == 0, hugoOps);
+
+      if (scenario == 0)
+      {
+        g_err << "Refresh before operations" << endl;
+        int anyValue =
+          ((1) << 8) |
+          optype;
+        check(hugoOps.pkRefreshRecord(ndb, 0, records, anyValue) == 0, hugoOps);
+      }
+
+      switch(optype)
+      {
+      case 0:
+      {
+        /* Refresh with no data present */
+        g_err << "  Do nothing" << endl;
+        expectedError = 0; /* Single refresh should always be fine */
+        expectedEvents.push_back(Delete);
+        break;
+      }
+      case 1:
+      {
+        /* [Refresh] Insert [Refresh] */
+        g_err << "  Insert" << endl;
+        check(hugoOps.pkInsertRecord(ndb, 0, records, 1) == 0, hugoOps);
+        if (scenario == 0)
+        {
+          /* Tuple already existed error when we insert after refresh */
+          expectedError = 630;
+          expectedEvents.push_back(Delete);
+        }
+        else
+        {
+          expectedError = 0;
+          expectedEvents.push_back(Insert);
+        }
+        break;
+      }
+      case 2:
+      {
+        /* Refresh */
+        g_err << "  Refresh" << endl;
+        if (scenario == 0)
+        {
+          expectedEvents.push_back(Delete);
+        }
+        else
+        {
+          expectedEvents.push_back(Insert);
+        }
+        expectedError = 0;
+        break;
+      }
+      case 3:
+      {
+        /* [Refresh] Update [Refresh] */
+        g_err << "  Update" << endl;
+        check(hugoOps.pkUpdateRecord(ndb, 0, records, 3) == 0, hugoOps);
+        if (scenario == 0)
+        {
+          expectedError = 920;
+          expectedEvents.push_back(Delete);
+        }
+        else
+        {
+          expectedError = 0;
+          expectedEvents.push_back(Insert);
+        }
+        break;
+      }
+      case 4:
+      {
+        /* [Refresh] Delete [Refresh] */
+        g_err << "  [Refresh] Delete [Refresh]" << endl;
+        if (scenario == 0)
+        {
+          expectedError = 920;
+          expectedEvents.push_back(Delete);
+        }
+        else
+        {
+          expectedError = 0;
+          expectedEvents.push_back(Delete);
+        }
+        check(hugoOps.pkDeleteRecord(ndb, 0, records) == 0, hugoOps);
+        break;
+      }
+      case 5:
+      {
+        g_err << "  Refresh" << endl;
+        expectedError = 0;
+        expectedEvents.push_back(Delete);
+        /* Refresh with no data present */
+        break;
+      }
+      case 6:
+      {
+        g_err << "  Double refresh" << endl;
+        int anyValue =
+          ((2) << 8) |
+          optype;
+        check(hugoOps.pkRefreshRecord(ndb, 0, records, anyValue) == 0, hugoOps);
+        expectedError = 920; /* Row operation defined after refreshTuple() */
+        expectedEvents.push_back(Delete);
+      }
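+      /* Fall through : the double-refresh case is the last op type */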
+      default:
+        done = true;
+        break;
+      }
+
+      if (scenario == 1)
+      {
+        g_err << "Refresh after operations" << endl;
+        int anyValue =
+          ((4) << 8) |
+          optype;
+        check(hugoOps.pkRefreshRecord(ndb, 0, records, anyValue) == 0, hugoOps);
+      }
+
+      int rc = hugoOps.execute_Commit(ndb, AO_IgnoreError);
+      check(rc == expectedError, hugoOps);
+
+      check(hugoOps.closeTransaction(ndb) == 0, hugoOps);
+
+      optype++;
+
+
+      /* Now check fragment counts vs findable row counts */
+      if (runVerifyRowCount(ctx, step) != NDBT_OK)
+        return NDBT_FAILED;
+
+    } while (!done);
+  } // for scenario...
+
+  /* Now check fragment counts vs findable row counts */
+  if (runVerifyRowCount(ctx, step) != NDBT_OK)
+    return NDBT_FAILED;
+
+  /* Now let's dump and check the events */
+  g_err << "Expecting the following sequence..." << endl;
+  for (Uint32 i=0; i < expectedEvents.size(); i++)
+  {
+    g_err << i << ".  ";
+    switch(expectedEvents[i])
+    {
+    case Insert:
+      g_err << "Insert" << endl;
+      break;
+    case Update:
+      g_err << "Update" << endl;
+      break;
+    case Delete:
+      g_err << "Delete" << endl;
+      break;
+    default:
+      abort();
+    }
+  }
+
+  Vector<EventInfo> receivedEvents;
+
+  int rc = collectEvents(ndb, calc, tab, receivedEvents, idCol, updateCol,
+                         &eventBeforeRecAttr,
+                         &eventAfterRecAttr);
+  if (rc == NDBT_OK)
+  {
+    rc = verifyEvents(receivedEvents,
+                      expectedEvents,
+                      records);
+  }
+
+  if (ndb->dropEventOperation(pOp) != 0)
+  {
+    g_err << "Drop Event Operation failed : " << ndb->getNdbError() << endl;
+    return NDBT_FAILED;
+  }
+
+  return rc;
+}
+
+enum PreRefreshOps
+{
+  PR_NONE,
+  PR_INSERT,
+  PR_INSERTDELETE,
+  PR_DELETE
+};
+
+struct RefreshScenario
+{
+  const char*   name;
+  bool          preExist;
+  PreRefreshOps preRefreshOps;
+};
+
+static RefreshScenario refreshTests[] = {
+  { "No row, No pre-ops",        false, PR_NONE         },
+  { "No row, Insert pre-op",     false, PR_INSERT       },
+  { "No row, Insert-Del pre-op", false, PR_INSERTDELETE },
+  { "Row exists, No pre-ops",    true,  PR_NONE         },
+  { "Row exists, Delete pre-op", true,  PR_DELETE       }
+};
+
+enum OpTypes
+{
+  READ_C,
+  READ_S,
+  READ_E,
+  INSERT,
+  UPDATE,
+  WRITE,
+  DELETE,
+  LAST
+};
+
+const char* opTypeNames[] =
+{
+  "READ_C",
+  "READ_S",
+  "READ_E",
+  "INSERT",
+  "UPDATE",
+  "WRITE",
+  "DELETE"
+};
+
+
+int
+runRefreshLocking(NDBT_Context* ctx, NDBT_Step* step)
+{
+  /* Check that refresh in various situations takes the
+   * locks we expect it to take.
+   * Scenario combinations :
+   *   No row pre-existing | Row pre-existing
+   *   Trans1 : Refresh | Insert-Refresh | Insert-Delete-Refresh
+   *            Delete-Refresh
+   *   Trans2 : Read [Committed|Shared|Exclusive] | Insert | Update
+   *            Write | Delete
+   *
+   * Expectations : Read committed  always non-blocking
+   *                Read committed sees pre-existing row
+   *                All other trans2 operations deadlock
+   */
+
+  Ndb* ndb = GETNDB(step);
+  Uint32 numScenarios = sizeof(refreshTests) / sizeof(refreshTests[0]);
+  HugoTransactions hugoTrans(*ctx->getTab());
+
+  for (Uint32 s = 0; s < numScenarios; s++)
+  {
+    RefreshScenario& scenario = refreshTests[s];
+
+    if (scenario.preExist)
+    {
+      /* Create pre-existing tuple */
+      if (hugoTrans.loadTable(ndb, 1) != 0)
+      {
+        g_err << "Pre-exist failed : " << hugoTrans.getNdbError() << endl;
+        return NDBT_FAILED;
+      }
+    }
+
+    if (hugoTrans.startTransaction(ndb) != 0)
+    {
+      g_err << "Start trans failed : " << hugoTrans.getNdbError() << endl;
+      return NDBT_FAILED;
+    }
+
+    g_err << "Scenario : " << scenario.name << endl;
+
+    /* Do pre-refresh ops */
+    switch (scenario.preRefreshOps)
+    {
+    case PR_NONE:
+      break;
+    case PR_INSERT:
+    case PR_INSERTDELETE:
+      if (hugoTrans.pkInsertRecord(ndb, 0) != 0)
+      {
+        g_err << "Pre insert failed : " << hugoTrans.getNdbError() << endl;
+        return NDBT_FAILED;
+      }
+
+      if (scenario.preRefreshOps == PR_INSERT)
+        break;
+    case PR_DELETE:
+      if (hugoTrans.pkDeleteRecord(ndb, 0) != 0)
+      {
+        g_err << "Pre delete failed : " << hugoTrans.getNdbError() << endl;
+        return NDBT_FAILED;
+      }
+      break;
+    }
+
+    /* Then refresh */
+    if (hugoTrans.pkRefreshRecord(ndb, 0) != 0)
+    {
+      g_err << "Refresh failed : " << hugoTrans.getNdbError() << endl;
+      return NDBT_FAILED;
+    }
+
+    /* Now execute */
+    if (hugoTrans.execute_NoCommit(ndb) != 0)
+    {
+      g_err << "Execute failed : " << hugoTrans.getNdbError() << endl;
+      return NDBT_FAILED;
+    }
+
+    {
+      /* Now try ops from another transaction */
+      HugoOperations hugoOps(*ctx->getTab());
+      Uint32 ot = READ_C;
+
+      while (ot < LAST)
+      {
+        if (hugoOps.startTransaction(ndb) != 0)
+        {
+          g_err << "Start trans2 failed : " << hugoOps.getNdbError() << endl;
+          return NDBT_FAILED;
+        }
+
+        g_err << "Operation type : " << opTypeNames[ot] << endl;
+        int res = 0;
+        switch (ot)
+        {
+        case READ_C:
+          res = hugoOps.pkReadRecord(ndb,0,1,NdbOperation::LM_CommittedRead);
+          break;
+        case READ_S:
+          res = hugoOps.pkReadRecord(ndb,0,1,NdbOperation::LM_Read);
+          break;
+        case READ_E:
+          res = hugoOps.pkReadRecord(ndb,0,1,NdbOperation::LM_Exclusive);
+          break;
+        case INSERT:
+          res = hugoOps.pkInsertRecord(ndb, 0);
+          break;
+        case UPDATE:
+          res = hugoOps.pkUpdateRecord(ndb, 0);
+          break;
+        case WRITE:
+          res = hugoOps.pkWriteRecord(ndb, 0);
+          break;
+        case DELETE:
+          res = hugoOps.pkDeleteRecord(ndb, 0);
+          break;
+        case LAST:
+          abort();
+        }
+
+        hugoOps.execute_Commit(ndb);
+
+        if ((ot == READ_C) && (scenario.preExist))
+        {
+          if (hugoOps.getNdbError().code == 0)
+          {
+            g_err << "Read committed succeeded" << endl;
+          }
+          else
+          {
+            g_err << "UNEXPECTED : Read committed failed. " << hugoOps.getNdbError() << endl;
+            return NDBT_FAILED;
+          }
+        }
+        else
+        {
+          if (hugoOps.getNdbError().code == 0)
+          {
+            g_err << opTypeNames[ot] << " succeeded, should not have" << endl;
+            return NDBT_FAILED;
+          }
+        }
+
+        hugoOps.closeTransaction(ndb);
+
+        ot = ot + 1;
+      }
+
+    }
+
+    /* Close refresh transaction */
+    hugoTrans.closeTransaction(ndb);
+
+    if (scenario.preExist)
+    {
+      /* Cleanup pre-existing before next iteration */
+      if (hugoTrans.pkDelRecords(ndb, 0) != 0)
+      {
+        g_err << "Delete pre existing failed : " << hugoTrans.getNdbError() << endl;
+        return NDBT_FAILED;
+      }
+    }
+  }
+
+  return NDBT_OK;
+}
+
+
 NDBT_TESTSUITE(testBasic);
 TESTCASE("PkInsert", 
 	 "Verify that we can insert and delete from this table using PK"
@@ -2746,6 +3551,12 @@ TESTCASE("UnlockUpdateBatch",
   STEP(runPkRead);
   FINALIZER(runClearTable);
 }
+TESTCASE("RefreshTuple",
+         "Test refreshTuple() operation properties"){
+  INITIALIZER(initSubscription);
+  INITIALIZER(runRefreshTuple);
+  FINALIZER(removeSubscription);
+}
 TESTCASE("Bug54986", "")
 {
   INITIALIZER(runBug54986);
@@ -2773,6 +3584,11 @@ TESTCASE("899", "")
   STEP(runTest899);
   FINALIZER(runEnd899);
 }
+TESTCASE("RefreshLocking",
+         "Test Refresh locking properties")
+{
+  INITIALIZER(runRefreshLocking);
+}
 NDBT_TESTSUITE_END(testBasic);
 
 #if 0

=== modified file 'storage/ndb/test/ndbapi/testIndex.cpp'
--- a/storage/ndb/test/ndbapi/testIndex.cpp	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/test/ndbapi/testIndex.cpp	2011-05-25 13:19:02 +0000
@@ -1744,6 +1744,73 @@ runMixed2(NDBT_Context* ctx, NDBT_Step*
   return NDBT_FAILED;
 }
 
+#define check(b, e)                                                     \
+  if (!(b)) { g_err << "ERR: " << step->getName() << " failed on line " << __LINE__ << ": " << e.getNdbError() << endl; return NDBT_FAILED; }
+
+int runRefreshTupleAbort(NDBT_Context* ctx, NDBT_Step* step){
+  int records = ctx->getNumRecords();
+  int loops = ctx->getNumLoops();
+
+  Ndb* ndb = GETNDB(step);
+
+  const NdbDictionary::Table& tab = *ctx->getTab();
+
+  for (int i=0; i < tab.getNoOfColumns(); i++)
+  {
+    if (tab.getColumn(i)->getStorageType() == NDB_STORAGETYPE_DISK)
+    {
+      g_err << "Table has disk column(s) skipping." << endl;
+      return NDBT_OK;
+    }
+  }
+
+
+  g_err << "Loading table." << endl;
+  HugoTransactions hugoTrans(*ctx->getTab());
+  check(hugoTrans.loadTable(ndb, records) == 0, hugoTrans);
+
+  HugoOperations hugoOps(*ctx->getTab());
+
+  /* Check refresh, abort sequence with an ordered index
+   * Previously this gave bugs due to corruption of the
+   * tuple version
+   */
+  while (loops--)
+  {
+    Uint32 numRefresh = 2 + rand() % 10;
+
+    g_err << "Refresh, rollback * " << numRefresh << endl;
+
+    while (--numRefresh)
+    {
+      /* Refresh, rollback */
+      check(hugoOps.startTransaction(ndb) == 0, hugoOps);
+      check(hugoOps.pkRefreshRecord(ndb, 0, records, 0) == 0, hugoOps);
+      check(hugoOps.execute_NoCommit(ndb) == 0, hugoOps);
+      check(hugoOps.execute_Rollback(ndb) == 0, hugoOps);
+      check(hugoOps.closeTransaction(ndb) == 0, hugoOps);
+    }
+
+    g_err << "Refresh, commit" << endl;
+    /* Refresh, commit */
+    check(hugoOps.startTransaction(ndb) == 0, hugoOps);
+    check(hugoOps.pkRefreshRecord(ndb, 0, records, 0) == 0, hugoOps);
+    check(hugoOps.execute_NoCommit(ndb) == 0, hugoOps);
+    check(hugoOps.execute_Commit(ndb) == 0, hugoOps);
+    check(hugoOps.closeTransaction(ndb) == 0, hugoOps);
+
+    g_err << "Update, commit" << endl;
+    /* Update */
+    check(hugoOps.startTransaction(ndb) == 0, hugoOps);
+    check(hugoOps.pkUpdateRecord(ndb, 0, records, 2 + loops) == 0, hugoOps);
+    check(hugoOps.execute_NoCommit(ndb) == 0, hugoOps);
+    check(hugoOps.execute_Commit(ndb) == 0, hugoOps);
+    check(hugoOps.closeTransaction(ndb) == 0, hugoOps);
+  }
+
+  return NDBT_OK;
+}
+
 
 int
 runBuildDuring(NDBT_Context* ctx, NDBT_Step* step){
@@ -3619,6 +3686,16 @@ TESTCASE("Bug60851", "")
   INITIALIZER(runBug60851);
   FINALIZER(createPkIndex_Drop);
 }
+TESTCASE("RefreshWithOrderedIndex",
+         "Refresh tuples with ordered index(es)")
+{
+  TC_PROPERTY("OrderedIndex", 1);
+  TC_PROPERTY("LoggedIndexes", Uint32(0));
+  INITIALIZER(createPkIndex);
+  INITIALIZER(runRefreshTupleAbort);
+  FINALIZER(createPkIndex_Drop);
+  FINALIZER(runClearTable);
+}
 NDBT_TESTSUITE_END(testIndex);
 
 int main(int argc, const char** argv){

=== modified file 'storage/ndb/test/ndbapi/testRestartGci.cpp'
--- a/storage/ndb/test/ndbapi/testRestartGci.cpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/test/ndbapi/testRestartGci.cpp	2011-05-23 16:13:34 +0000
@@ -28,10 +28,12 @@
  */
 
 struct SavedRecord {
-  int m_gci;
+  Uint64 m_gci;
+  Uint32 m_author;
   BaseString m_str;
-  SavedRecord(int _gci, BaseString _str){ 
+  SavedRecord(Uint64 _gci, Uint32 _author, BaseString _str){
     m_gci = _gci; 
+    m_author = _author;
     m_str.assign(_str); 
   }
   SavedRecord(){
@@ -40,7 +42,7 @@ struct SavedRecord {
   };
 };
 Vector<SavedRecord> savedRecords;
-
+Uint64 highestExpectedGci;
 
 #define CHECK(b) if (!(b)) { \
   ndbout << "ERR: "<< step->getName() \
@@ -48,14 +50,80 @@ Vector<SavedRecord> savedRecords;
   result = NDBT_FAILED; \
   break; }
 
+static
+int
+maybeExtraBits(Ndb* ndb, NdbDictionary::Table& tab, int when, void* arg)
+{
+  switch(when){
+  case 0: // Before
+    break;
+  case 1: // After
+    return 0;
+  default:
+    return 0;
+  }
+
+  bool useExtendedBits = ((ndb_rand() % 5) != 0);
+  Uint32 numGciBits= ndb_rand() % 32;      /* 0 -> 31 */
+  Uint32 numAuthorBits = ndb_rand() % 32;  /* 0 -> 31 */
+
+  if (useExtendedBits && (numGciBits || numAuthorBits))
+  {
+    ndbout_c("Creating table %s with %u extra Gci and %u extra Author bits",
+             tab.getName(), numGciBits, numAuthorBits);
+    tab.setExtraRowGciBits(numGciBits);
+    tab.setExtraRowAuthorBits(numAuthorBits);
+  }
+  else
+  {
+    ndbout_c("Table has no extra bits");
+  }
+
+  return 0;
+}
+
+int runDropTable(NDBT_Context* ctx, NDBT_Step* step)
+{
+  GETNDB(step)->getDictionary()->dropTable(ctx->getTab()->getName());
+  return NDBT_OK;
+}
+
+int runCreateTable(NDBT_Context* ctx, NDBT_Step* step)
+{
+
+  runDropTable(ctx, step);
+
+  /* Use extra proc to control whether we have extra bits */
+  if (NDBT_Tables::createTable(GETNDB(step),
+                               ctx->getTab()->getName(),
+                               false, false,
+                               maybeExtraBits) == NDBT_OK)
+  {
+    ctx->setTab(GETNDB(step)->
+                getDictionary()->
+                getTable(ctx->getTab()->getName()));
+    return NDBT_OK;
+  }
+  return NDBT_FAILED;
+}
+
 int runInsertRememberGci(NDBT_Context* ctx, NDBT_Step* step){
   int result = NDBT_OK;
   int records = ctx->getNumRecords();
   HugoOperations hugoOps(*ctx->getTab());
+  HugoCalculator hugoCalc(*ctx->getTab());
   Ndb* pNdb = GETNDB(step);
   int i = 0;
 
-  while(ctx->isTestStopped() == false && i < records){
+  ndbout_c("Inserting %u records", records);
+  Uint64 minGci = 0xffffffffffffffff;
+  Uint64 maxGci = 0;
+  Uint32 numAuthorBits = ctx->getTab()->getExtraRowAuthorBits();
+  Uint32 authorMask = (1 << numAuthorBits) -1;
+  ndbout_c("numAuthor bits is %u, mask is %x",
+           numAuthorBits, authorMask);
+
+  while(i < records){
     // Insert record and read it in same transaction
     CHECK(hugoOps.startTransaction(pNdb) == 0);
     CHECK(hugoOps.pkInsertRecord(pNdb, i) == 0);
@@ -64,14 +132,50 @@ int runInsertRememberGci(NDBT_Context* c
       result = NDBT_FAILED;
       break;
     }
+    /* Set the author column (if present) */
+    Uint32 authorVal = 0;
+    if (ctx->getTab()->getExtraRowAuthorBits() > 0)
+    {
+      authorVal = (ndb_rand() & authorMask);
+      /* Pain here due to need to use NdbRecord */
+      char rowBuff[NDB_MAX_TUPLE_SIZE];
+      const NdbDictionary::Table* tab = ctx->getTab();
+      CHECK(hugoCalc.setValues((Uint8*) rowBuff, tab->getDefaultRecord(),
+                               i, 0) == 0);
+      NdbOperation::SetValueSpec setValueSpec;
+      setValueSpec.column = NdbDictionary::Column::ROW_AUTHOR;
+      setValueSpec.value = &authorVal;
+      NdbOperation::OperationOptions opts;
+      opts.optionsPresent= NdbOperation::OperationOptions::OO_SETVALUE;
+      opts.extraSetValues= &setValueSpec;
+      opts.numExtraSetValues = 1;
+
+      const NdbOperation* update = hugoOps.getTransaction()->
+        updateTuple(tab->getDefaultRecord(), rowBuff,
+                    tab->getDefaultRecord(), rowBuff,
+                    NULL, /* mask */
+                    &opts,
+                    sizeof(opts));
+      CHECK(update != NULL);
+    }
+    /* Read row back */
     CHECK(hugoOps.pkReadRecord(pNdb, i) == 0);
     if (hugoOps.execute_Commit(pNdb) != 0){
       ndbout << "Did not find record in DB " << i << endl;
       result = NDBT_FAILED;
       break;
     }
-    savedRecords.push_back(SavedRecord(hugoOps.getRecordGci(0),
-				     hugoOps.getRecordStr(0)));
+    Uint64 gci;
+    CHECK(hugoOps.getTransaction()->getGCI(&gci) == 0);
+
+    if (gci < minGci)
+      minGci = gci;
+    if (gci > maxGci)
+      maxGci = gci;
+
+    savedRecords.push_back(SavedRecord(gci,
+                                       authorVal,
+                                       hugoOps.getRecordStr(0)));
 
     CHECK(hugoOps.closeTransaction(pNdb) == 0);
     i++;
@@ -79,13 +183,21 @@ int runInsertRememberGci(NDBT_Context* c
     NdbSleep_MilliSleep(10);
   };
 
+  ndbout_c("  Inserted records from gci %x/%x to gci %x/%x",
+           (Uint32) (minGci >> 32), (Uint32) (minGci & 0xffffffff),
+           (Uint32) (maxGci >> 32), (Uint32) (maxGci & 0xffffffff));
+
+  highestExpectedGci = maxGci;
+
   return result;
 }
 
-int runRestart(NDBT_Context* ctx, NDBT_Step* step){
+int runRestartAll(NDBT_Context* ctx, NDBT_Step* step){
   Ndb* pNdb = GETNDB(step);
   NdbRestarter restarter;
 
+  ndbout_c("Restart of all nodes");
+
   // Restart cluster with abort
   if (restarter.restartAll(false, false, true) != 0){
     ctx->stopTest();
@@ -103,6 +215,42 @@ int runRestart(NDBT_Context* ctx, NDBT_S
   return NDBT_OK;
 }
 
+int runRestartOneInitial(NDBT_Context* ctx, NDBT_Step* step){
+  Ndb* pNdb = GETNDB(step);
+  NdbRestarter restarter;
+
+  if (restarter.getNumDbNodes() < 2)
+    return NDBT_OK;
+
+  /* We don't restart the Master as we need a known
+   * non-restarted node to reliably get the restartGci
+   * afterwards.  In principle there is no other reason
+   * the master could not be restarted too.
+   */
+  int node = restarter.getRandomNotMasterNodeId(rand());
+  ndbout_c("Restarting node %u initial", node);
+
+  if (restarter.restartOneDbNode(node,
+                                 true,  /* Initial */
+                                 false, /* Nostart */
+                                 true)  /* Abort */
+      != 0)
+  {
+    ctx->stopTest();
+    return NDBT_FAILED;
+  }
+
+  if (restarter.waitClusterStarted(300) != 0){
+    return NDBT_FAILED;
+  }
+
+  if (pNdb->waitUntilReady() != 0){
+    return NDBT_FAILED;
+  }
+
+  return NDBT_OK;
+}
+
 int runRestartGciControl(NDBT_Context* ctx, NDBT_Step* step){
   int records = ctx->getNumRecords();
   Ndb* pNdb = GETNDB(step);
@@ -118,10 +266,38 @@ int runRestartGciControl(NDBT_Context* c
     NdbSleep_MilliSleep(10);
   }
 
-  // Stop the other thread
-  ctx->stopTest();
+  return runRestartAll(ctx,step);
+}
+
+int runDetermineRestartGci(NDBT_Context* ctx, NDBT_Step* step)
+{
+  Ndb* pNdb = GETNDB(step);
+  Uint32 restartGci;
+  int res = pNdb->getDictionary()->getRestartGCI(&restartGci);
+  if (res != 0)
+  {
+    ndbout << "Failed to retrieve restart gci" << endl;
+    ndbout << pNdb->getDictionary()->getNdbError() << endl;
+    return NDBT_FAILED;
+  }
+
+  ndbout_c("Restart GCI is %u (0x%x)",
+           restartGci, restartGci);
 
-  return runRestart(ctx,step);
+  ndbout_c("Highest expected GCI was %x/%x",
+           (Uint32) (highestExpectedGci >> 32),
+           (Uint32) (highestExpectedGci & 0xffffffff));
+
+  highestExpectedGci = ((Uint64) restartGci) << 32 | 0xffffffff;
+  ndbout_c("Resetting Highest expected GCI to align with restart Gci (%x/%x)",
+           (Uint32) (highestExpectedGci >> 32),
+           (Uint32) (highestExpectedGci & 0xffffffff));
+  return NDBT_OK;
+}
+
+int runRequireExact(NDBT_Context* ctx, NDBT_Step* step){
+  ctx->incProperty("ExactGCI");
+  return NDBT_OK;
 }
 
 int runVerifyInserts(NDBT_Context* ctx, NDBT_Step* step){
@@ -130,17 +306,9 @@ int runVerifyInserts(NDBT_Context* ctx,
   UtilTransactions utilTrans(*ctx->getTab());
   HugoOperations hugoOps(*ctx->getTab());
   NdbRestarter restarter;
+  Uint32 extraGciBits = ctx->getTab()->getExtraRowGciBits();
+  Uint32 firstSaturatedValue = (1 << extraGciBits) -1;
 
-  Uint32 restartGCI;
-  int res = pNdb->getDictionary()->getRestartGCI(&restartGCI);
-  if (res != 0)
-  {
-    ndbout << "Failed to retreive restart gci" << endl;
-    ndbout << pNdb->getDictionary()->getNdbError() << endl;
-    return NDBT_FAILED;
-  }
-
-  ndbout << "restartGCI = " << restartGCI << endl;
   int count = 0;
   if (utilTrans.selectCount(pNdb, 64, &count) != 0){
     return NDBT_FAILED;
@@ -151,7 +319,7 @@ int runVerifyInserts(NDBT_Context* ctx,
   int recordsWithLowerOrSameGci = 0;
   unsigned i; 
   for (i = 0; i < savedRecords.size(); i++){
-    if (savedRecords[i].m_gci <= (int)restartGCI)
+    if (savedRecords[i].m_gci <= highestExpectedGci)
       recordsWithLowerOrSameGci++;
   }
   if (recordsWithLowerOrSameGci != count){
@@ -159,10 +327,13 @@ int runVerifyInserts(NDBT_Context* ctx,
     result = NDBT_FAILED;
   }
 
+  bool exactGCIonly = ctx->getProperty("ExactGCI", (unsigned) 0);
 
   // RULE2: The records found in db should have same or lower 
   // gci as in the vector
   int recordsWithIncorrectGci = 0;
+  int recordsWithRoundedGci = 0;
+  int recordsWithIncorrectAuthor = 0;
   for (i = 0; i < savedRecords.size(); i++){
     CHECK(hugoOps.startTransaction(pNdb) == 0);
     /* First read of row to check contents */
@@ -173,13 +344,15 @@ int runVerifyInserts(NDBT_Context* ctx,
     CHECK(readOp != NULL);
     CHECK(readOp->readTuple() == 0);
     CHECK(hugoOps.equalForRow(readOp, i) == 0);
-    NdbRecAttr* rowGci = readOp->getValue(NdbDictionary::Column::ROW_GCI);
+    NdbRecAttr* rowGci = readOp->getValue(NdbDictionary::Column::ROW_GCI64);
+    NdbRecAttr* rowAuthor = readOp->getValue(NdbDictionary::Column::ROW_AUTHOR);
     CHECK(rowGci != NULL);
+    CHECK(rowAuthor != NULL);
     if (hugoOps.execute_Commit(pNdb) != 0){
       // Record was not found in db'
 
       // Check record gci
-      if (savedRecords[i].m_gci <= (int)restartGCI){
+      if (savedRecords[i].m_gci <= highestExpectedGci) {
 	ndbout << "ERR: Record "<<i<<" should have existed" << endl;
 	result = NDBT_FAILED;
       }
@@ -189,7 +362,7 @@ int runVerifyInserts(NDBT_Context* ctx,
          * Let's disappear it, so that it doesn't cause confusion
          * after further restarts.
          */
-        savedRecords[i].m_gci = (Uint32(1) << 31) -1; // Big number
+        savedRecords[i].m_gci = (Uint64(1) << 63) -1; // Big number
       }
     } else {
       // Record was found in db
@@ -200,16 +373,50 @@ int runVerifyInserts(NDBT_Context* ctx,
 	result = NDBT_FAILED;
       }
       // Check record gci in range
-      if (savedRecords[i].m_gci > (int)restartGCI){
+      Uint64 expectedRecordGci = savedRecords[i].m_gci;
+      if (expectedRecordGci > highestExpectedGci){
 	ndbout << "ERR: Record "<<i<<" should not have existed" << endl;
 	result = NDBT_FAILED;
       }
+      bool expectRounding = (expectedRecordGci & 0xffffffff) >= firstSaturatedValue;
+      Uint64 expectedRoundedGci = (expectedRecordGci | 0xffffffff);
+      Uint64 readGci = rowGci->u_64_value();
+      Uint64 expectedRead = (expectRounding)?expectedRoundedGci :
+        expectedRecordGci;
       // Check record gci is exactly correct
-      if (savedRecords[i].m_gci != rowGci->int32_value()){
-        ndbout << "ERR: Record "<<i<<" should have GCI " <<
-          savedRecords[i].m_gci << ", but has " << 
-          rowGci->int32_value() << endl;
-        recordsWithIncorrectGci++;
+      if (expectedRead != readGci){
+        if ((!exactGCIonly) &&
+            (expectedRoundedGci == readGci))
+        {
+          /* Record rounded, though bits can be represented
+           * presumably due to Redo gci truncation
+           */
+          recordsWithRoundedGci++;
+        }
+        else
+        {
+          ndbout_c("ERR: Record %u should have GCI %x/%x, but has "
+                   "%x/%x.",
+                   i,
+                   (Uint32) (expectedRead >> 32),
+                   (Uint32) (expectedRead & 0xffffffff),
+                   (Uint32) (readGci >> 32),
+                   (Uint32) (readGci & 0xffffffff));
+          recordsWithIncorrectGci++;
+          result = NDBT_FAILED;
+        }
+      }
+
+      // Check author value is correct.
+      Uint32 expectedAuthor = savedRecords[i].m_author;
+
+      if (rowAuthor->u_32_value() != expectedAuthor)
+      {
+        ndbout_c("ERR: Record %u should have Author %d, but has %d.",
+                 i,
+                 expectedAuthor,
+                 rowAuthor->u_32_value());
+        recordsWithIncorrectAuthor++;
         result = NDBT_FAILED;
       }
     }
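
The rounding accepted above is the expected saturation of the extra GCI
bits : a row stores only extraGciBits of the GCI low word, so any low word
>= (1 << extraGciBits) - 1 reads back as 0xffffffff. With 3 extra bits,
for example:

  firstSaturatedValue = (1 << 3) - 1 = 7
  commit GCI 0x5/0x00000004  ->  read back 0x5/0x00000004  (4 < 7, exact)
  commit GCI 0x5/0x00000009  ->  read back 0x5/0xffffffff  (9 >= 7, saturated)
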
@@ -222,17 +429,26 @@ int runVerifyInserts(NDBT_Context* ctx,
   ndbout << "There are " << savedRecords.size() 
 	 << " records in vector" << endl;
 
-  ndbout << "There are " << recordsWithLowerOrSameGci 
-	 << " records with lower or same gci than " << restartGCI <<  endl;
+  ndbout_c("There are %u records with lower or same gci than %x/%x",
+           recordsWithLowerOrSameGci,
+           (Uint32)(highestExpectedGci >> 32),
+           (Uint32)(highestExpectedGci & 0xffffffff));
   
+  ndbout_c("There are %u records with rounded Gcis.  Exact GCI flag is %u",
+           recordsWithRoundedGci, exactGCIonly);
+
   ndbout << "There are " << recordsWithIncorrectGci
          << " records with incorrect Gci on recovery." << endl;
 
+  ndbout << "There are " << recordsWithIncorrectAuthor
+         << " records with incorrect Author on recovery." << endl;
+
   return result;
 }
 
 int runClearGlobals(NDBT_Context* ctx, NDBT_Step* step){
   savedRecords.clear();
+  highestExpectedGci = 0;
   return NDBT_OK;
 }
 
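A minimal sketch of the low-word saturation rule that runVerifyInserts
applies above, assuming a table with extraGciBits extra row GCI bits as
used in runUpdateVerifyGCI below; the helper name is illustrative and
not part of the patch:

  // Low words that can no longer be represented in the extra row GCI
  // bits are stored saturated, i.e. rounded up to 0xffffffff.
  static Uint64 saturateGci(Uint64 gci, Uint32 extraGciBits)
  {
    const Uint32 firstSaturatedValue = (Uint32(1) << extraGciBits) - 1;
    const Uint32 gci_lo = Uint32(gci & 0xffffffff);
    return (gci_lo >= firstSaturatedValue) ? (gci | 0xffffffff) : gci;
  }
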
@@ -247,27 +463,276 @@ int runClearTable(NDBT_Context* ctx, NDB
 }
 
 
+int runLoadTable(NDBT_Context* ctx, NDBT_Step* step)
+{
+  int records = ctx->getNumRecords();
+  HugoTransactions hugoTrans(*ctx->getTab());
+  if (hugoTrans.loadTable(GETNDB(step), records, 512, false, 0, true) != 0){
+    return NDBT_FAILED;
+  }
+  return NDBT_OK;
+}
+
+int runNodeInitialRestarts(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NdbRestarter restarter;
+  const Uint32 numRestarts = 4;
+  for (Uint32 nr = 0; nr < numRestarts; nr++)
+  {
+    if (ctx->isTestStopped())
+    {
+      return NDBT_OK;
+    }
+    int nodeId = restarter.getNode(NdbRestarter::NS_RANDOM);
+    ndbout_c("Restarting node %u", nodeId);
+
+    if (restarter.restartOneDbNode(nodeId, NdbRestarter::NRRF_INITIAL) != 0)
+    {
+      ndbout_c("Error restarting node");
+      ctx->stopTest();
+      return NDBT_FAILED;
+    }
+
+    if (restarter.waitClusterStarted(300) != 0)
+    {
+      ctx->stopTest();
+      return NDBT_FAILED;
+    }
+
+    if (GETNDB(step)->waitUntilReady() != 0)
+    {
+      ctx->stopTest();
+      return NDBT_FAILED;
+    }
+  }
+
+  ctx->stopTest();
+
+  return NDBT_OK;
+}
+
+int runUpdateVerifyGCI(NDBT_Context* ctx, NDBT_Step* step)
+{
+  HugoOperations hugoOps(*ctx->getTab());
+  HugoCalculator hugoCalc(*ctx->getTab());
+  Ndb* pNdb = GETNDB(step);
+
+  /* Loop, updating the first record in the table, and checking
+   * that it has the GCI it should
+   */
+  Uint64 loopCount = 0;
+  Uint64 distinctCount = 0;
+  Uint64 expectedGCI = 0;
+  Uint64 lastGoodReadGCI = 0;
+  Uint32 extraGciBits = ctx->getTab()->getExtraRowGciBits();
+  Uint32 firstSaturatedValue = (1 << extraGciBits) -1;
+  ndbout_c("Extra GCI bits : %u, firstSaturatedValue : %u",
+           extraGciBits,
+           firstSaturatedValue);
+  int result = NDBT_OK;
+  while (!ctx->isTestStopped())
+  {
+    CHECK(hugoOps.startTransaction(pNdb) == 0);
+    /* Define a read op to get the 'existing' GCI */
+    NdbTransaction* trans = hugoOps.getTransaction();
+    CHECK(hugoOps.pkReadRecord(pNdb,
+                               0,
+                               1) == 0);
+    NdbOperation* readOp = trans->getNdbOperation(ctx->getTab());
+    CHECK(readOp != NULL);
+    CHECK(readOp->readTuple() == 0);
+    CHECK(hugoOps.equalForRow(readOp, 0) == 0);
+    NdbRecAttr* rowGci = readOp->getValue(NdbDictionary::Column::ROW_GCI64);
+    CHECK(rowGci != NULL);
+
+    /* Define an update op to set the next GCI */
+    CHECK(hugoOps.pkUpdateRecord(pNdb, 0, 1, loopCount+1) == 0);
+
+    if (hugoOps.execute_Commit(pNdb) != 0)
+    {
+      if (hugoOps.getNdbError().classification ==
+          NdbError::NodeRecoveryError)
+      {
+        hugoOps.closeTransaction(pNdb);
+        ndbout_c("Temporary error at loopCount %llu", loopCount);
+        continue;
+      }
+
+      ndbout << "Error executing : " << hugoOps.getNdbError() << endl;
+      return NDBT_FAILED;
+    }
+
+    /* First check the data is as expected */
+    CHECK(hugoCalc.verifyRowValues(&hugoOps.get_row(0)) == 0);
+    CHECK((Uint64)hugoCalc.getUpdatesValue(&hugoOps.get_row(0)) == loopCount);
+    //ndbout_c("Updates value is %u", hugoCalc.getUpdatesValue(&hugoOps.get_row(0)));
+
+    Uint64 committedGCI;
+    CHECK(trans->getGCI(&committedGCI) == 0);
+    Uint32 gci_lo = Uint32(committedGCI & 0xffffffff);
+
+    Uint64 saturatedCommittedGCI = (gci_lo >= firstSaturatedValue) ?
+      committedGCI | 0xffffffff : committedGCI;
+    Uint64 rowGCI64 = rowGci->u_64_value();
+
+//    ndbout_c("Read row GCI64 %x/%x.  Committed GCI64 : %x/%x.  Saturated GCI64 :%x/%x Last good read : %x/%x",
+//             Uint32(rowGCI64 >> 32),
+//             Uint32(rowGCI64 & 0xffffffff),
+//             Uint32(committedGCI >> 32),
+//             Uint32(committedGCI & 0xffffffff),
+//             Uint32(saturatedCommittedGCI >> 32),
+//             Uint32(saturatedCommittedGCI & 0xffffffff),
+//             Uint32(lastGoodReadGCI >> 32),
+//             Uint32(lastGoodReadGCI & 0xffffffff));
+
+
+    if (rowGCI64 < lastGoodReadGCI)
+    {
+      ndbout_c("ERROR : Read row GCI value (%x/%x) lower than previous value (%x/%x)",
+               (Uint32) (rowGCI64 >> 32),
+               (Uint32) (rowGCI64 & 0xffffffff),
+               Uint32(lastGoodReadGCI >> 32),
+               Uint32(lastGoodReadGCI & 0xffffffff));
+    }
+    /* We certainly should not read a committed GCI value that's
+     * bigger than the read's commit-point GCI
+     */
+    if (saturatedCommittedGCI < rowGCI64)
+    {
+      ndbout_c("ERROR : Saturated committed GCI (%x/%x) lower than actual read GCI (%x/%x)",
+               Uint32(saturatedCommittedGCI >>32),
+               Uint32(saturatedCommittedGCI & 0xffffffff),
+               (Uint32) (rowGCI64 >> 32),
+               (Uint32) (rowGCI64 & 0xffffffff));
+    }
+    /* If we've read a committed GCI then we should certainly not
+     * be committing at lower values
+     */
+    if (saturatedCommittedGCI < lastGoodReadGCI)
+    {
+      ndbout_c("ERROR : Saturated committed GCI (%x/%x) lower than a previously"
+               "read GCI (%x/%x)",
+               Uint32(saturatedCommittedGCI >>32),
+               Uint32(saturatedCommittedGCI & 0xffffffff),
+               Uint32(lastGoodReadGCI >> 32),
+               Uint32(lastGoodReadGCI & 0xffffffff));
+    }
+    /* If we've previously had a particular committed GCI then we
+     * should certainly not now have a lower committed GCI
+     */
+    if (saturatedCommittedGCI < expectedGCI)
+    {
+      ndbout_c("ERROR : Saturated committed GCI (%x/%x) lower than expected GCI"
+               " (%x/%x)",
+               Uint32(saturatedCommittedGCI >>32),
+               Uint32(saturatedCommittedGCI & 0xffffffff),
+               Uint32(expectedGCI >> 32),
+               Uint32(expectedGCI & 0xffffffff));
+    }
+
+    if (loopCount > 0)
+    {
+      if (rowGCI64 != expectedGCI)
+      {
+        ndbout_c("MISMATCH : Expected GCI of %x/%x, but found %x/%x",
+                 (Uint32) (expectedGCI >> 32),
+                 (Uint32) (expectedGCI & 0xffffffff),
+                 (Uint32) (rowGCI64 >> 32),
+                 (Uint32) (rowGCI64 & 0xffffffff));
+        ndbout_c("At loopcount %llu", loopCount);
+        ndbout_c("Last good read GCI %x/%x",
+                 Uint32(lastGoodReadGCI >> 32),
+                 Uint32(lastGoodReadGCI & 0xffffffff));
+        ndbout_c("Read committed GCI : %x/%x",
+                 Uint32(saturatedCommittedGCI >>32),
+                 Uint32(saturatedCommittedGCI & 0xffffffff));
+        ndbout_c("Transaction coordinator node : %u",
+                 trans->getConnectedNodeId());
+        return NDBT_FAILED;
+      }
+
+      if (saturatedCommittedGCI != expectedGCI)
+      {
+        distinctCount++;
+      }
+    }
+
+    expectedGCI = saturatedCommittedGCI;
+    lastGoodReadGCI = rowGCI64;
+
+    hugoOps.closeTransaction(pNdb);
+    loopCount++;
+
+    /* Sleep to avoid excessive updating */
+    NdbSleep_MilliSleep(10);
+  }
+
+  ndbout_c("%llu updates with %llu distinct GCI values",
+           loopCount,
+           distinctCount);
+
+  return result;
+}
+
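The loop above boils down to three ordering invariants on GCIs;
restated as a standalone sketch using the loop's own variable names
(the helper itself is illustrative, not code from the patch):

  // Sketch: the three GCI ordering invariants whose violations the
  // loop above reports.
  static bool gciInvariantsHold(Uint64 rowGCI64,
                                Uint64 lastGoodReadGCI,
                                Uint64 saturatedCommittedGCI,
                                Uint64 expectedGCI)
  {
    return rowGCI64 >= lastGoodReadGCI            // reads never go back
        && saturatedCommittedGCI >= rowGCI64      // commit >= what was read
        && saturatedCommittedGCI >= expectedGCI;  // commits never go back
  }
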
 NDBT_TESTSUITE(testRestartGci);
 TESTCASE("InsertRestartGci", 
 	 "Verify that only expected records are still in NDB\n"
 	 "after a restart" ){
-  INITIALIZER(runClearTable);
+  INITIALIZER(runCreateTable);
   INITIALIZER(runClearGlobals);
-  STEP(runInsertRememberGci);
-  STEP(runRestartGciControl);
+  INITIALIZER(runInsertRememberGci);
+  INITIALIZER(runRestartGciControl);
+  INITIALIZER(runDetermineRestartGci);
+  TC_PROPERTY("ExactGCI", Uint32(0)); /* Recovery from Redo == inexact low word */
   VERIFIER(runVerifyInserts);
   /* Restart again - LCP after first restart will mean that this
    * time we recover from LCP, not Redo
    */
-  VERIFIER(runRestart);
+  VERIFIER(runRestartAll);
+  VERIFIER(runDetermineRestartGci);
+  VERIFIER(runVerifyInserts);  // Check GCIs again
+  /* Restart again - one node, initial.  This will check
+   * COPYFRAG behaviour
+   */
+  VERIFIER(runRestartOneInitial);
   VERIFIER(runVerifyInserts);  // Check GCIs again
+  VERIFIER(runClearTable);
+  /* Re-fill table with records, which will only be in the Redo log.
+   * Then restart, testing COPYFRAG behaviour with a
+   * non-0xffffffff low word
+   */
+  VERIFIER(runClearGlobals);
+  VERIFIER(runInsertRememberGci);
+  VERIFIER(runRestartOneInitial);
+  /* Require exact GCI match from here - no Redo messing it up */
+  VERIFIER(runRequireExact);
+  VERIFIER(runVerifyInserts);
+  /* Now restart all nodes - all inserts should be
+   * in LCP, and should be restored correctly
+   */
+  VERIFIER(runRestartAll);
+  VERIFIER(runDetermineRestartGci);
+  VERIFIER(runVerifyInserts);
+  FINALIZER(runClearTable);
+  FINALIZER(runDropTable);
+}
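runRequireExact is referenced above but defined outside the quoted
hunks; presumably it just flips the test's ExactGCI property, along
these lines (hypothetical sketch, not the actual implementation):

  int runRequireExact(NDBT_Context* ctx, NDBT_Step* step)
  {
    // From this point on runVerifyInserts must see exact GCI matches
    ctx->setProperty("ExactGCI", Uint32(1));
    return NDBT_OK;
  }
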
+TESTCASE("InitialNodeRestartUpdate",
+         "Check that initial node restart (copyfrag) does "
+         "not affect GCI recording")
+{
+  INITIALIZER(runCreateTable);
+  INITIALIZER(runLoadTable);
+  STEP(runNodeInitialRestarts);
+  STEP(runUpdateVerifyGCI);
   FINALIZER(runClearTable);
+  FINALIZER(runDropTable);
 }
 NDBT_TESTSUITE_END(testRestartGci);
 
 int main(int argc, const char** argv){
   ndb_init();
   NDBT_TESTSUITE_INSTANCE(testRestartGci);
+  testRestartGci.setCreateTable(false);
   return testRestartGci.execute(argc, argv);
 }
 

=== modified file 'storage/ndb/test/run-test/conf-blade08.cnf'
--- a/storage/ndb/test/run-test/conf-blade08.cnf	2011-04-20 09:18:09 +0000
+++ b/storage/ndb/test/run-test/conf-blade08.cnf	2011-05-19 17:47:28 +0000
@@ -19,7 +19,7 @@ IndexMemory = 100M
 DataMemory = 300M
 BackupMemory = 64M
 MaxNoOfConcurrentScans = 100
-MaxNoOfSavedMessages= 1000
+MaxNoOfSavedMessages= 5
 SendBufferMemory = 2M
 NoOfFragmentLogFiles = 4
 FragmentLogFileSize = 64M

=== modified file 'storage/ndb/test/run-test/conf-dl145a.cnf'
--- a/storage/ndb/test/run-test/conf-dl145a.cnf	2011-02-19 10:31:42 +0000
+++ b/storage/ndb/test/run-test/conf-dl145a.cnf	2011-05-19 18:19:47 +0000
@@ -20,7 +20,7 @@ IndexMemory = 100M
 DataMemory = 300M
 BackupMemory = 64M
 MaxNoOfConcurrentScans = 100
-MaxNoOfSavedMessages= 1000
+MaxNoOfSavedMessages= 5
 SendBufferMemory = 2M
 NoOfFragmentLogFiles = 4
 FragmentLogFileSize = 64M

=== modified file 'storage/ndb/test/run-test/conf-fimafeng08.cnf'
--- a/storage/ndb/test/run-test/conf-fimafeng08.cnf	2010-02-02 13:44:41 +0000
+++ b/storage/ndb/test/run-test/conf-fimafeng08.cnf	2011-05-19 18:19:47 +0000
@@ -19,7 +19,7 @@ IndexMemory = 100M
 DataMemory = 300M
 BackupMemory = 64M
 MaxNoOfConcurrentScans = 100
-MaxNoOfSavedMessages= 1000
+MaxNoOfSavedMessages= 5
 SendBufferMemory = 2M
 
 RedoBuffer = 32M

=== modified file 'storage/ndb/test/run-test/conf-fimafeng09.cnf' (properties changed: +x to -x)
--- a/storage/ndb/test/run-test/conf-fimafeng09.cnf	2010-02-02 13:44:41 +0000
+++ b/storage/ndb/test/run-test/conf-fimafeng09.cnf	2011-05-19 18:19:47 +0000
@@ -19,7 +19,7 @@ IndexMemory = 100M
 DataMemory = 300M
 BackupMemory = 64M
 MaxNoOfConcurrentScans = 100
-MaxNoOfSavedMessages= 1000
+MaxNoOfSavedMessages= 5
 SendBufferMemory = 2M
 RedoBuffer = 32M
 

=== modified file 'storage/ndb/test/run-test/conf-loki27.cnf' (properties changed: +x to -x)
--- a/storage/ndb/test/run-test/conf-loki27.cnf	2010-06-15 15:02:16 +0000
+++ b/storage/ndb/test/run-test/conf-loki27.cnf	2011-05-19 18:19:47 +0000
@@ -19,7 +19,7 @@ IndexMemory = 100M
 DataMemory = 300M
 BackupMemory = 64M
 MaxNoOfConcurrentScans = 100
-MaxNoOfSavedMessages= 1000
+MaxNoOfSavedMessages= 5
 SendBufferMemory = 2M
 NoOfFragmentLogFiles = 4
 FragmentLogFileSize = 64M

=== modified file 'storage/ndb/test/run-test/conf-ndb07.cnf'
--- a/storage/ndb/test/run-test/conf-ndb07.cnf	2011-02-19 10:31:42 +0000
+++ b/storage/ndb/test/run-test/conf-ndb07.cnf	2011-05-19 18:19:47 +0000
@@ -27,7 +27,7 @@ IndexMemory = 100M
 DataMemory = 500M
 BackupMemory = 64M
 MaxNoOfConcurrentScans = 100
-MaxNoOfSavedMessages= 1000
+MaxNoOfSavedMessages= 5
 NoOfFragmentLogFiles = 8
 FragmentLogFileSize = 64M
 ODirect=1

=== modified file 'storage/ndb/test/run-test/conf-ndbmaster.cnf'
--- a/storage/ndb/test/run-test/conf-ndbmaster.cnf	2009-02-17 09:26:44 +0000
+++ b/storage/ndb/test/run-test/conf-ndbmaster.cnf	2011-05-19 18:19:47 +0000
@@ -19,7 +19,7 @@ IndexMemory = 100M
 DataMemory = 300M
 BackupMemory = 64M
 MaxNoOfConcurrentScans = 100
-MaxNoOfSavedMessages= 1000
+MaxNoOfSavedMessages= 5
 SendBufferMemory = 2M
 
 SharedGlobalMemory=256M

=== modified file 'storage/ndb/test/run-test/conf-repl.cnf'
--- a/storage/ndb/test/run-test/conf-repl.cnf	2007-02-13 01:38:54 +0000
+++ b/storage/ndb/test/run-test/conf-repl.cnf	2011-05-19 17:47:28 +0000
@@ -11,7 +11,7 @@ skip-innodb
 skip-bdb
 
 [cluster_config]
-MaxNoOfSavedMessages= 1000
+MaxNoOfSavedMessages= 5
 DataMemory = 100M
 
 [cluster_config.master]

=== modified file 'storage/ndb/test/run-test/conf-techra29.cnf' (properties changed: +x to -x)
--- a/storage/ndb/test/run-test/conf-techra29.cnf	2010-02-02 13:44:41 +0000
+++ b/storage/ndb/test/run-test/conf-techra29.cnf	2011-05-19 18:19:47 +0000
@@ -19,7 +19,7 @@ IndexMemory = 100M
 DataMemory = 300M
 BackupMemory = 64M
 MaxNoOfConcurrentScans = 100
-MaxNoOfSavedMessages= 1000
+MaxNoOfSavedMessages= 5
 SendBufferMemory = 2M
 RedoBuffer = 32M
 

=== modified file 'storage/ndb/test/run-test/conf-test.cnf'
--- a/storage/ndb/test/run-test/conf-test.cnf	2007-11-15 07:57:00 +0000
+++ b/storage/ndb/test/run-test/conf-test.cnf	2011-05-19 17:47:28 +0000
@@ -19,7 +19,7 @@ IndexMemory = 25M
 DataMemory = 100M
 BackupMemory = 64M
 MaxNoOfConcurrentScans = 100
-MaxNoOfSavedMessages= 1000
+MaxNoOfSavedMessages= 5
 SendBufferMemory = 2M
 NoOfFragmentLogFiles = 4
 FragmentLogFileSize = 64M

=== modified file 'storage/ndb/test/run-test/conf-tyr64.cnf' (properties changed: +x to -x)
--- a/storage/ndb/test/run-test/conf-tyr64.cnf	2010-02-02 13:44:41 +0000
+++ b/storage/ndb/test/run-test/conf-tyr64.cnf	2011-05-19 18:19:47 +0000
@@ -19,7 +19,7 @@ IndexMemory = 100M
 DataMemory = 300M
 BackupMemory = 64M
 MaxNoOfConcurrentScans = 100
-MaxNoOfSavedMessages= 1000
+MaxNoOfSavedMessages= 5
 SendBufferMemory = 2M
 
 RedoBuffer = 32M

=== modified file 'storage/ndb/test/run-test/conf-upgrade.cnf'
--- a/storage/ndb/test/run-test/conf-upgrade.cnf	2009-06-23 18:40:35 +0000
+++ b/storage/ndb/test/run-test/conf-upgrade.cnf	2011-05-19 17:47:28 +0000
@@ -26,7 +26,7 @@ IndexMemory = 50M
 DataMemory = 100M
 BackupMemory = 64M
 MaxNoOfConcurrentScans = 100
-MaxNoOfSavedMessages= 1000
+MaxNoOfSavedMessages= 5
 SendBufferMemory = 2M
 NoOfFragmentLogFiles = 4
 FragmentLogFileSize = 64M

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2011-05-07 06:17:02 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2011-05-17 23:29:55 +0000
@@ -930,9 +930,9 @@ max-time: 7200
 cmd: testTransactions
 args:
 
-max-time: 1500
+max-time: 3000
 cmd: testRestartGci
-args: T6 
+args: T6 D1
 
 max-time: 1500
 cmd: testBlobs

=== modified file 'storage/ndb/test/run-test/daily-devel-tests.txt'
--- a/storage/ndb/test/run-test/daily-devel-tests.txt	2011-04-08 11:06:53 +0000
+++ b/storage/ndb/test/run-test/daily-devel-tests.txt	2011-05-25 13:19:02 +0000
@@ -129,3 +129,16 @@ max-time: 1800
 cmd: testDict
 args: -n SchemaTrans -l 1
 
+# Refresh tuple
+max-time: 300
+cmd: testBasic
+args: -n RefreshTuple T6 D1
+
+max-time: 300
+cmd: testIndex
+args: -n RefreshWithOrderedIndex T2 D2
+
+max-time: 300
+cmd: testBasic
+args: -n RefreshLocking D1
+

=== modified file 'storage/ndb/test/src/HugoOperations.cpp'
--- a/storage/ndb/test/src/HugoOperations.cpp	2011-04-07 07:22:49 +0000
+++ b/storage/ndb/test/src/HugoOperations.cpp	2011-05-25 13:19:02 +0000
@@ -567,6 +567,47 @@ int HugoOperations::pkDeleteRecord(Ndb*
   return NDBT_OK;
 }
 
+int HugoOperations::pkRefreshRecord(Ndb* pNdb,
+                                    int recordNo,
+                                    int numRecords,
+                                    int anyValueInfo){
+
+  char buffer[NDB_MAX_TUPLE_SIZE];
+  const NdbDictionary::Table * pTab =
+    pNdb->getDictionary()->getTable(tab.getName());
+
+  if (pTab == 0)
+  {
+    return NDBT_FAILED;
+  }
+
+  const NdbRecord * record = pTab->getDefaultRecord();
+  NdbOperation::OperationOptions opts;
+  opts.optionsPresent = NdbOperation::OperationOptions::OO_ANYVALUE;
+  for(int r=0; r < numRecords; r++)
+  {
+    bzero(buffer, sizeof(buffer));
+    if (calc.equalForRow((Uint8*)buffer, record, r + recordNo))
+    {
+      return NDBT_FAILED;
+    }
+
+    opts.anyValue = anyValueInfo?
+      (anyValueInfo << 16) | (r+recordNo) :
+      0;
+
+    const NdbOperation* pOp = pTrans->refreshTuple(record, buffer,
+                                                   &opts, sizeof(opts));
+    if (pOp == NULL)
+    {
+      ERR(pTrans->getNdbError());
+      setNdbError(pTrans->getNdbError());
+      return NDBT_FAILED;
+    }
+  }
+  return NDBT_OK;
+}
+
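The anyValue packing in pkRefreshRecord above puts the caller's tag in
the high 16 bits and the record number in the low 16 bits, with 0
meaning no tag; as a standalone restatement (helper name illustrative):

  // Mirrors the ternary in pkRefreshRecord: tag in the high 16 bits,
  // record number in the low 16 bits, 0 when no tag is requested.
  static Uint32 packAnyValue(Uint32 anyValueInfo, Uint32 recordNo)
  {
    return anyValueInfo ? (anyValueInfo << 16) | recordNo : 0;
  }
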
 int HugoOperations::execute_Commit(Ndb* pNdb,
 				   AbortOption eao){
 

=== modified file 'storage/ndb/test/src/HugoTransactions.cpp'
--- a/storage/ndb/test/src/HugoTransactions.cpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/test/src/HugoTransactions.cpp	2011-05-25 13:19:02 +0000
@@ -1502,6 +1502,79 @@ HugoTransactions::pkDelRecords(Ndb* pNdb
 }
 
 int 
+HugoTransactions::pkRefreshRecords(Ndb* pNdb,
+                                   int startFrom,
+                                   int count,
+                                   int batch)
+{
+  int r = 0;
+  int retryAttempt = 0;
+
+  g_info << "|- Refreshing records..." << startFrom << "-" << (startFrom+count)
+         << " (batch=" << batch << ")" << endl;
+
+  while (r < count)
+  {
+    if(r + batch > count)
+      batch = count - r;
+
+    if (retryAttempt >= m_retryMax)
+    {
+      g_info << "ERROR: has retried this operation " << retryAttempt
+	     << " times, failing!" << endl;
+      return NDBT_FAILED;
+    }
+
+    pTrans = pNdb->startTransaction();
+    if (pTrans == NULL)
+    {
+      const NdbError err = pNdb->getNdbError();
+
+      if (err.status == NdbError::TemporaryError){
+	ERR(err);
+	NdbSleep_MilliSleep(50);
+	retryAttempt++;
+	continue;
+      }
+      ERR(err);
+      return NDBT_FAILED;
+    }
+
+    if (pkRefreshRecord(pNdb, r, batch) != NDBT_OK)
+    {
+      ERR(pTrans->getNdbError());
+      closeTransaction(pNdb);
+      return NDBT_FAILED;
+    }
+
+    if (pTrans->execute(Commit, AbortOnError) == -1)
+    {
+      const NdbError err = pTrans->getNdbError();
+
+      switch(err.status){
+      case NdbError::TemporaryError:
+	ERR(err);
+	closeTransaction(pNdb);
+	NdbSleep_MilliSleep(50);
+	retryAttempt++;
+	continue;
+
+      default:
+	ERR(err);
+	closeTransaction(pNdb);
+	return NDBT_FAILED;
+      }
+    }
+
+    closeTransaction(pNdb);
+    r += batch; // Refresh next batch
+  }
+
+  return NDBT_OK;
+}
+
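A hedged usage sketch of the new method (the values are illustrative
and assume the usual NDBT test scaffolding):

  // Refresh rows 0..999 in batches of 16; temporary errors are
  // retried inside pkRefreshRecords itself.
  HugoTransactions hugoTrans(*ctx->getTab());
  if (hugoTrans.pkRefreshRecords(GETNDB(step), 0, 1000, 16) != NDBT_OK)
    return NDBT_FAILED;
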
+int
 HugoTransactions::pkReadUnlockRecords(Ndb* pNdb, 
                                       int records,
                                       int batch,

=== modified file 'storage/ndb/test/src/NDBT_Table.cpp'
--- a/storage/ndb/test/src/NDBT_Table.cpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/test/src/NDBT_Table.cpp	2011-05-17 23:29:55 +0000
@@ -39,6 +39,8 @@ operator <<(class NdbOut& ndbout, const
   ndbout << "SingleUserMode: " << (Uint32) tab.getSingleUserMode() << endl;
   ndbout << "ForceVarPart: " << tab.getForceVarPart() << endl;
   ndbout << "FragmentCount: " << tab.getFragmentCount() << endl;
+  ndbout << "ExtraRowGciBits: " << tab.getExtraRowGciBits() << endl;
+  ndbout << "ExtraRowAuthorBits: " << tab.getExtraRowAuthorBits() << endl;
 
   //<< ((tab.getTupleKey() == TupleId) ? " tupleid" : "") <<endl;
   ndbout << "TableStatus: ";

=== modified file 'storage/ndb/test/tools/hugoPkUpdate.cpp'
--- a/storage/ndb/test/tools/hugoPkUpdate.cpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/test/tools/hugoPkUpdate.cpp	2011-05-25 13:19:02 +0000
@@ -43,6 +43,8 @@ struct ThrOutput {
   NDBT_Stats latency;
 };
 
+static int _refresh = 0;
+
 int main(int argc, const char** argv){
   ndb_init();
 
@@ -63,7 +65,9 @@ int main(int argc, const char** argv){
     //    { "batch", 'b', arg_integer, &_batch, "batch value", "batch" },
     { "records", 'r', arg_integer, &_records, "Number of records", "records" },
     { "usage", '?', arg_flag, &_help, "Print help", "" },
-    { "database", 'd', arg_string, &db, "Database", "" }
+    { "database", 'd', arg_string, &db, "Database", "" },
+    { "refresh", 0, arg_flag, &_refresh, "refresh record rather than update them", "" }
   };
   int num_args = sizeof(args) / sizeof(args[0]);
   int optind = 0;
@@ -135,7 +139,10 @@ int main(int argc, const char** argv){
     ths.stop();
 
     if (ths.get_err())
+    {
+      ths.disconnect();
       NDBT_ProgramExit(NDBT_FAILED);
+    }
 
     if (_stats) {
       NDBT_Stats latency;
@@ -160,6 +167,8 @@ int main(int argc, const char** argv){
     i++;
   }
 
+  ths.disconnect();
+
   return NDBT_ProgramExit(NDBT_OK);
 }
 
@@ -177,9 +186,19 @@ static void hugoPkUpdate(NDBT_Thread& th
   hugoTrans.setThrInfo(ths.get_count(), thr.get_thread_no());
 
   int ret;
-  ret = hugoTrans.pkUpdateRecords(thr.get_ndb(),
-                                  input->records,
-                                  input->batch);
+  if (_refresh == 0)
+  {
+    ret = hugoTrans.pkUpdateRecords(thr.get_ndb(),
+                                    input->records,
+                                    input->batch);
+  }
+  else
+  {
+    ret = hugoTrans.pkRefreshRecords(thr.get_ndb(),
+                                     0,
+                                     input->records,
+                                     input->batch);
+  }
   if (ret != 0)
     thr.set_err(ret);
 }

=== modified file 'storage/ndb/tools/CMakeLists.txt'
--- a/storage/ndb/tools/CMakeLists.txt	2011-02-03 14:20:36 +0000
+++ b/storage/ndb/tools/CMakeLists.txt	2011-05-24 11:51:39 +0000
@@ -72,12 +72,6 @@ MYSQL_ADD_EXECUTABLE(ndb_config
   COMPONENT ClusterTools)
 TARGET_LINK_LIBRARIES(ndb_config ndbmgmclient ndbconf)
 
-SET(options "-I${CMAKE_SOURCE_DIR}/storage/ndb/src/mgmapi")
-SET(options "${options} -I${CMAKE_SOURCE_DIR}/storage/ndb/src/mgmsrv")
-SET(options "${options} -I${CMAKE_SOURCE_DIR}/storage/ndb/include/mgmcommon")
-SET_TARGET_PROPERTIES(ndb_config PROPERTIES
-                      COMPILE_FLAGS "${options}")
-
 # Build ndbinfo_sql and run it to create ndbinfo.sql
 ADD_EXECUTABLE(ndbinfo_sql ndbinfo_sql.cpp)
 TARGET_LINK_LIBRARIES(ndbinfo_sql ndbclient)

=== modified file 'storage/ndb/tools/Makefile.am'
--- a/storage/ndb/tools/Makefile.am	2011-02-23 22:48:42 +0000
+++ b/storage/ndb/tools/Makefile.am	2011-05-24 11:51:39 +0000
@@ -60,10 +60,6 @@ ndb_config_SOURCES = ndb_config.cpp \
 	../src/mgmsrv/ConfigInfo.cpp \
 	../src/mgmsrv/InitConfigFileParser.cpp
 
-ndb_config_CXXFLAGS = -I$(top_srcdir)/storage/ndb/src/mgmapi \
-                      -I$(top_srcdir)/storage/ndb/src/mgmsrv \
-                      -I$(top_srcdir)/storage/ndb/include/mgmcommon
-
 ndb_restore_LDADD = $(top_builddir)/storage/ndb/src/common/util/libndbazio.la \
                     $(LDADD)
 

=== modified file 'storage/ndb/tools/ndb_config.cpp'
--- a/storage/ndb/tools/ndb_config.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/tools/ndb_config.cpp	2011-05-24 11:51:39 +0000
@@ -28,8 +28,8 @@
 
 #include <NdbOut.hpp>
 #include <mgmapi.h>
-#include <mgmapi_configuration.hpp>
-#include <ConfigInfo.hpp>
+#include "../src/mgmapi/mgmapi_configuration.hpp"
+#include "../src/mgmsrv/ConfigInfo.hpp"
 #include <NdbAutoPtr.hpp>
 #include <NdbTCP.h>
 
@@ -552,7 +552,7 @@ noconnect:
   return conf;
 }
 
-#include <Config.hpp>
+#include "../src/mgmsrv/Config.hpp"
 #include <EventLogger.hpp>
 
 extern EventLogger *g_eventLogger;

=== modified file 'storage/ndb/tools/ndbinfo_sql.cpp'
--- a/storage/ndb/tools/ndbinfo_sql.cpp	2011-04-12 11:59:36 +0000
+++ b/storage/ndb/tools/ndbinfo_sql.cpp	2011-05-23 11:57:55 +0000
@@ -327,6 +327,12 @@ int main(int argc, char** argv){
   sql.assfmt("CREATE DATABASE IF NOT EXISTS `%s`", opt_ndbinfo_db);
   print_conditional_sql(sql);
 
+  printf("# Set NDBINFO in offline mode during (re)create of tables\n");
+  printf("# and views to avoid errors caused by no such table or\n");
+  printf("# different table definition in NDB\n");
+  sql.assfmt("SET @@global.ndbinfo_offline=TRUE");
+  print_conditional_sql(sql);
+
   printf("# Drop any old views in %s\n", opt_ndbinfo_db);
   for (size_t i = 0; i < num_views; i++)
   {
@@ -430,6 +436,10 @@ int main(int argc, char** argv){
     print_conditional_sql(sql);
   }
 
+  printf("# Finally turn off offline mode\n");
+  sql.assfmt("SET @@global.ndbinfo_offline=FALSE");
+  print_conditional_sql(sql);
+
   return 0;
 }
 

=== modified file 'storage/ndb/tools/select_all.cpp'
--- a/storage/ndb/tools/select_all.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/tools/select_all.cpp	2011-05-17 23:29:55 +0000
@@ -49,6 +49,8 @@ static int _dumpDisk = 0;
 static int use_rowid = 0;
 static int nodata = 0;
 static int use_gci = 0;
+static int use_gci64 = 0;
+static int use_author = 0;
 
 static struct my_option my_long_options[] =
 {
@@ -86,6 +88,12 @@ static struct my_option my_long_options[
   { "gci", NDB_OPT_NOSHORT, "Dump gci",
     (uchar**) &use_gci, (uchar**) &use_gci, 0,
     GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, 
+  { "gci64", NDB_OPT_NOSHORT, "Dump ROW$GCI64",
+    (uchar**) &use_gci64, (uchar**) &use_gci64, 0,
+    GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+  { "author", NDB_OPT_NOSHORT, "Dump ROW$AUTHOR",
+    (uchar**) &use_author, (uchar**) &use_author, 0,
+    GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
   { "tupscan", 't', "Scan in tup order",
     (uchar**) &_tup, (uchar**) &_tup, 0,
     GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, 
@@ -328,7 +336,7 @@ int scanReadRecords(Ndb* pNdb,
     if(_dumpDisk && disk)
       disk_ref = pOp->getValue(NdbDictionary::Column::DISK_REF);
 
-    NdbRecAttr * rowid= 0, *frag = 0, *gci = 0;
+    NdbRecAttr * rowid= 0, *frag = 0, *gci = 0, *gci64 = 0, *author = 0;
     if (use_rowid)
     {
       frag = pOp->getValue(NdbDictionary::Column::FRAGMENT);
@@ -339,7 +347,17 @@ int scanReadRecords(Ndb* pNdb,
     {
       gci = pOp->getValue(NdbDictionary::Column::ROW_GCI);
     }
+
+    if (use_gci64)
+    {
+      gci64 = pOp->getValue(NdbDictionary::Column::ROW_GCI64);
+    }
     
+    if (use_author)
+    {
+      author = pOp->getValue(NdbDictionary::Column::ROW_AUTHOR);
+    }
+
     check = pTrans->execute(NdbTransaction::NoCommit);   
     if( check == -1 ) {
       const NdbError err = pTrans->getNdbError();
@@ -386,6 +404,18 @@ int scanReadRecords(Ndb* pNdb,
         ndbout << "DISK_REF";
       }
 
+      if (gci64)
+      {
+        DELIMITER;
+        ndbout << "ROW$GCI64";
+      }
+
+      if (author)
+      {
+        DELIMITER;
+        ndbout << "ROW$AUTHOR";
+      }
+
       ndbout << endl;
     }
 #undef DELIMITER
@@ -426,8 +456,30 @@ int scanReadRecords(Ndb* pNdb,
 	       << " m_page: " << disk_ref->u_32_value() 
 	       << " m_page_idx: " << *(Uint16*)(disk_ref->aRef() + 4) << " ]";
       }
+
+      if (gci64)
+      {
+	if (gci64->isNULL())
+	  ndbout << "\tNULL";
+        else
+        {
+          Uint64 tmp = gci64->u_64_value();
+          ndbout << "\t" << Uint32(tmp >> 32) << "/" << Uint32(tmp);
+        }
+      }
+
+      if (author)
+      {
+	if (author->isNULL())
+	  ndbout << "\tNULL";
+        else
+        {
+          ndbout << "\t" << author->u_32_value();
+        }
+      }
+
       
-      if (rowid || disk_ref || gci || !nodata)
+      if (rowid || disk_ref || gci || !nodata || gci64 || author)
 	ndbout << endl;
       eof = pOp->nextResult();
     }

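Throughout the patch, 64-bit GCIs are printed as <high word>/<low
word>; a minimal helper capturing that convention (illustrative, not
part of the patch):

  // Prints a 64-bit GCI in the hi/lo form used by these tests/tools.
  static void printGci64(NdbOut& out, Uint64 gci)
  {
    out << Uint32(gci >> 32) << "/" << Uint32(gci & 0xffffffff);
  }
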
No bundle (reason: revision is a merge (you can force generation of a bundle with env var BZR_FORCE_BUNDLE=1)).