#At file:///home/marty/MySQL/mysql-5.1-telco-7.0_wl5151/
3407 Martin Skold 2010-02-24 [merge]
Merge
added:
storage/ndb/cmake/
storage/ndb/cmake/cmake_parse_arguments.cmake
storage/ndb/cmake/libutils.cmake
modified:
.bzrignore
mysql-test/suite/ndb/r/ndb_dd_alter.result
mysql-test/suite/ndb/r/ndb_dd_restore_compat.result
mysql-test/suite/ndb/t/show_attributes.inc
storage/ndb/CMakeLists.txt
storage/ndb/Makefile.am
storage/ndb/config/type_ndbapitools.cmake
storage/ndb/include/kernel/GlobalSignalNumbers.h
storage/ndb/include/util/Parser.hpp
storage/ndb/src/CMakeLists.txt
storage/ndb/src/kernel/blocks/CMakeLists.txt
storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
storage/ndb/src/kernel/blocks/suma/Suma.cpp
storage/ndb/src/kernel/blocks/suma/Suma.hpp
storage/ndb/src/kernel/blocks/suma/SumaInit.cpp
storage/ndb/src/kernel/vm/ArrayPool.hpp
storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp
storage/ndb/src/mgmsrv/CMakeLists.txt
storage/ndb/src/mgmsrv/MgmtSrvr.cpp
storage/ndb/src/mgmsrv/MgmtSrvr.hpp
storage/ndb/src/mgmsrv/Services.cpp
storage/ndb/src/ndbapi/NdbDictionary.cpp
storage/ndb/test/include/HugoTransactions.hpp
storage/ndb/test/include/NdbRestarter.hpp
storage/ndb/test/ndbapi/testMgm.cpp
storage/ndb/test/ndbapi/testUpgrade.cpp
storage/ndb/test/run-test/command.cpp
storage/ndb/test/run-test/db.cpp
storage/ndb/test/run-test/upgrade-tests.txt
storage/ndb/test/src/CpcClient.cpp
storage/ndb/test/src/HugoTransactions.cpp
storage/ndb/test/src/NdbRestarter.cpp
storage/ndb/tools/CMakeLists.txt
storage/ndb/tools/desc.cpp
=== modified file '.bzrignore'
--- a/.bzrignore 2010-02-12 14:17:23 +0000
+++ b/.bzrignore 2010-02-24 14:30:56 +0000
@@ -72,6 +72,7 @@
.snprj/*
.vimrc
50
+target
=6
BUILD/compile-pentium-maintainer
BitKeeper/etc/RESYNC_TREE
@@ -2750,6 +2751,7 @@ storage/ndb/src/kernel/blocks/suma/libsu
storage/ndb/src/kernel/blocks/trix/libtrix.dsp
storage/ndb/src/kernel/error/liberror.dsp
storage/ndb/src/kernel/ndbd
+storage/ndb/src/kernel/ndbmtd
storage/ndb/src/kernel/ndbd.dsp
storage/ndb/src/kernel/vm/libkernel.dsp
storage/ndb/src/libndb.ver
@@ -2765,6 +2767,7 @@ storage/ndb/src/ndbapi/libndbapi.dsp
storage/ndb/src/ndbapi/ndberror_check
storage/ndb/test/ndbapi/DbAsyncGenerator
storage/ndb/test/ndbapi/DbCreate
+storage/ndb/test/ndbapi/NdbRepStress
storage/ndb/test/ndbapi/bank/bankCreator
storage/ndb/test/ndbapi/bank/bankMakeGL
storage/ndb/test/ndbapi/bank/bankSumAccounts
@@ -2780,7 +2783,11 @@ storage/ndb/test/ndbapi/flexBench
storage/ndb/test/ndbapi/flexBench.dsp
storage/ndb/test/ndbapi/flexHammer
storage/ndb/test/ndbapi/flexTT
+storage/ndb/test/ndbapi/msa
+storage/ndb/test/ndbapi/ndbapi_50compat0
+storage/ndb/test/ndbapi/ndbapi_50compat1
storage/ndb/test/ndbapi/ndbapi_slow_select
+storage/ndb/test/ndbapi/reorg_tab
storage/ndb/test/ndbapi/testBackup
storage/ndb/test/ndbapi/testBasic
storage/ndb/test/ndbapi/testBasic.dsp
@@ -2795,28 +2802,37 @@ storage/ndb/test/ndbapi/testIndex
storage/ndb/test/ndbapi/testIndexStat
storage/ndb/test/ndbapi/testInterpreter
storage/ndb/test/ndbapi/testLcp
+storage/ndb/test/ndbapi/testLimits
storage/ndb/test/ndbapi/testMgm
+storage/ndb/test/ndbapi/testMgmd
+storage/ndb/test/ndbapi/testNDBT
storage/ndb/test/ndbapi/testNdbApi
+storage/ndb/test/ndbapi/testNdbinfo
storage/ndb/test/ndbapi/testNodeRestart
storage/ndb/test/ndbapi/testOIBasic
storage/ndb/test/ndbapi/testOperations
storage/ndb/test/ndbapi/testPartitioning
storage/ndb/test/ndbapi/testReadPerf
+storage/ndb/test/ndbapi/testReconnect
storage/ndb/test/ndbapi/testRestartGci
storage/ndb/test/ndbapi/testSRBank
storage/ndb/test/ndbapi/testScan
+storage/ndb/test/ndbapi/testScanFilter
storage/ndb/test/ndbapi/testScan.dsp
storage/ndb/test/ndbapi/testScanInterpreter
storage/ndb/test/ndbapi/testScanPerf
+storage/ndb/test/ndbapi/testSingleUserMode
storage/ndb/test/ndbapi/testSystemRestart
storage/ndb/test/ndbapi/testTimeout
storage/ndb/test/ndbapi/testTransactions
+storage/ndb/test/ndbapi/testUpgrade
storage/ndb/test/ndbapi/test_event
storage/ndb/test/ndbapi/test_event_merge
storage/ndb/test/run-test/atrt
storage/ndb/test/src/libNDBT.dsp
storage/ndb/test/tools/copy_tab
storage/ndb/test/tools/create_index
+storage/ndb/test/tools/eventlog
storage/ndb/test/tools/hugoCalculator
storage/ndb/test/tools/hugoFill
storage/ndb/test/tools/hugoLoad
@@ -2828,6 +2844,7 @@ storage/ndb/test/tools/hugoPkUpdate
storage/ndb/test/tools/hugoScanRead
storage/ndb/test/tools/hugoScanUpdate
storage/ndb/test/tools/listen_event
+storage/ndb/test/tools/ndb_connect
storage/ndb/test/tools/ndb_cpcc
storage/ndb/test/tools/rep_latency
storage/ndb/test/tools/restart
@@ -2851,6 +2868,8 @@ storage/ndb/tools/ndb_show_tables.dsp
storage/ndb/tools/ndb_test_platform
storage/ndb/tools/ndb_waiter
storage/ndb/tools/ndb_waiter.dsp
+storage/ndb/tools/ndbinfo.sql
+storage/ndb/tools/ndbinfo_sql
strings/*.ds?
strings/*.vcproj
strings/.deps/bchange.Po
=== modified file 'mysql-test/suite/ndb/r/ndb_dd_alter.result'
--- a/mysql-test/suite/ndb/r/ndb_dd_alter.result 2009-10-23 16:30:54 +0000
+++ b/mysql-test/suite/ndb/r/ndb_dd_alter.result 2010-02-18 12:51:52 +0000
@@ -87,7 +87,7 @@ a6 Time NULL AT=FIXED ST=DISK
a7 Date NULL AT=FIXED ST=DISK
a8 Datetime NULL AT=FIXED ST=DISK
a9 Varchar(255;latin1_swedish_ci) NULL AT=SHORT_VAR ST=DISK
-a10 Blob(256,2000,0) NULL AT=MEDIUM_VAR ST=DISK BV=2
+a10 Blob(256,2000,0) NULL AT=MEDIUM_VAR ST=DISK BV=2 BT=NDB$BLOB_#_#
SELECT * FROM test.t1 ORDER BY a1;
a1 a2 a3 a4 a5 a6 a7 a8 a9 a10
1 2 2000000001 aaa1 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data
@@ -187,7 +187,7 @@ a6 Time NULL AT=FIXED ST=DISK
a7 Date NULL AT=FIXED ST=DISK
a8 Datetime NULL AT=FIXED ST=DISK
a9 Varchar(255;latin1_swedish_ci) NULL AT=SHORT_VAR ST=DISK
-a10 Blob(256,2000,0) NULL AT=MEDIUM_VAR ST=DISK BV=2
+a10 Blob(256,2000,0) NULL AT=MEDIUM_VAR ST=DISK BV=2 BT=NDB$BLOB_#_#
SELECT * FROM test.t1 ORDER BY a1;
a1 a2 a3 a4 a5 a6 a7 a8 a9 a10
1 2 2000000001 aaa1 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data
@@ -274,7 +274,7 @@ a6 Time NULL AT=FIXED ST=DISK
a7 Date NULL AT=FIXED ST=DISK
a8 Datetime NULL AT=FIXED ST=DISK
a9 Varchar(255;latin1_swedish_ci) NULL AT=SHORT_VAR ST=DISK
-a10 Blob(256,2000,0) NULL AT=MEDIUM_VAR ST=DISK BV=2
+a10 Blob(256,2000,0) NULL AT=MEDIUM_VAR ST=DISK BV=2 BT=NDB$BLOB_#_#
ALTER TABLE test.t1 ENGINE=MyISAM;
SHOW CREATE TABLE test.t1;
Table Create Table
@@ -397,11 +397,11 @@ a6 Bigint NULL AT=FIXED ST=DISK
a7 Date NULL AT=FIXED ST=DISK
a8 Time NULL AT=FIXED ST=DISK
a9 Datetime NULL AT=FIXED ST=DISK
-a10 Text(256,0,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=MEMORY BV=2
-a11 Text(256,4000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2
-a12 Text(256,8000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2
-a13 Text(256,2000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2
-a14 Blob(256,2000,0) NULL AT=MEDIUM_VAR ST=DISK BV=2
+a10 Text(256,0,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=MEMORY BV=2 BT=<none>
+a11 Text(256,4000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2 BT=NDB$BLOB_#_#
+a12 Text(256,8000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2 BT=NDB$BLOB_#_#
+a13 Text(256,2000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2 BT=NDB$BLOB_#_#
+a14 Blob(256,2000,0) NULL AT=MEDIUM_VAR ST=DISK BV=2 BT=NDB$BLOB_#_#
SELECT a1, a2,a3,hex(a4),a5,a6,a7,a8,a9,a10,a11,a12,a13 FROM test.t1 ORDER BY a1;
a1 a2 a3 hex(a4) a5 a6 a7 a8 a9 a10 a11 a12 a13
1 2.2345 20000001 0 1 23457 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field
@@ -468,11 +468,11 @@ a6 Bigint NULL AT=FIXED ST=DISK
a7 Date NULL AT=FIXED ST=DISK
a8 Time NULL AT=FIXED ST=DISK
a9 Datetime NULL AT=FIXED ST=DISK
-a10 Text(256,0,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=MEMORY BV=2
-a11 Text(256,4000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2
-a12 Text(256,8000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2
-a13 Text(256,2000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2
-a14 Blob(256,2000,0) NULL AT=MEDIUM_VAR ST=DISK BV=2
+a10 Text(256,0,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=MEMORY BV=2 BT=<none>
+a11 Text(256,4000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2 BT=NDB$BLOB_#_#
+a12 Text(256,8000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2 BT=NDB$BLOB_#_#
+a13 Text(256,2000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2 BT=NDB$BLOB_#_#
+a14 Blob(256,2000,0) NULL AT=MEDIUM_VAR ST=DISK BV=2 BT=NDB$BLOB_#_#
ALTER TABLE test.t1 DROP INDEX a2_i;
SHOW CREATE TABLE test.t1;
Table Create Table
@@ -504,11 +504,11 @@ a6 Bigint NULL AT=FIXED ST=DISK
a7 Date NULL AT=FIXED ST=DISK
a8 Time NULL AT=FIXED ST=DISK
a9 Datetime NULL AT=FIXED ST=DISK
-a10 Text(256,0,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=MEMORY BV=2
-a11 Text(256,4000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2
-a12 Text(256,8000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2
-a13 Text(256,2000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2
-a14 Blob(256,2000,0) NULL AT=MEDIUM_VAR ST=DISK BV=2
+a10 Text(256,0,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=MEMORY BV=2 BT=<none>
+a11 Text(256,4000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2 BT=NDB$BLOB_#_#
+a12 Text(256,8000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2 BT=NDB$BLOB_#_#
+a13 Text(256,2000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2 BT=NDB$BLOB_#_#
+a14 Blob(256,2000,0) NULL AT=MEDIUM_VAR ST=DISK BV=2 BT=NDB$BLOB_#_#
TRUNCATE TABLE test.t1;
SHOW CREATE TABLE test.t1;
Table Create Table
@@ -540,11 +540,11 @@ a6 Bigint NULL AT=FIXED ST=DISK
a7 Date NULL AT=FIXED ST=DISK
a8 Time NULL AT=FIXED ST=DISK
a9 Datetime NULL AT=FIXED ST=DISK
-a10 Text(256,0,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=MEMORY BV=2
-a11 Text(256,4000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2
-a12 Text(256,8000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2
-a13 Text(256,2000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2
-a14 Blob(256,2000,0) NULL AT=MEDIUM_VAR ST=DISK BV=2
+a10 Text(256,0,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=MEMORY BV=2 BT=<none>
+a11 Text(256,4000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2 BT=NDB$BLOB_#_#
+a12 Text(256,8000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2 BT=NDB$BLOB_#_#
+a13 Text(256,2000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=DISK BV=2 BT=NDB$BLOB_#_#
+a14 Blob(256,2000,0) NULL AT=MEDIUM_VAR ST=DISK BV=2 BT=NDB$BLOB_#_#
ALTER TABLE test.t1 DROP a14;
ALTER TABLE test.t1 DROP a13;
ALTER TABLE test.t1 DROP a12;
=== modified file 'mysql-test/suite/ndb/r/ndb_dd_restore_compat.result'
--- a/mysql-test/suite/ndb/r/ndb_dd_restore_compat.result 2009-07-13 13:22:46 +0000
+++ b/mysql-test/suite/ndb/r/ndb_dd_restore_compat.result 2010-02-18 12:51:52 +0000
@@ -36,7 +36,7 @@ a6 Time NULL AT=FIXED ST=DISK
a7 Date NULL AT=FIXED ST=DISK
a8 Datetime NULL AT=FIXED ST=DISK
a9 Varchar(255;latin1_swedish_ci) NULL AT=FIXED ST=DISK
-a10 Blob(256,2000,16) NULL AT=FIXED ST=DISK BV=1
+a10 Blob(256,2000,16) NULL AT=FIXED ST=DISK BV=1 BT=NDB$BLOB_#_#
select * from t1 order by a1;
a1 a2 a3 a4 a5 a6 a7 a8 a9 a10
1 2 2000000001 aaa1 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data
=== modified file 'mysql-test/suite/ndb/t/show_attributes.inc'
--- a/mysql-test/suite/ndb/t/show_attributes.inc 2009-07-15 20:47:18 +0000
+++ b/mysql-test/suite/ndb/t/show_attributes.inc 2010-02-18 12:51:52 +0000
@@ -14,6 +14,7 @@ eval LOAD DATA INFILE '$dump_file' INTO
--remove_file $dump_file
# Query the temporary table
+--replace_regex /BT=NDB\$BLOB_[0-9]+_[0-9]+/BT=NDB$BLOB_#_#/
SELECT TRIM(TRAILING "\r" FROM a) as 'Attributes:' FROM test.desc
WHERE a LIKE BINARY "%ST=%";
=== modified file 'storage/ndb/CMakeLists.txt'
--- a/storage/ndb/CMakeLists.txt 2009-12-16 10:02:54 +0000
+++ b/storage/ndb/CMakeLists.txt 2010-02-19 03:00:21 +0000
@@ -13,6 +13,8 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+INCLUDE("${CMAKE_SOURCE_DIR}/storage/ndb/cmake/libutils.cmake")
+
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DVM_TRACE -DNDB_DEBUG -DERROR_INSERT -DARRAY_GUARD")
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DVM_TRACE -DNDB_DEBUG -DERROR_INSERT -DARRAY_GUARD")
=== modified file 'storage/ndb/Makefile.am'
--- a/storage/ndb/Makefile.am 2009-05-27 15:21:45 +0000
+++ b/storage/ndb/Makefile.am 2010-02-19 06:00:01 +0000
@@ -16,7 +16,7 @@
SUBDIRS = src tools . include swig @ndb_opt_subdirs@
DIST_SUBDIRS = src tools include test docs swig
-EXTRA_DIST = config ndbapi-examples plug.in CMakeLists.txt
+EXTRA_DIST = config cmake ndbapi-examples plug.in CMakeLists.txt
DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
include $(top_srcdir)/storage/ndb/config/common.mk.am
=== added directory 'storage/ndb/cmake'
=== added file 'storage/ndb/cmake/cmake_parse_arguments.cmake'
--- a/storage/ndb/cmake/cmake_parse_arguments.cmake 1970-01-01 00:00:00 +0000
+++ b/storage/ndb/cmake/cmake_parse_arguments.cmake 2010-02-18 10:45:20 +0000
@@ -0,0 +1,47 @@
+
+# Copyright (C) 2007 MySQL AB, 2009 Sun Microsystems,Inc
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+# Handy macro to parse macro arguments
+MACRO(CMAKE_PARSE_ARGUMENTS prefix arg_names option_names)
+ SET(DEFAULT_ARGS)
+ FOREACH(arg_name ${arg_names})
+ SET(${prefix}_${arg_name})
+ ENDFOREACH(arg_name)
+ FOREACH(option ${option_names})
+ SET(${prefix}_${option} FALSE)
+ ENDFOREACH(option)
+
+ SET(current_arg_name DEFAULT_ARGS)
+ SET(current_arg_list)
+ FOREACH(arg ${ARGN})
+ SET(larg_names ${arg_names})
+ LIST(FIND larg_names "${arg}" is_arg_name)
+ IF (is_arg_name GREATER -1)
+ SET(${prefix}_${current_arg_name} ${current_arg_list})
+ SET(current_arg_name ${arg})
+ SET(current_arg_list)
+ ELSE (is_arg_name GREATER -1)
+ SET(loption_names ${option_names})
+ LIST(FIND loption_names "${arg}" is_option)
+ IF (is_option GREATER -1)
+ SET(${prefix}_${arg} TRUE)
+ ELSE (is_option GREATER -1)
+ SET(current_arg_list ${current_arg_list} ${arg})
+ ENDIF (is_option GREATER -1)
+ ENDIF (is_arg_name GREATER -1)
+ ENDFOREACH(arg)
+ SET(${prefix}_${current_arg_name} ${current_arg_list})
+ENDMACRO()
\ No newline at end of file
=== added file 'storage/ndb/cmake/libutils.cmake'
--- a/storage/ndb/cmake/libutils.cmake 1970-01-01 00:00:00 +0000
+++ b/storage/ndb/cmake/libutils.cmake 2010-02-18 10:45:20 +0000
@@ -0,0 +1,296 @@
+# Copyright (C) 2009 Sun Microsystems, Inc
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+# This file exports macros that emulate some functionality found in GNU libtool
+# on Unix systems. One such feature is convenience libraries. In this context,
+# convenience library is a static library that can be linked to shared library
+# On systems that force position-independent code, linking into shared library
+# normally requires compilation with a special flag (often -fPIC). To enable
+# linking static libraries to shared, we compile source files that come into
+# static library with the PIC flag (${CMAKE_SHARED_LIBRARY_C_FLAGS} in CMake)
+# Some systems, like Windows or OSX do not need special compilation (Windows
+# never uses PIC and OSX always uses it).
+#
+# The intention behind convenience libraries is to simplify the build and to reduce
+# excessive recompiles.
+
+# Except for convenience libraries, this file provides macros to merge static
+# libraries (we need it for mysqlclient) and to create shared library out of
+# convenience libraries(again, for mysqlclient)
+
+# Following macros are exported
+# - ADD_CONVENIENCE_LIBRARY(target source1...sourceN)
+# This macro creates convenience library. The functionality is similar to
+# ADD_LIBRARY(target STATIC source1...sourceN), the difference is that resulting
+# library can always be linked to shared library
+#
+# - MERGE_LIBRARIES(target [STATIC|SHARED|MODULE] [linklib1 .... linklibN]
+# [EXPORTS exported_func1 .... exported_func_N]
+# [OUTPUT_NAME output_name]
+# This macro merges several static libraries into a single one or creates a shared
+# library from several convenience libraries
+
+# Important global flags
+# - WITH_PIC : If set, it is assumed that everything is compiled as position
+# independent code (that is CFLAGS/CMAKE_C_FLAGS contain -fPIC or equivalent)
+# If defined, ADD_CONVENIENCE_LIBRARY does not add PIC flag to compile flags
+#
+# - DISABLE_SHARED: If set, it is assumed that shared libraries are not produced
+# during the build. ADD_CONVENIENCE_LIBRARY does not add anything to compile flags
+
+
+GET_FILENAME_COMPONENT(MYSQL_CMAKE_SCRIPT_DIR ${CMAKE_CURRENT_LIST_FILE} PATH)
+IF(WIN32 OR CYGWIN OR APPLE OR WITH_PIC OR DISABLE_SHARED OR NOT CMAKE_SHARED_LIBRARY_C_FLAGS)
+ SET(_SKIP_PIC 1)
+ENDIF()
+
+INCLUDE(${MYSQL_CMAKE_SCRIPT_DIR}/cmake_parse_arguments.cmake)
+# CREATE_EXPORT_FILE (VAR target api_functions)
+# Internal macro, used to create source file for shared libraries that
+# otherwise consists entirely of "convenience" libraries. On Windows,
+# also exports API functions as dllexport. On unix, creates a dummy file
+# that references all exports and this prevents linker from creating an
+# empty library(there are unportable alternatives, --whole-archive)
+MACRO(CREATE_EXPORT_FILE VAR TARGET API_FUNCTIONS)
+ IF(WIN32)
+ SET(DUMMY ${CMAKE_CURRENT_BINARY_DIR}/${TARGET}_dummy.c)
+ SET(EXPORTS ${CMAKE_CURRENT_BINARY_DIR}/${TARGET}_exports.def)
+ CONFIGURE_FILE_CONTENT("" ${DUMMY})
+ SET(CONTENT "EXPORTS\n")
+ FOREACH(FUNC ${API_FUNCTIONS})
+ SET(CONTENT "${CONTENT} ${FUNC}\n")
+ ENDFOREACH()
+ CONFIGURE_FILE_CONTENT(${CONTENT} ${EXPORTS})
+ SET(${VAR} ${DUMMY} ${EXPORTS})
+ ELSE()
+ SET(EXPORTS ${CMAKE_CURRENT_BINARY_DIR}/${TARGET}_exports_file.cc)
+ SET(CONTENT)
+ FOREACH(FUNC ${API_FUNCTIONS})
+ SET(CONTENT "${CONTENT} extern void* ${FUNC}\;\n")
+ ENDFOREACH()
+ SET(CONTENT "${CONTENT} void *${TARGET}_api_funcs[] = {\n")
+ FOREACH(FUNC ${API_FUNCTIONS})
+ SET(CONTENT "${CONTENT} &${FUNC},\n")
+ ENDFOREACH()
+ SET(CONTENT "${CONTENT} (void *)0\n}\;")
+ CONFIGURE_FILE_CONTENT(${CONTENT} ${EXPORTS})
+ SET(${VAR} ${EXPORTS})
+ ENDIF()
+ENDMACRO()
+
+
+# MYSQL_ADD_CONVENIENCE_LIBRARY(name source1...sourceN)
+# Create static library that can be linked to shared library.
+# On systems that force position-independent code, adds -fPIC or
+# equivalent flag to compile flags.
+MACRO(ADD_CONVENIENCE_LIBRARY)
+ SET(TARGET ${ARGV0})
+ SET(SOURCES ${ARGN})
+ LIST(REMOVE_AT SOURCES 0)
+ ADD_LIBRARY(${TARGET} STATIC ${SOURCES})
+ IF(NOT _SKIP_PIC)
+ SET_TARGET_PROPERTIES(${TARGET} PROPERTIES COMPILE_FLAGS
+ "${CMAKE_SHARED_LIBRARY_C_FLAGS}")
+ ENDIF()
+ENDMACRO()
+
+
+# Write content to file, using CONFIGURE_FILE
+# The advantage compared to FILE(WRITE) is that timestamp
+# does not change if file already has the same content
+MACRO(CONFIGURE_FILE_CONTENT content file)
+ SET(CMAKE_CONFIGURABLE_FILE_CONTENT
+ "${content}\n")
+ CONFIGURE_FILE(
+ ${MYSQL_CMAKE_SCRIPT_DIR}/configurable_file_content.in
+ ${file}
+ @ONLY)
+ENDMACRO()
+
+# Merge static libraries into a big static lib. The resulting library
+# should not have dependencies on other static libraries.
+# We use it in MySQL to merge mysys,dbug,vio etc into mysqlclient
+
+MACRO(MERGE_STATIC_LIBS TARGET OUTPUT_NAME LIBS_TO_MERGE)
+ # To produce a library we need at least one source file.
+ # It is created by ADD_CUSTOM_COMMAND below and will
+ # also help to track dependencies.
+ SET(SOURCE_FILE ${CMAKE_CURRENT_BINARY_DIR}/${TARGET}_depends.c)
+ ADD_LIBRARY(${TARGET} STATIC ${SOURCE_FILE})
+ SET_TARGET_PROPERTIES(${TARGET} PROPERTIES OUTPUT_NAME ${OUTPUT_NAME})
+
+ SET(OSLIBS)
+ FOREACH(LIB ${LIBS_TO_MERGE})
+ GET_TARGET_PROPERTY(LIB_LOCATION ${LIB} LOCATION)
+ GET_TARGET_PROPERTY(LIB_TYPE ${LIB} TYPE)
+ IF(NOT LIB_LOCATION)
+ # 3rd party library like libz.so. Make sure that everything
+ # that links to our library links to this one as well.
+ LIST(APPEND OSLIBS ${LIB})
+ ELSE()
+ # This is a target in current project
+ # (can be a static or shared lib)
+ IF(LIB_TYPE STREQUAL "STATIC_LIBRARY")
+ SET(STATIC_LIBS ${STATIC_LIBS} ${LIB_LOCATION})
+ ADD_DEPENDENCIES(${TARGET} ${LIB})
+ # Extract dependent OS libraries
+ GET_DEPENDEND_OS_LIBS(${LIB} LIB_OSLIBS)
+ LIST(APPEND OSLIBS ${LIB_OSLIBS})
+ ELSE()
+ # This is a shared library our static lib depends on.
+ LIST(APPEND OSLIBS ${LIB})
+ ENDIF()
+ ENDIF()
+ ENDFOREACH()
+ IF(OSLIBS)
+ LIST(REMOVE_DUPLICATES OSLIBS)
+ TARGET_LINK_LIBRARIES(${TARGET} ${OSLIBS})
+ ENDIF()
+
+ # Make the generated dummy source file depend on all static input
+ # libs. If an input lib changes, the source file is touched
+ # which causes the desired effect (relink).
+ ADD_CUSTOM_COMMAND(
+ OUTPUT ${SOURCE_FILE}
+ COMMAND ${CMAKE_COMMAND} -E touch ${SOURCE_FILE}
+ DEPENDS ${STATIC_LIBS})
+
+ IF(MSVC)
+ # To merge libs, just pass them to lib.exe command line.
+ SET(LINKER_EXTRA_FLAGS "")
+ FOREACH(LIB ${STATIC_LIBS})
+ SET(LINKER_EXTRA_FLAGS "${LINKER_EXTRA_FLAGS} ${LIB}")
+ ENDFOREACH()
+ SET_TARGET_PROPERTIES(${TARGET} PROPERTIES STATIC_LIBRARY_FLAGS
+ "${LINKER_EXTRA_FLAGS}")
+ ELSE()
+ GET_TARGET_PROPERTY(TARGET_LOCATION ${TARGET} LOCATION)
+ IF(APPLE)
+ # Use OSX's libtool to merge archives (it handles universal
+ # binaries properly)
+ ADD_CUSTOM_COMMAND(TARGET ${TARGET} POST_BUILD
+ COMMAND rm ${TARGET_LOCATION}
+ COMMAND /usr/bin/libtool -static -o ${TARGET_LOCATION}
+ ${STATIC_LIBS}
+ )
+ ELSE()
+ # Generic Unix, Cygwin or MinGW. In post-build step, call
+ # script, that extracts objects from archives with "ar x"
+ # and repacks them with "ar r"
+ SET(TARGET ${TARGET})
+ CONFIGURE_FILE(
+ ${MYSQL_CMAKE_SCRIPT_DIR}/merge_archives_unix.cmake.in
+ ${CMAKE_CURRENT_BINARY_DIR}/merge_archives_${TARGET}.cmake
+ @ONLY
+ )
+ ADD_CUSTOM_COMMAND(TARGET ${TARGET} POST_BUILD
+ COMMAND rm ${TARGET_LOCATION}
+ COMMAND ${CMAKE_COMMAND} -P
+ ${CMAKE_CURRENT_BINARY_DIR}/merge_archives_${TARGET}.cmake
+ )
+ ENDIF()
+ ENDIF()
+ENDMACRO()
+
+# Create libs from libs.
+# Merges static libraries, creates shared libraries out of convenience libraries.
+# MERGE_LIBRARIES(target [STATIC|SHARED|MODULE]
+# [linklib1 .... linklibN]
+# [EXPORTS exported_func1 .... exportedFuncN]
+# [OUTPUT_NAME output_name]
+#)
+MACRO(MERGE_LIBRARIES)
+ CMAKE_PARSE_ARGUMENTS(ARG
+ "EXPORTS;OUTPUT_NAME"
+ "STATIC;SHARED;MODULE;NOINSTALL"
+ ${ARGN}
+ )
+ LIST(GET ARG_DEFAULT_ARGS 0 TARGET)
+ SET(LIBS ${ARG_DEFAULT_ARGS})
+ LIST(REMOVE_AT LIBS 0)
+ IF(ARG_STATIC)
+ IF (NOT ARG_OUTPUT_NAME)
+ SET(ARG_OUTPUT_NAME ${TARGET})
+ ENDIF()
+ MERGE_STATIC_LIBS(${TARGET} ${ARG_OUTPUT_NAME} "${LIBS}")
+ ELSEIF(ARG_SHARED OR ARG_MODULE)
+ IF(ARG_SHARED)
+ SET(LIBTYPE SHARED)
+ ELSE()
+ SET(LIBTYPE MODULE)
+ ENDIF()
+ # check for non-PIC libraries
+ IF(NOT _SKIP_PIC)
+ FOREACH(LIB ${LIBS})
+ GET_TARGET_PROPERTY(${LIB} TYPE LIBTYPE)
+ IF(LIBTYPE STREQUAL "STATIC_LIBRARY")
+ GET_TARGET_PROPERTY(LIB COMPILE_FLAGS LIB_COMPILE_FLAGS)
+ STRING(REPLACE "${CMAKE_SHARED_LIBRARY_C_FLAGS}"
+ "<PIC_FLAG>" LIB_COMPILE_FLAGS ${LIB_COMPILE_FLAG})
+ IF(NOT LIB_COMPILE_FLAGS MATCHES "<PIC_FLAG>")
+ MESSAGE(FATAL_ERROR
+ "Attempted to link non-PIC static library ${LIB} to shared library ${TARGET}\n"
+ "Please use ADD_CONVENIENCE_LIBRARY, instead of ADD_LIBRARY for ${LIB}"
+ )
+ ENDIF()
+ ENDIF()
+ ENDFOREACH()
+ ENDIF()
+ CREATE_EXPORT_FILE(SRC ${TARGET} "${ARG_EXPORTS}")
+ ADD_LIBRARY(${TARGET} ${LIBTYPE} ${SRC})
+ TARGET_LINK_LIBRARIES(${TARGET} ${LIBS})
+ IF(ARG_OUTPUT_NAME)
+ SET_TARGET_PROPERTIES(${TARGET} PROPERTIES OUTPUT_NAME "${ARG_OUTPUT_NAME}")
+ ENDIF()
+ ELSE()
+ MESSAGE(FATAL_ERROR "Unknown library type")
+ ENDIF()
+ IF(NOT ARG_NOINSTALL)
+ MYSQL_INSTALL_TARGETS(${TARGET} DESTINATION "${INSTALL_LIBDIR}")
+ ENDIF()
+ SET_TARGET_PROPERTIES(${TARGET} PROPERTIES LINK_INTERFACE_LIBRARIES "")
+ENDMACRO()
+
+FUNCTION(GET_DEPENDEND_OS_LIBS target result)
+ SET(deps ${${target}_LIB_DEPENDS})
+ IF(deps)
+ FOREACH(lib ${deps})
+ # Filter out keywords used for debug vs optimized builds
+ IF(NOT lib MATCHES "general" AND NOT lib MATCHES "debug" AND NOT lib MATCHES "optimized")
+ GET_TARGET_PROPERTY(lib_location ${lib} LOCATION)
+ IF(NOT lib_location)
+ SET(ret ${ret} ${lib})
+ ENDIF()
+ ENDIF()
+ ENDFOREACH()
+ ENDIF()
+ SET(${result} ${ret} PARENT_SCOPE)
+ENDFUNCTION()
+
+MACRO(RESTRICT_SYMBOL_EXPORTS target)
+ IF(CMAKE_COMPILER_IS_GNUCXX AND UNIX)
+ CHECK_C_COMPILER_FLAG("-fvisibility=hidden" HAVE_VISIBILITY_HIDDEN)
+ IF(HAVE_VISIBILITY_HIDDEN)
+ GET_TARGET_PROPERTY(COMPILE_FLAGS ${target} COMPILE_FLAGS)
+ IF(NOT COMPILE_FLAGS)
+ # Avoid COMPILE_FLAGS-NOTFOUND
+ SET(COMPILE_FLAGS)
+ ENDIF()
+ SET_TARGET_PROPERTIES(${target} PROPERTIES
+ COMPILE_FLAGS "${COMPILE_FLAGS} -fvisibility=hidden")
+ ENDIF()
+ ENDIF()
+ENDMACRO()
=== modified file 'storage/ndb/config/type_ndbapitools.cmake'
--- a/storage/ndb/config/type_ndbapitools.cmake 2008-08-20 13:22:09 +0000
+++ b/storage/ndb/config/type_ndbapitools.cmake 2010-02-18 10:45:20 +0000
@@ -17,7 +17,7 @@ LINK_LIBRARIES(ndbclient
dbug
mysys
strings
- ${NDB_SCI_LIBS})
+ )
IF(WIN32)
LINK_LIBRARIES(wsock32)
ENDIF(WIN32)
=== modified file 'storage/ndb/include/kernel/GlobalSignalNumbers.h'
--- a/storage/ndb/include/kernel/GlobalSignalNumbers.h 2009-10-15 12:36:53 +0000
+++ b/storage/ndb/include/kernel/GlobalSignalNumbers.h 2010-02-18 06:30:45 +0000
@@ -191,7 +191,7 @@ extern const GlobalSignalNumber NO_OF_SI
/* 120 not unused */
#define GSN_ROUTE_ORD 121
#define GSN_NODE_VERSION_REP 122
-/* 123 unused */
+/* 123 not unused */
/* 124 unused */
#define GSN_CHECK_LCP_STOP 125
#define GSN_CLOSE_COMCONF 126 /* local */
@@ -673,6 +673,7 @@ extern const GlobalSignalNumber NO_OF_SI
#define GSN_TCINDXNEXTCONF 525
#define GSN_TCINDXNEXREF 526
#define GSN_FIRE_TRIG_ORD 527
+#define GSN_FIRE_TRIG_ORD_L 123 /* local from TUP to SUMA */
/**
* These are used only by kernel
=== modified file 'storage/ndb/include/util/Parser.hpp'
--- a/storage/ndb/include/util/Parser.hpp 2009-05-26 18:53:34 +0000
+++ b/storage/ndb/include/util/Parser.hpp 2010-02-18 23:01:15 +0000
@@ -70,7 +70,8 @@ public:
const ParserRow<T> * m_currentCmd;
const ParserRow<T> * m_currentArg;
char * m_currentToken;
- char m_tokenBuffer[512];
+ STATIC_CONST(MaxParseBytes = 512);
+ char m_tokenBuffer[ MaxParseBytes ];
NdbMutex *m_mutex;
Vector<const ParserRow<T> *> m_aliasUsed;
=== modified file 'storage/ndb/src/CMakeLists.txt'
--- a/storage/ndb/src/CMakeLists.txt 2009-10-19 11:15:38 +0000
+++ b/storage/ndb/src/CMakeLists.txt 2010-02-18 10:45:20 +0000
@@ -24,17 +24,17 @@ ADD_SUBDIRECTORY(mgmsrv)
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/storage/ndb/include
{CMAKE_SOURCE_DIR}/include)
-IF(NOT EXISTS cmake_dummy.cpp)
- FILE(WRITE ${CMAKE_CURRENT_SOURCE_DIR}/cmake_dummy.cpp "")
-ENDIF()
-ADD_LIBRARY(ndbclient STATIC cmake_dummy.cpp)
-TARGET_LINK_LIBRARIES(ndbclient
- ndbapi
- ndbtransport
- ndbtrace
- ndbsignaldata
- ndbmgmapi
- ndbmgmsrv
- ndblogger
- ndbportlib
- ndbgeneral)
+MERGE_LIBRARIES(ndbclient STATIC NOINSTALL
+ mysys dbug strings
+ ndbapi
+ ndbtransport
+ ndbtrace
+ ndbsignaldata
+ ndbmgmapi
+ ndbmgmsrv
+ ndblogger
+ ndbportlib
+ ndbgeneral
+ ndbconf
+)
+TARGET_LINK_LIBRARIES(ndbclient ws2_32)
=== modified file 'storage/ndb/src/kernel/blocks/CMakeLists.txt'
--- a/storage/ndb/src/kernel/blocks/CMakeLists.txt 2010-01-04 00:59:59 +0000
+++ b/storage/ndb/src/kernel/blocks/CMakeLists.txt 2010-02-18 10:45:20 +0000
@@ -72,4 +72,4 @@ ADD_EXECUTABLE(ndb_print_file
diskpage.cpp
dbtup/tuppage.cpp
)
-TARGET_LINK_LIBRARIES(ndb_print_file ndbclient dbug mysys strings)
+TARGET_LINK_LIBRARIES(ndb_print_file ndbclient)
=== modified file 'storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp'
--- a/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp 2010-01-25 16:22:52 +0000
+++ b/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp 2010-02-15 12:43:01 +0000
@@ -971,6 +971,7 @@ private:
Page8Ptr spPageptr;
Uint32 cfirstfreepage;
Uint32 cpagesize;
+ Uint32 cpageCount;
Uint32 cnoOfAllocatedPages;
Uint32 cnoOfAllocatedPagesMax;
/* --------------------------------------------------------------------------------- */
=== modified file 'storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp'
--- a/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp 2010-01-25 16:22:52 +0000
+++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp 2010-02-15 12:43:01 +0000
@@ -44,7 +44,7 @@ void Dbacc::initData()
scanRec = 0;
tabrec = 0;
- cnoOfAllocatedPagesMax = cnoOfAllocatedPages = cpagesize = 0;
+ cnoOfAllocatedPagesMax = cnoOfAllocatedPages = cpagesize = cpageCount = 0;
// Records with constant sizes
RSS_OP_COUNTER_INIT(cnoOfFreeFragrec);
@@ -71,6 +71,8 @@ void Dbacc::initRecords()
* 2) Add chunks to cfirstfreepage-list
*/
cfirstfreepage = RNIL;
+ cpagesize = 0;
+ cpageCount = 0;
for (Int32 i = chunkcnt - 1; i >= 0; i--)
{
Ptr<GlobalPage> pagePtr;
@@ -85,16 +87,12 @@ void Dbacc::initRecords()
base[j].word32[0] = ptrI + j + 1;
}
- if (cfirstfreepage == RNIL)
- {
- base[cnt-1].word32[0] = RNIL;
- cfirstfreepage = ptrI;
- }
- else
- {
- base[cnt-1].word32[0] = cfirstfreepage;
- cfirstfreepage = ptrI;
- }
+ base[cnt-1].word32[0] = cfirstfreepage;
+ cfirstfreepage = ptrI;
+
+ cpageCount += cnt;
+ if (ptrI + cnt > cpagesize)
+ cpagesize = ptrI + cnt;
}
}
=== modified file 'storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp 2010-01-25 16:22:52 +0000
+++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp 2010-02-15 12:43:01 +0000
@@ -112,7 +112,7 @@ void Dbacc::execCONTINUEB(Signal* signal
jam();
Uint32 cnt = signal->theData[1];
static int c_currentMemUsed = 0;
- int now = cpagesize ? (cnoOfAllocatedPages * 100)/cpagesize : 0;
+ int now = cpageCount ? (cnoOfAllocatedPages * 100)/cpageCount : 0;
const int thresholds[] = { 99, 90, 80, 0};
Uint32 i = 0;
@@ -8255,7 +8255,7 @@ Dbacc::reportMemoryUsage(Signal* signal,
signal->theData[1] = gth;
signal->theData[2] = sizeof(* rpPageptr.p);
signal->theData[3] = cnoOfAllocatedPages;
- signal->theData[4] = cpagesize;
+ signal->theData[4] = cpageCount;
signal->theData[5] = DBACC;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 6, JBB);
}
@@ -8278,7 +8278,7 @@ void Dbacc::execDBINFO_SCANREQ(Signal *s
{
{ "Index memory",
cnoOfAllocatedPages,
- cpagesize,
+ cpageCount,
sizeof(page8),
cnoOfAllocatedPagesMax = 0,
{ CFG_DB_INDEX_MEM,0,0,0 }},
=== modified file 'storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp 2010-01-28 15:16:46 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp 2010-02-15 13:30:39 +0000
@@ -2751,6 +2751,7 @@ private:
LogPageRecordPtr logPagePtr;
UintR cfirstfreeLogPage;
UintR clogPageFileSize;
+ Uint32 clogPageCount;
#define ZPAGE_REF_FILE_SIZE 20
PageRefRecord *pageRefRecord;
=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp 2010-01-28 15:16:46 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp 2010-02-15 13:30:39 +0000
@@ -137,6 +137,8 @@ void Dblqh::initRecords()
}
cfirstfreeLogPage = RNIL;
+ clogPageFileSize = 0;
+ clogPageCount = 0;
for (Int32 i = chunkcnt - 1; i >= 0; i--)
{
const Uint32 cnt = chunks[i].cnt;
@@ -156,18 +158,14 @@ void Dblqh::initRecords()
base[j].logPageWord[ZPOS_IN_WRITING]= 0;
}
- if (cfirstfreeLogPage == RNIL)
- {
- base[cnt-1].logPageWord[ZNEXT_PAGE] = RNIL;
- cfirstfreeLogPage = ptrI;
- }
- else
- {
- base[cnt-1].logPageWord[ZNEXT_PAGE] = cfirstfreeLogPage;
- cfirstfreeLogPage = ptrI;
- }
+ base[cnt-1].logPageWord[ZNEXT_PAGE] = cfirstfreeLogPage;
+ cfirstfreeLogPage = ptrI;
+
+ clogPageCount += cnt;
+ if (ptrI + cnt > clogPageFileSize)
+ clogPageFileSize = ptrI + cnt;
}
- cnoOfLogPages = clogPageFileSize;
+ cnoOfLogPages = clogPageCount;
}
#ifndef NO_REDO_PAGE_CACHE
=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp 2010-02-04 21:15:23 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp 2010-02-18 06:30:45 +0000
@@ -13752,8 +13752,9 @@ void
Dblqh::execSUB_GCP_COMPLETE_REP(Signal* signal)
{
jamEntry();
- sendSignal(SUMA_REF, GSN_SUB_GCP_COMPLETE_REP, signal,
- signal->getLength(), JBB);
+ Uint32 len = signal->getLength();
+ EXECUTE_DIRECT(DBTUP, GSN_SUB_GCP_COMPLETE_REP, signal, len);
+ sendSignal(SUMA_REF, GSN_SUB_GCP_COMPLETE_REP, signal, len, JBB);
}
/* ------------------------------------------------------------------------- */
@@ -21932,8 +21933,8 @@ void Dblqh::execDBINFO_SCANREQ(Signal *s
case Ndbinfo::LOGBUFFERS_TABLEID:
{
const size_t entry_size = sizeof(LogPageRecord);
- const Uint64 free = cnoOfLogPages;
- const Uint64 total = clogPageFileSize;
+ const Uint64 free = cnoOfLogPages;
+ const Uint64 total = clogPageCount;
const Uint64 high = 0; // TODO
Ndbinfo::Row row(signal, req);
=== modified file 'storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp 2010-01-13 13:28:03 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp 2010-02-18 06:30:45 +0000
@@ -1777,6 +1777,7 @@ private:
void execFSREMOVECONF(Signal*);
void execDBINFO_SCANREQ(Signal*);
+ void execSUB_GCP_COMPLETE_REP(Signal*);
//------------------------------------------------------------------
//------------------------------------------------------------------
@@ -2611,15 +2612,6 @@ private:
Uint32 noOfAttributes,
Uint32* inBuffer);
- void sendFireTrigOrd(Signal* signal,
- KeyReqStruct *req_struct,
- Operationrec * regOperPtr,
- TupTriggerData* trigPtr,
- Uint32 fragmentId,
- Uint32 noPrimKeySignals,
- Uint32 noBeforeSignals,
- Uint32 noAfterSignals);
-
bool primaryKey(Tablerec* const, Uint32);
// these set terrorCode and return non-zero on error
@@ -2658,6 +2650,18 @@ private:
void removeTuxEntries(Signal* signal,
Tablerec* regTabPtr);
+ void ndbmtd_buffer_suma_trigger(Signal* signal, Uint32 len,
+ LinearSectionPtr ptr[]);
+ void flush_ndbmtd_suma_buffer(Signal*);
+
+ struct SumaTriggerBuffer
+ {
+ SumaTriggerBuffer() { m_out_of_memory = 0;m_pageId = RNIL; m_freeWords = 0;}
+ Uint32 m_out_of_memory;
+ Uint32 m_pageId;
+ Uint32 m_freeWords;
+ } m_suma_trigger_buffer;
+
// *****************************************************************
// Error Handling routines.
// *****************************************************************
=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp 2010-01-13 13:28:03 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp 2010-02-18 06:30:45 +0000
@@ -125,6 +125,7 @@ Dbtup::Dbtup(Block_context& ctx, Uint32
addRecSignal(GSN_FSREMOVECONF, &Dbtup::execFSREMOVECONF, true);
addRecSignal(GSN_DROP_FRAG_REQ, &Dbtup::execDROP_FRAG_REQ);
+ addRecSignal(GSN_SUB_GCP_COMPLETE_REP, &Dbtup::execSUB_GCP_COMPLETE_REP);
fragoperrec = 0;
fragrecord = 0;
=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp 2009-12-14 22:14:34 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp 2010-02-18 06:30:45 +0000
@@ -197,7 +197,16 @@ Dbtup::execDROP_TRIG_IMPL_REQ(Signal* si
// Drop trigger
Uint32 r = dropTrigger(tabPtr.p, req, refToBlock(receiverRef));
- if (r == 0){
+ if (r == 0)
+ {
+ /**
+ * make sure that any trigger data is sent before DROP_TRIG_CONF
+ * NOTE: This is only needed for SUMA triggers
+ * (which are the only buffered ones) but it shouldn't
+ * be too bad to do it for all triggers...
+ */
+ flush_ndbmtd_suma_buffer(signal);
+
// Send conf
DropTrigImplConf* conf = (DropTrigImplConf*)signal->getDataPtrSend();
conf->senderRef = reference();
@@ -939,6 +948,7 @@ out:
jam();
return;
}
+
//--------------------------------------------------------------------
// Now all data for this trigger has been read. It is now time to send
// the trigger information consisting of two or three sets of TRIG_
@@ -1090,7 +1100,7 @@ out:
if (executeDirect)
{
jam();
- EXECUTE_DIRECT(refToMain(trigPtr->m_receiverRef),
+ EXECUTE_DIRECT(refToMain(ref),
GSN_FIRE_TRIG_ORD,
signal,
FireTrigOrd::SignalLengthSuma);
@@ -1106,8 +1116,17 @@ out:
ptr[1].sz = noBeforeWords;
ptr[2].p = afterBuffer;
ptr[2].sz = noAfterWords;
- sendSignal(trigPtr->m_receiverRef, GSN_FIRE_TRIG_ORD,
- signal, FireTrigOrd::SignalLengthSuma, JBB, ptr, 3);
+ if (refToMain(ref) == SUMA && (refToInstance(ref) != instance()))
+ {
+ jam();
+ ndbmtd_buffer_suma_trigger(signal, FireTrigOrd::SignalLengthSuma, ptr);
+ }
+ else
+ {
+ jam();
+ sendSignal(ref, GSN_FIRE_TRIG_ORD,
+ signal, FireTrigOrd::SignalLengthSuma, JBB, ptr, 3);
+ }
}
break;
case (TriggerType::SUBSCRIPTION):
@@ -1119,7 +1138,7 @@ out:
if (executeDirect)
{
jam();
- EXECUTE_DIRECT(refToMain(trigPtr->m_receiverRef),
+ EXECUTE_DIRECT(refToMain(ref),
GSN_FIRE_TRIG_ORD,
signal,
FireTrigOrd::SignalWithGCILength);
@@ -1137,7 +1156,7 @@ out:
ptr[1].sz = noBeforeWords;
ptr[2].p = afterBuffer;
ptr[2].sz = noAfterWords;
- sendSignal(trigPtr->m_receiverRef, GSN_FIRE_TRIG_ORD,
+ sendSignal(ref, GSN_FIRE_TRIG_ORD,
signal, FireTrigOrd::SignalWithGCILength, JBB, ptr, 3);
}
break;
@@ -1370,17 +1389,6 @@ void Dbtup::sendTrigAttrInfo(Signal* sig
} while (dataLen != dataIndex);
}
-void Dbtup::sendFireTrigOrd(Signal* signal,
- KeyReqStruct *req_struct,
- Operationrec * const regOperPtr,
- TupTriggerData* const trigPtr,
- Uint32 fragmentId,
- Uint32 noPrimKeyWords,
- Uint32 noBeforeValueWords,
- Uint32 noAfterValueWords)
-{
-}
-
/*
* Ordered index triggers.
*
@@ -1583,3 +1591,119 @@ Dbtup::removeTuxEntries(Signal* signal,
triggerList.next(triggerPtr);
}
}
+
+void
+Dbtup::ndbmtd_buffer_suma_trigger(Signal * signal,
+ Uint32 len,
+ LinearSectionPtr sec[3])
+{
+ jam();
+ Uint32 tot = len + 5;
+ for (Uint32 i = 0; i<3; i++)
+ tot += sec[i].sz;
+
+ Uint32 * ptr = 0;
+ Uint32 free = m_suma_trigger_buffer.m_freeWords;
+ Uint32 pageId = m_suma_trigger_buffer.m_pageId;
+ Uint32 oom = m_suma_trigger_buffer.m_out_of_memory;
+ if (free < tot)
+ {
+ jam();
+ if (pageId != RNIL)
+ {
+ flush_ndbmtd_suma_buffer(signal);
+ }
+ if (oom == 0)
+ {
+ jam();
+ ndbassert(m_suma_trigger_buffer.m_pageId == RNIL);
+ void * vptr = m_ctx.m_mm.alloc_page(RT_DBTUP_PAGE,
+ &m_suma_trigger_buffer.m_pageId,
+ Ndbd_mem_manager::NDB_ZONE_ANY);
+ ptr = reinterpret_cast<Uint32*>(vptr);
+ free = GLOBAL_PAGE_SIZE_WORDS - tot;
+ }
+ }
+ else
+ {
+ jam();
+ ptr = reinterpret_cast<Uint32*>(c_page_pool.getPtr(pageId));
+ ptr += (GLOBAL_PAGE_SIZE_WORDS - free);
+ free -= tot;
+ }
+
+ if (likely(ptr != 0))
+ {
+ jam();
+ * ptr++ = tot;
+ * ptr++ = len;
+ * ptr++ = sec[0].sz;
+ * ptr++ = sec[1].sz;
+ * ptr++ = sec[2].sz;
+ memcpy(ptr, signal->getDataPtrSend(), 4 * len);
+ ptr += len;
+ for (Uint32 i = 0; i<3; i++)
+ {
+ memcpy(ptr, sec[i].p, 4 * sec[i].sz);
+ ptr += sec[i].sz;
+ }
+
+ m_suma_trigger_buffer.m_freeWords = free;
+ if (free < (len + 5))
+ {
+ flush_ndbmtd_suma_buffer(signal);
+ }
+ }
+ else
+ {
+ jam();
+ m_suma_trigger_buffer.m_out_of_memory = 1;
+ }
+}
+
+void
+Dbtup::flush_ndbmtd_suma_buffer(Signal* signal)
+{
+ jam();
+
+ Uint32 pageId = m_suma_trigger_buffer.m_pageId;
+ Uint32 free = m_suma_trigger_buffer.m_freeWords;
+ Uint32 oom = m_suma_trigger_buffer.m_out_of_memory;
+
+ if (pageId != RNIL)
+ {
+ jam();
+ Uint32 save[2];
+ save[0] = signal->theData[0];
+ save[1] = signal->theData[1];
+ signal->theData[0] = pageId;
+ signal->theData[1] = GLOBAL_PAGE_SIZE_WORDS - free;
+ sendSignal(SUMA_REF, GSN_FIRE_TRIG_ORD_L, signal, 2, JBB);
+
+ signal->theData[0] = save[0];
+ signal->theData[1] = save[1];
+ }
+ else if (oom)
+ {
+ jam();
+ Uint32 save[2];
+ save[0] = signal->theData[0];
+ save[1] = signal->theData[1];
+ signal->theData[0] = RNIL;
+ signal->theData[1] = 0;
+ sendSignal(SUMA_REF, GSN_FIRE_TRIG_ORD_L, signal, 2, JBB);
+
+ signal->theData[0] = save[0];
+ signal->theData[1] = save[1];
+ }
+
+ m_suma_trigger_buffer.m_pageId = RNIL;
+ m_suma_trigger_buffer.m_freeWords = 0;
+ m_suma_trigger_buffer.m_out_of_memory = 0;
+}
+
+void
+Dbtup::execSUB_GCP_COMPLETE_REP(Signal* signal)
+{
+ flush_ndbmtd_suma_buffer(signal);
+}
=== modified file 'storage/ndb/src/kernel/blocks/suma/Suma.cpp'
--- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp 2010-01-26 14:03:52 +0000
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp 2010-02-18 06:30:45 +0000
@@ -3984,6 +3984,67 @@ reformat(Signal* signal, LinearSectionPt
return sz_2 > 0 ? 3 : 2;
}
+/**
+ * Pass entire pages with SUMA-trigger-data from
+ * TUP to SUMA to avoid extensive LongSignalMessage buffer contention
+ */
+void
+Suma::execFIRE_TRIG_ORD_L(Signal* signal)
+{
+ jamEntry();
+
+ ndbassert(signal->getNoOfSections() == 0);
+ Uint32 pageId = signal->theData[0];
+ Uint32 len = signal->theData[1];
+
+ if (pageId == RNIL && len == 0)
+ {
+ jam();
+ /**
+ * Out of memory
+ */
+ out_of_buffer(signal);
+ return;
+ }
+
+ Uint32 * ptr = reinterpret_cast<Uint32*>(c_page_pool.getPtr(pageId));
+ while (len)
+ {
+ Uint32 * save = ptr;
+ Uint32 msglen = * ptr++;
+ Uint32 siglen = * ptr++;
+ Uint32 sec0len = * ptr++;
+ Uint32 sec1len = * ptr++;
+ Uint32 sec2len = * ptr++;
+
+ /**
+ * Copy value directly into local buffers
+ */
+ Uint32 trigId = ((FireTrigOrd*)ptr)->getTriggerId();
+ memcpy(signal->theData, ptr, 4 * siglen); // signal
+ ptr += siglen;
+ memcpy(f_buffer, ptr, 4*sec0len);
+ ptr += sec0len;
+ memcpy(b_buffer, ptr, 4*sec1len);
+ ptr += sec1len;
+ memcpy(f_buffer + sec0len, ptr, 4*sec2len);
+ ptr += sec2len;
+
+ f_trigBufferSize = sec0len + sec2len;
+ b_trigBufferSize = sec1len;
+ f_bufferLock = trigId;
+ b_bufferLock = trigId;
+
+ execFIRE_TRIG_ORD(signal);
+
+ ndbrequire(ptr == save + msglen);
+ ndbrequire(len >= msglen);
+ len -= msglen;
+ }
+
+ m_ctx.m_mm.release_page(RT_DBTUP_PAGE, pageId);
+}
+
void
Suma::execFIRE_TRIG_ORD(Signal* signal)
{
@@ -4054,7 +4115,8 @@ Suma::execFIRE_TRIG_ORD(Signal* signal)
LinearSectionPtr ptr[3];
const Uint32 nptr= reformat(signal, ptr,
- f_buffer, sz, b_buffer, b_trigBufferSize);
+ f_buffer, f_trigBufferSize,
+ b_buffer, b_trigBufferSize);
Uint32 ptrLen= 0;
for(Uint32 i =0; i < nptr; i++)
ptrLen+= ptr[i].sz;
=== modified file 'storage/ndb/src/kernel/blocks/suma/Suma.hpp'
--- a/storage/ndb/src/kernel/blocks/suma/Suma.hpp 2010-01-26 14:03:52 +0000
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.hpp 2010-02-18 06:30:45 +0000
@@ -87,6 +87,7 @@ public:
*/
void execTRIG_ATTRINFO(Signal* signal);
void execFIRE_TRIG_ORD(Signal* signal);
+ void execFIRE_TRIG_ORD_L(Signal* signal);
void execSUB_GCP_COMPLETE_REP(Signal* signal);
/**
=== modified file 'storage/ndb/src/kernel/blocks/suma/SumaInit.cpp'
--- a/storage/ndb/src/kernel/blocks/suma/SumaInit.cpp 2009-05-27 15:21:45 +0000
+++ b/storage/ndb/src/kernel/blocks/suma/SumaInit.cpp 2010-02-18 06:30:45 +0000
@@ -115,6 +115,7 @@ Suma::Suma(Block_context& ctx) :
*/
addRecSignal(GSN_TRIG_ATTRINFO, &Suma::execTRIG_ATTRINFO);
addRecSignal(GSN_FIRE_TRIG_ORD, &Suma::execFIRE_TRIG_ORD);
+ addRecSignal(GSN_FIRE_TRIG_ORD_L, &Suma::execFIRE_TRIG_ORD_L);
addRecSignal(GSN_CREATE_TRIG_IMPL_REF, &Suma::execCREATE_TRIG_IMPL_REF);
addRecSignal(GSN_CREATE_TRIG_IMPL_CONF, &Suma::execCREATE_TRIG_IMPL_CONF);
=== modified file 'storage/ndb/src/kernel/vm/ArrayPool.hpp'
--- a/storage/ndb/src/kernel/vm/ArrayPool.hpp 2009-11-12 22:29:56 +0000
+++ b/storage/ndb/src/kernel/vm/ArrayPool.hpp 2010-02-18 06:30:45 +0000
@@ -83,7 +83,7 @@ public:
}
inline void decNoFree(Uint32 cnt) {
- assert(noOfFree > cnt);
+ assert(noOfFree >= cnt);
noOfFree -= cnt;
updateFreeMin();
}
=== modified file 'storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp'
--- a/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp 2010-01-23 10:08:15 +0000
+++ b/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp 2010-02-18 00:06:17 +0000
@@ -185,6 +185,8 @@ retry:
Uint32
Ndbd_mem_manager::ndb_log2(Uint32 input)
{
+ if (input > 65535)
+ return 16;
input = input | (input >> 8);
input = input | (input >> 4);
input = input | (input >> 2);
=== modified file 'storage/ndb/src/mgmsrv/CMakeLists.txt'
--- a/storage/ndb/src/mgmsrv/CMakeLists.txt 2010-01-03 23:42:21 +0000
+++ b/storage/ndb/src/mgmsrv/CMakeLists.txt 2010-02-18 10:45:20 +0000
@@ -22,14 +22,7 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/
${CMAKE_SOURCE_DIR}/storage/ndb/src/mgmclient
${CMAKE_SOURCE_DIR}/sql)
-LINK_LIBRARIES(ndbmgmclient
- ndbclient
- dbug
- mysys
- strings
- ndbgeneral
- ndbportlib)
-
+LINK_LIBRARIES(ndbmgmclient ndbclient)
ADD_LIBRARY(ndbconf
Config.cpp
ConfigInfo.cpp
=== modified file 'storage/ndb/src/mgmsrv/MgmtSrvr.cpp'
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp 2010-02-01 10:03:20 +0000
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp 2010-02-18 13:40:02 +0000
@@ -238,7 +238,8 @@ MgmtSrvr::MgmtSrvr(const MgmtOpts& opts,
_logLevelThreadSleep(500),
m_event_listner(this),
m_master_node(0),
- _logLevelThread(NULL)
+ _logLevelThread(NULL),
+ m_version_string(ndbGetOwnVersionString())
{
DBUG_ENTER("MgmtSrvr::MgmtSrvr");
=== modified file 'storage/ndb/src/mgmsrv/MgmtSrvr.hpp'
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp 2009-11-13 11:24:05 +0000
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp 2010-02-18 13:40:02 +0000
@@ -554,6 +554,12 @@ public:
int match_hostname(const struct sockaddr *, const char *) const;
int try_alloc(unsigned id, const char *, enum ndb_mgm_node_type type,
const struct sockaddr *client_addr, Uint32 timeout_ms);
+
+ BaseString m_version_string;
+ const char* get_version_string(void) const {
+ return m_version_string.c_str();
+ }
+
};
#endif // MgmtSrvr_H
=== modified file 'storage/ndb/src/mgmsrv/Services.cpp'
--- a/storage/ndb/src/mgmsrv/Services.cpp 2010-01-19 08:24:03 +0000
+++ b/storage/ndb/src/mgmsrv/Services.cpp 2010-02-18 13:40:02 +0000
@@ -677,14 +677,14 @@ void
MgmApiSession::getVersion(Parser<MgmApiSession>::Context &,
Properties const &) {
m_output->println("version");
- m_output->println("id: %d", NDB_VERSION);
- m_output->println("major: %d", getMajor(NDB_VERSION));
- m_output->println("minor: %d", getMinor(NDB_VERSION));
- m_output->println("build: %d", getBuild(NDB_VERSION));
- m_output->println("string: %s", NDB_VERSION_STRING);
- m_output->println("mysql_major: %d", getMajor(NDB_MYSQL_VERSION_D));
- m_output->println("mysql_minor: %d", getMinor(NDB_MYSQL_VERSION_D));
- m_output->println("mysql_build: %d", getBuild(NDB_MYSQL_VERSION_D));
+ m_output->println("id: %d", NDB_VERSION_D);
+ m_output->println("major: %d", NDB_VERSION_MAJOR);
+ m_output->println("minor: %d", NDB_VERSION_MINOR);
+ m_output->println("build: %d", NDB_VERSION_BUILD);
+ m_output->println("string: %s", m_mgmsrv.get_version_string());
+ m_output->println("mysql_major: %d", NDB_MYSQL_VERSION_MAJOR);
+ m_output->println("mysql_minor: %d", NDB_MYSQL_VERSION_MINOR);
+ m_output->println("mysql_build: %d", NDB_MYSQL_VERSION_BUILD);
m_output->println("%s", "");
}
=== modified file 'storage/ndb/src/ndbapi/NdbDictionary.cpp'
--- a/storage/ndb/src/ndbapi/NdbDictionary.cpp 2010-02-09 05:24:15 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp 2010-02-18 16:37:48 +0000
@@ -2775,6 +2775,7 @@ operator<<(NdbOut& out, const NdbDiction
case NdbDictionary::Column::Blob:
case NdbDictionary::Column::Text:
out << " BV=" << col.getBlobVersion();
+ out << " BT=" << ((col.getBlobTable() != 0) ? col.getBlobTable()->getName() : "<none>");
break;
default:
break;
=== modified file 'storage/ndb/test/include/HugoTransactions.hpp'
--- a/storage/ndb/test/include/HugoTransactions.hpp 2009-05-27 12:11:46 +0000
+++ b/storage/ndb/test/include/HugoTransactions.hpp 2010-02-18 23:50:31 +0000
@@ -70,6 +70,11 @@ public:
NdbOperation::LockMode = NdbOperation::LM_Read,
int rand = 0);
+ int pkReadUnlockRecords(Ndb*,
+ int records,
+ int batchsize = 1,
+ NdbOperation::LockMode = NdbOperation::LM_Read);
+
int scanUpdateRecords(Ndb*, NdbScanOperation::ScanFlag,
int records,
int abort = 0,
=== modified file 'storage/ndb/test/include/NdbRestarter.hpp'
--- a/storage/ndb/test/include/NdbRestarter.hpp 2009-08-11 08:10:20 +0000
+++ b/storage/ndb/test/include/NdbRestarter.hpp 2010-02-18 23:50:31 +0000
@@ -92,6 +92,9 @@ public:
int getRandomNodeSameNodeGroup(int nodeId, int randomNumber);
int getRandomNodeOtherNodeGroup(int nodeId, int randomNumber);
int getRandomNotMasterNodeId(int randomNumber);
+
+ int getMasterNodeVersion(int& version);
+ int getNodeTypeVersionRange(ndb_mgm_node_type type, int& minVer, int& maxVer);
NdbMgmHandle handle;
=== modified file 'storage/ndb/test/ndbapi/testMgm.cpp'
--- a/storage/ndb/test/ndbapi/testMgm.cpp 2009-11-08 12:52:27 +0000
+++ b/storage/ndb/test/ndbapi/testMgm.cpp 2010-02-18 13:40:02 +0000
@@ -1131,47 +1131,6 @@ int runSleepAndStop(NDBT_Context* ctx, N
static bool
-get_version(NdbMgmd& mgmd,
- Properties& reply)
-{
- Properties args;
- if (!mgmd.call("get version", args,
- "version", reply))
- {
- g_err << "get_version: mgmd.call failed" << endl;
- return false;
- }
-
- //reply.print();
- return true;
-}
-
-int runTestGetVersion(NDBT_Context* ctx, NDBT_Step* step)
-{
- NdbMgmd mgmd;
-
- if (!mgmd.connect())
- return NDBT_FAILED;
-
- Properties reply;
- if (!get_version(mgmd, reply))
- return NDBT_FAILED;
-
- return NDBT_OK;
-}
-
-int runTestGetVersionUntilStopped(NDBT_Context* ctx, NDBT_Step* step)
-{
- int result= NDBT_OK;
- while(!ctx->isTestStopped() &&
- (result= runTestGetVersion(ctx, step)) == NDBT_OK)
- ;
- return result;
-}
-
-
-
-static bool
check_connection(NdbMgmd& mgmd)
{
Properties args, reply;
@@ -2322,6 +2281,94 @@ int runTestBug45497(NDBT_Context* ctx, N
}
+static int
+runTestGetVersion(NDBT_Context* ctx, NDBT_Step* step)
+{
+
+ NdbMgmd mgmd;
+
+ if (!mgmd.connect())
+ return NDBT_FAILED;
+
+ char verStr[64];
+ int major, minor, build;
+ if (ndb_mgm_get_version(mgmd.handle(),
+ &major, &minor, &build,
+ sizeof(verStr), verStr) != 1)
+ {
+ g_err << "ndb_mgm_get_version failed,"
+ << "error: " << ndb_mgm_get_latest_error_msg(mgmd.handle())
+ << "desc: " << ndb_mgm_get_latest_error_desc(mgmd.handle()) << endl;
+ return NDBT_FAILED;
+ }
+
+ g_info << "Using major: " << major
+ << " minor: " << minor
+ << " build: " << build
+ << " string: " << verStr << endl;
+
+ int l = 0;
+ int loops = ctx->getNumLoops();
+ while(l < loops)
+ {
+ char verStr2[64];
+ int major2, minor2, build2;
+ if (ndb_mgm_get_version(mgmd.handle(),
+ &major2, &minor2, &build2,
+ sizeof(verStr2), verStr2) != 1)
+ {
+ g_err << "ndb_mgm_get_version failed,"
+ << "error: " << ndb_mgm_get_latest_error_msg(mgmd.handle())
+ << "desc: " << ndb_mgm_get_latest_error_desc(mgmd.handle()) << endl;
+ return NDBT_FAILED;
+ }
+
+ if (major != major2)
+ {
+ g_err << "Got different major: " << major2
+ << " expected: " << major << endl;
+ return NDBT_FAILED;
+ }
+
+ if (minor != minor2)
+ {
+ g_err << "Got different minor: " << minor2
+ << " expected: " << minor << endl;
+ return NDBT_FAILED;
+ }
+
+ if (build != build2)
+ {
+ g_err << "Got different build: " << build2
+ << " expected: " << build << endl;
+ return NDBT_FAILED;
+ }
+
+ if (strcmp(verStr, verStr2) != 0)
+ {
+ g_err << "Got different verStr: " << verStr2
+ << " expected: " << verStr << endl;
+ return NDBT_FAILED;
+ }
+
+ l++;
+ }
+
+ return NDBT_OK;
+}
+
+
+static int
+runTestGetVersionUntilStopped(NDBT_Context* ctx, NDBT_Step* step)
+{
+ int result= NDBT_OK;
+ while(!ctx->isTestStopped() &&
+ (result= runTestGetVersion(ctx, step)) == NDBT_OK)
+ ;
+ return result;
+}
+
+
NDBT_TESTSUITE(testMgm);
DRIVER(DummyDriver); /* turn off use of NdbApi */
TESTCASE("ApiSessionFailure",
@@ -2396,10 +2443,9 @@ TESTCASE("TestGetNodeId",
"Test 'get nodeid'"){
INITIALIZER(runTestGetNodeId);
}
-
TESTCASE("TestGetVersion",
- "Test 'get version'"){
- INITIALIZER(runTestGetVersion);
+ "Test 'get version' and 'ndb_mgm_get_version'"){
+ STEPS(runTestGetVersion, 20);
}
TESTCASE("TestTransporterConnect",
"Test 'transporter connect'"){
@@ -2430,7 +2476,7 @@ TESTCASE("Stress",
STEP(runSetConfigUntilStopped);
STEPS(runGetConfigUntilStopped, 10);
STEPS(runTestStatusUntilStopped, 10);
-// STEPS(runTestGetVersionUntilStopped, 5);
+ STEPS(runTestGetVersionUntilStopped, 5);
STEP(runSleepAndStop);
}
TESTCASE("Stress2",
@@ -2439,7 +2485,7 @@ TESTCASE("Stress2",
STEPS(runTestSetConfigParallelUntilStopped, 5);
STEPS(runGetConfigUntilStopped, 10);
STEPS(runTestStatusUntilStopped, 10);
-// STEPS(runTestGetVersionUntilStopped, 5);
+ STEPS(runTestGetVersionUntilStopped, 5);
STEP(runSleepAndStop);
}
TESTCASE("Bug45497",
=== modified file 'storage/ndb/test/ndbapi/testUpgrade.cpp'
--- a/storage/ndb/test/ndbapi/testUpgrade.cpp 2009-10-30 08:17:03 +0000
+++ b/storage/ndb/test/ndbapi/testUpgrade.cpp 2010-02-18 23:50:31 +0000
@@ -24,6 +24,7 @@
#include <AtrtClient.hpp>
#include <Bitmask.hpp>
#include <NdbBackup.hpp>
+#include <ndb_version.h>
static Vector<BaseString> table_list;
@@ -131,30 +132,74 @@ createDropEvent(NDBT_Context* ctx, NDBT_
Ndb* pNdb = GETNDB(step);
NdbDictionary::Dictionary *myDict = pNdb->getDictionary();
- for (unsigned i = 0; i<table_list.size(); i++)
+ if (ctx->getProperty("NoDDL", Uint32(0)) == 0)
{
- int res = NDBT_OK;
- const NdbDictionary::Table* tab = myDict->getTable(table_list[i].c_str());
- if (tab == 0)
- {
- continue;
- }
- if ((res = createEvent(pNdb, *tab) != NDBT_OK))
- {
- return res;
- }
-
-
-
- if ((res = dropEvent(pNdb, *tab)) != NDBT_OK)
+ for (unsigned i = 0; i<table_list.size(); i++)
{
- return res;
+ int res = NDBT_OK;
+ const NdbDictionary::Table* tab = myDict->getTable(table_list[i].c_str());
+ if (tab == 0)
+ {
+ continue;
+ }
+ if ((res = createEvent(pNdb, *tab) != NDBT_OK))
+ {
+ return res;
+ }
+
+
+
+ if ((res = dropEvent(pNdb, *tab)) != NDBT_OK)
+ {
+ return res;
+ }
}
}
return NDBT_OK;
}
+/* An enum for expressing how many of the multiple nodes
+ * of a given type an action should be applied to
+ */
+enum NodeSet
+{
+ All = 0,
+ NotAll = 1, /* less than All, or None if there's only 1 */
+ None = 2
+};
+
+uint getNodeCount(NodeSet set, uint numNodes)
+{
+ switch(set)
+ {
+ case All:
+ return numNodes;
+ case NotAll:
+ {
+ if (numNodes < 2)
+ return 0;
+
+ if (numNodes == 2)
+ return 1;
+
+ uint range = numNodes - 2;
+
+ /* At least 1, at most numNodes - 1 */
+ return (1 + (rand() % (range + 1)));
+ }
+ case None:
+ {
+ return 0;
+ }
+ default:
+ g_err << "Unknown set type : " << set << endl;
+ abort();
+ return 0;
+ }
+};
+
+
/**
Test that one node at a time can be upgraded
*/
@@ -162,6 +207,9 @@ createDropEvent(NDBT_Context* ctx, NDBT_
int runUpgrade_NR1(NDBT_Context* ctx, NDBT_Step* step){
AtrtClient atrt;
+ NodeSet mgmdNodeSet = (NodeSet) ctx->getProperty("MgmdNodeSet", Uint32(0));
+ NodeSet ndbdNodeSet = (NodeSet) ctx->getProperty("NdbdNodeSet", Uint32(0));
+
SqlResultSet clusters;
if (!atrt.getClusters(clusters))
return NDBT_FAILED;
@@ -185,51 +233,65 @@ int runUpgrade_NR1(NDBT_Context* ctx, ND
SqlResultSet mgmds;
if (!atrt.getMgmds(clusterId, mgmds))
return NDBT_FAILED;
-
- while (mgmds.next())
+
+ uint mgmdCount = mgmds.numRows();
+ uint restartCount = getNodeCount(mgmdNodeSet, mgmdCount);
+
+ ndbout << "Restarting "
+ << restartCount << " of " << mgmdCount
+ << " mgmds" << endl;
+
+ while (mgmds.next() && restartCount --)
{
ndbout << "Restart mgmd " << mgmds.columnAsInt("node_id") << endl;
if (!atrt.changeVersion(mgmds.columnAsInt("id"), ""))
return NDBT_FAILED;
-
+
if (restarter.waitConnected())
return NDBT_FAILED;
ndbout << "Connected to mgmd"<< endl;
}
-
+
ndbout << "Waiting for started"<< endl;
if (restarter.waitClusterStarted())
return NDBT_FAILED;
ndbout << "Started"<< endl;
-
+
// Restart ndbd(s)
SqlResultSet ndbds;
if (!atrt.getNdbds(clusterId, ndbds))
return NDBT_FAILED;
- while(ndbds.next())
+ uint ndbdCount = ndbds.numRows();
+ restartCount = getNodeCount(ndbdNodeSet, ndbdCount);
+
+ ndbout << "Restarting "
+ << restartCount << " of " << ndbdCount
+ << " ndbds" << endl;
+
+ while(ndbds.next() && restartCount --)
{
int nodeId = ndbds.columnAsInt("node_id");
int processId = ndbds.columnAsInt("id");
ndbout << "Restart node " << nodeId << endl;
-
+
if (!atrt.changeVersion(processId, ""))
return NDBT_FAILED;
-
+
if (restarter.waitNodesNoStart(&nodeId, 1))
return NDBT_FAILED;
-
+
if (restarter.startNodes(&nodeId, 1))
return NDBT_FAILED;
-
+
if (restarter.waitNodesStarted(&nodeId, 1))
return NDBT_FAILED;
-
+
if (createDropEvent(ctx, step))
return NDBT_FAILED;
}
}
-
+
ctx->stopTest();
return NDBT_OK;
}
@@ -259,6 +321,9 @@ runUpgrade_Half(NDBT_Context* ctx, NDBT_
args = "--initial=0";
}
+ NodeSet mgmdNodeSet = (NodeSet) ctx->getProperty("MgmdNodeSet", Uint32(0));
+ NodeSet ndbdNodeSet = (NodeSet) ctx->getProperty("NdbdNodeSet", Uint32(0));
+
SqlResultSet clusters;
if (!atrt.getClusters(clusters))
return NDBT_FAILED;
@@ -283,7 +348,14 @@ runUpgrade_Half(NDBT_Context* ctx, NDBT_
if (!atrt.getMgmds(clusterId, mgmds))
return NDBT_FAILED;
- while (mgmds.next())
+ uint mgmdCount = mgmds.numRows();
+ uint restartCount = getNodeCount(mgmdNodeSet, mgmdCount);
+
+ ndbout << "Restarting "
+ << restartCount << " of " << mgmdCount
+ << " mgmds" << endl;
+
+ while (mgmds.next() && restartCount --)
{
ndbout << "Restart mgmd" << mgmds.columnAsInt("node_id") << endl;
if (!atrt.changeVersion(mgmds.columnAsInt("id"), ""))
@@ -310,12 +382,19 @@ runUpgrade_Half(NDBT_Context* ctx, NDBT_
nodes.push_back(n);
}
+ uint ndbdCount = ndbds.numRows();
+ restartCount = getNodeCount(ndbdNodeSet, ndbdCount);
+
+ ndbout << "Restarting "
+ << restartCount << " of " << ndbdCount
+ << " ndbds" << endl;
+
int nodesarray[256];
int cnt= 0;
Bitmask<4> seen_groups;
Bitmask<4> restarted_nodes;
- for (Uint32 i = 0; i<nodes.size(); i++)
+ for (Uint32 i = 0; (i<nodes.size() && restartCount); i++)
{
int nodeId = nodes[i].nodeId;
int processId = nodes[i].processId;
@@ -340,6 +419,7 @@ runUpgrade_Half(NDBT_Context* ctx, NDBT_
}
nodesarray[cnt++]= nodeId;
+ restartCount--;
}
if (!waitNode)
@@ -362,7 +442,7 @@ runUpgrade_Half(NDBT_Context* ctx, NDBT_
// Restart the remaining nodes
cnt= 0;
- for (Uint32 i = 0; i<nodes.size(); i++)
+ for (Uint32 i = 0; (i<nodes.size() && restartCount); i++)
{
int nodeId = nodes[i].nodeId;
int processId = nodes[i].processId;
@@ -380,6 +460,7 @@ runUpgrade_Half(NDBT_Context* ctx, NDBT_
}
nodesarray[cnt++]= nodeId;
+ restartCount --;
}
@@ -438,6 +519,47 @@ int runUpgrade_NR3(NDBT_Context* ctx, ND
return res;
}
+/**
+ Test that we can upgrade the Ndbds on their own
+*/
+int runUpgrade_NdbdOnly(NDBT_Context* ctx, NDBT_Step* step)
+{
+ ctx->setProperty("MgmdNodeSet", (Uint32) NodeSet(None));
+ int res = runUpgrade_Half(ctx, step);
+ ctx->stopTest();
+ return res;
+}
+
+/**
+ Test that we can upgrade the Ndbds first, then
+ the MGMDs
+*/
+int runUpgrade_NdbdFirst(NDBT_Context* ctx, NDBT_Step* step)
+{
+ ctx->setProperty("MgmdNodeSet", (Uint32) NodeSet(None));
+ int res = runUpgrade_Half(ctx, step);
+ if (res == NDBT_OK)
+ {
+ ctx->setProperty("MgmdNodeSet", (Uint32) NodeSet(All));
+ ctx->setProperty("NdbdNodeSet", (Uint32) NodeSet(None));
+ res = runUpgrade_Half(ctx, step);
+ }
+ ctx->stopTest();
+ return res;
+}
+
+/**
+ Upgrade some of the MGMDs
+*/
+int runUpgrade_NotAllMGMD(NDBT_Context* ctx, NDBT_Step* step)
+{
+ ctx->setProperty("MgmdNodeSet", (Uint32) NodeSet(NotAll));
+ ctx->setProperty("NdbdNodeSet", (Uint32) NodeSet(None));
+ int res = runUpgrade_Half(ctx, step);
+ ctx->stopTest();
+ return res;
+}
+
int runCheckStarted(NDBT_Context* ctx, NDBT_Step* step){
// Check cluster is started
@@ -494,6 +616,27 @@ runCreateOneTable(NDBT_Context* ctx, NDB
return NDBT_OK;
}
+int runGetTableList(NDBT_Context* ctx, NDBT_Step* step)
+{
+ table_list.clear();
+ ndbout << "Looking for tables ... ";
+ for (int i = 0; i<NDBT_Tables::getNumTables(); i++)
+ {
+ const NdbDictionary::Table* tab =
+ GETNDB(step)->getDictionary()
+ ->getTable(NDBT_Tables::getTable(i)
+ ->getName());
+ if (tab != NULL)
+ {
+ ndbout << tab->getName() << " ";
+ table_list.push_back(BaseString(tab->getName()));
+ }
+ }
+ ndbout << endl;
+
+ return NDBT_OK;
+}
+
int
runLoadAll(NDBT_Context* ctx, NDBT_Step* step)
{
@@ -513,6 +656,27 @@ runLoadAll(NDBT_Context* ctx, NDBT_Step*
return result;
}
+int
+runClearAll(NDBT_Context* ctx, NDBT_Step* step)
+{
+ Ndb* pNdb = GETNDB(step);
+ NdbDictionary::Dictionary * pDict = pNdb->getDictionary();
+ int records = ctx->getNumRecords();
+ int result = NDBT_OK;
+
+ for (unsigned i = 0; i<table_list.size(); i++)
+ {
+ const NdbDictionary::Table* tab = pDict->getTable(table_list[i].c_str());
+ if (tab)
+ {
+ HugoTransactions trans(* tab);
+ trans.clearTable(pNdb, records);
+ }
+ }
+
+ return result;
+}
+
int
runBasic(NDBT_Context* ctx, NDBT_Step* step)
@@ -525,7 +689,6 @@ runBasic(NDBT_Context* ctx, NDBT_Step* s
int l = 0;
while (!ctx->isTestStopped())
{
- l++;
for (unsigned i = 0; i<table_list.size(); i++)
{
const NdbDictionary::Table* tab = pDict->getTable(table_list[i].c_str());
@@ -535,6 +698,7 @@ runBasic(NDBT_Context* ctx, NDBT_Step* s
trans.loadTable(pNdb, records);
trans.scanUpdateRecords(pNdb, records);
trans.pkUpdateRecords(pNdb, records);
+ trans.pkReadUnlockRecords(pNdb, records);
break;
case 1:
trans.scanUpdateRecords(pNdb, records);
@@ -558,6 +722,7 @@ runBasic(NDBT_Context* ctx, NDBT_Step* s
break;
}
}
+ l++;
}
return result;
@@ -611,6 +776,15 @@ startPostUpgradeChecks(NDBT_Context* ctx
* This will restart *self* in new version
*/
+ BaseString extraArgs;
+ if (ctx->getProperty("RestartNoDDL", Uint32(0)))
+ {
+ /* Ask post-upgrade steps not to perform DDL
+ * (e.g. for 6.3->7.0 upgrade)
+ */
+ extraArgs.append(" --noddl ");
+ }
+
/**
* mysql-getopt works so that passing "-n X -n Y" is ok
* and is interpreted as "-n Y"
@@ -620,7 +794,9 @@ startPostUpgradeChecks(NDBT_Context* ctx
* this will restart it as "testUpgrade -n X -n X--post-upgrade"
*/
BaseString tc;
- tc.assfmt("-n %s--post-upgrade", ctx->getCase()->getName());
+ tc.assfmt("-n %s--post-upgrade %s",
+ ctx->getCase()->getName(),
+ extraArgs.c_str());
ndbout << "About to restart self with extra arg: " << tc.c_str() << endl;
@@ -641,6 +817,18 @@ startPostUpgradeChecks(NDBT_Context* ctx
}
int
+startPostUpgradeChecksApiFirst(NDBT_Context* ctx, NDBT_Step* step)
+{
+ /* If Api is upgraded before all NDBDs then it may not
+ * be possible to use DDL from the upgraded API
+ * The upgraded Api will decide, but we pass NoDDL
+ * in
+ */
+ ctx->setProperty("RestartNoDDL", 1);
+ return startPostUpgradeChecks(ctx, step);
+}
+
+int
runPostUpgradeChecks(NDBT_Context* ctx, NDBT_Step* step)
{
/**
@@ -659,56 +847,116 @@ runPostUpgradeChecks(NDBT_Context* ctx,
ndbout << "done" << endl;
- /**
- * Bug48227
- *
- */
- Ndb* pNdb = GETNDB(step);
- NdbDictionary::Dictionary *pDict = pNdb->getDictionary();
+ if (ctx->getProperty("NoDDL", Uint32(0)) == 0)
{
- NdbDictionary::Dictionary::List l;
- pDict->listObjects(l);
- for (Uint32 i = 0; i<l.count; i++)
- ndbout_c("found %u : %s", l.elements[i].id, l.elements[i].name);
+ /**
+ * Bug48227
+ *
+ */
+ Ndb* pNdb = GETNDB(step);
+ NdbDictionary::Dictionary *pDict = pNdb->getDictionary();
+ {
+ NdbDictionary::Dictionary::List l;
+ pDict->listObjects(l);
+ for (Uint32 i = 0; i<l.count; i++)
+ ndbout_c("found %u : %s", l.elements[i].id, l.elements[i].name);
+ }
+
+ pDict->dropTable("I3");
+ if (NDBT_Tables::createTable(pNdb, "I3"))
+ {
+ ndbout_c("Failed to create table!");
+ ndbout << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+
+ {
+ NdbDictionary::Dictionary::List l;
+ pDict->listObjects(l);
+ for (Uint32 i = 0; i<l.count; i++)
+ ndbout_c("found %u : %s", l.elements[i].id, l.elements[i].name);
+ }
+
+ NdbRestarter res;
+ if (res.restartAll() != 0)
+ {
+ ndbout_c("restartAll() failed");
+ return NDBT_FAILED;
+ }
+
+ if (res.waitClusterStarted() != 0)
+ {
+ ndbout_c("waitClusterStarted() failed");
+ return NDBT_FAILED;
+ }
+
+ if (pDict->getTable("I3") == 0)
+ {
+ ndbout_c("Table disappered");
+ return NDBT_FAILED;
+ }
}
- pDict->dropTable("I3");
- if (NDBT_Tables::createTable(pNdb, "I3"))
- {
- ndbout_c("Failed to create table!");
- ndbout << pDict->getNdbError() << endl;
- return NDBT_FAILED;
- }
+ return NDBT_OK;
+}
- {
- NdbDictionary::Dictionary::List l;
- pDict->listObjects(l);
- for (Uint32 i = 0; i<l.count; i++)
- ndbout_c("found %u : %s", l.elements[i].id, l.elements[i].name);
+
+int
+runWait(NDBT_Context* ctx, NDBT_Step* step)
+{
+ Uint32 waitSeconds = ctx->getProperty("WaitSeconds", Uint32(30));
+ while (waitSeconds &&
+ !ctx->isTestStopped())
+ {
+ NdbSleep_MilliSleep(1000);
+ waitSeconds --;
}
+ ctx->stopTest();
+ return NDBT_OK;
+}
+
+int runPostUpgradeDecideDDL(NDBT_Context* ctx, NDBT_Step* step)
+{
+ /* We are running post-upgrade, now examine the versions
+ * of connected nodes and update the 'NoDDL' variable
+ * accordingly
+ */
+ /* DDL should be ok as long as
+ * 1) All data nodes have the same version
+ * 2) We are at the same version as the data nodes
+ */
+ bool useDDL = true;
- NdbRestarter res;
- if (res.restartAll() != 0)
+ NdbRestarter restarter;
+ int minNdbVer = 0;
+ int maxNdbVer = 0;
+ int myVer = NDB_VERSION;
+
+ if (restarter.getNodeTypeVersionRange(NDB_MGM_NODE_TYPE_NDB,
+ minNdbVer,
+ maxNdbVer) == -1)
{
- ndbout_c("restartAll() failed");
+ g_err << "getNodeTypeVersionRange call failed" << endl;
return NDBT_FAILED;
}
- if (res.waitClusterStarted() != 0)
+ if (minNdbVer != maxNdbVer)
{
- ndbout_c("waitClusterStarted() failed");
- return NDBT_FAILED;
+ useDDL = false;
+ ndbout << "Ndbd nodes have mixed versions, DDL not supported" << endl;
}
-
- if (pDict->getTable("I3") == 0)
+ if (myVer != minNdbVer)
{
- ndbout_c("Table disappered");
- return NDBT_FAILED;
+ useDDL = false;
+ ndbout << "Api has different version to Ndbd nodes, DDL not supported" << endl;
}
+ ctx->setProperty("NoDDL", useDDL?0:1);
+
return NDBT_OK;
}
+
NDBT_TESTSUITE(testUpgrade);
TESTCASE("Upgrade_NR1",
"Test that one node at a time can be upgraded"){
@@ -817,6 +1065,80 @@ POSTUPGRADE("Upgrade_Traffic_FS_one")
INITIALIZER(runCheckStarted);
INITIALIZER(runPostUpgradeChecks);
}
+TESTCASE("Upgrade_Api_Only",
+ "Test that upgrading the Api node only works")
+{
+ INITIALIZER(runCheckStarted);
+ INITIALIZER(runCreateAllTables);
+ VERIFIER(startPostUpgradeChecksApiFirst);
+}
+POSTUPGRADE("Upgrade_Api_Only")
+{
+ INITIALIZER(runCheckStarted);
+ INITIALIZER(runPostUpgradeDecideDDL);
+ INITIALIZER(runGetTableList);
+ TC_PROPERTY("WaitSeconds", 30);
+ STEP(runBasic);
+ STEP(runPostUpgradeChecks);
+ STEP(runWait);
+ FINALIZER(runClearAll);
+}
+TESTCASE("Upgrade_Api_Before_NR1",
+ "Test that upgrading the Api node before the kernel works")
+{
+ /* Api, then MGMD(s), then NDBDs */
+ INITIALIZER(runCheckStarted);
+ INITIALIZER(runCreateAllTables);
+ VERIFIER(startPostUpgradeChecksApiFirst);
+}
+POSTUPGRADE("Upgrade_Api_Before_NR1")
+{
+ INITIALIZER(runCheckStarted);
+ INITIALIZER(runPostUpgradeDecideDDL);
+ INITIALIZER(runGetTableList);
+ STEP(runBasic);
+ STEP(runUpgrade_NR1); /* Upgrade kernel nodes using NR1 */
+ FINALIZER(runPostUpgradeChecks);
+ FINALIZER(runClearAll);
+}
+TESTCASE("Upgrade_Api_NDBD_MGMD",
+ "Test that updating in reverse order works")
+{
+ INITIALIZER(runCheckStarted);
+ INITIALIZER(runCreateAllTables);
+ VERIFIER(startPostUpgradeChecksApiFirst);
+}
+POSTUPGRADE("Upgrade_Api_NDBD_MGMD")
+{
+ INITIALIZER(runCheckStarted);
+ INITIALIZER(runPostUpgradeDecideDDL);
+ INITIALIZER(runGetTableList);
+ STEP(runBasic);
+ STEP(runUpgrade_NdbdFirst);
+ FINALIZER(runPostUpgradeChecks);
+ FINALIZER(runClearAll);
+}
+TESTCASE("Upgrade_Mixed_MGMD_API_NDBD",
+ "Test that upgrading MGMD/API partially before data nodes works")
+{
+ INITIALIZER(runCheckStarted);
+ INITIALIZER(runCreateAllTables);
+ STEP(runUpgrade_NotAllMGMD); /* Upgrade an MGMD */
+ STEP(runBasic);
+ VERIFIER(startPostUpgradeChecksApiFirst); /* Upgrade Api */
+}
+POSTUPGRADE("Upgrade_Mixed_MGMD_API_NDBD")
+{
+ INITIALIZER(runCheckStarted);
+ INITIALIZER(runPostUpgradeDecideDDL);
+ INITIALIZER(runGetTableList);
+ INITIALIZER(runClearAll); /* Clear rows from old-ver basic run */
+ STEP(runBasic);
+ STEP(runUpgrade_NdbdFirst); /* Upgrade all Ndbds, then MGMDs finally */
+ FINALIZER(runPostUpgradeChecks);
+ FINALIZER(runClearAll);
+}
+
NDBT_TESTSUITE_END(testUpgrade);
int main(int argc, const char** argv){
=== modified file 'storage/ndb/test/run-test/command.cpp'
--- a/storage/ndb/test/run-test/command.cpp 2009-11-13 04:54:11 +0000
+++ b/storage/ndb/test/run-test/command.cpp 2010-02-18 23:50:31 +0000
@@ -63,6 +63,46 @@ ack_command(AtrtClient& atrtdb, int comm
}
+BaseString
+set_env_var(const BaseString& existing,
+ const BaseString& name,
+ const BaseString& value)
+{
+ /* Split existing on space
+ * (may have issues with env vars with spaces)
+ * Split assignments on =
+ * Where an assignment's variable matches `name`, output the new value
+ */
+ BaseString newEnv;
+ Vector<BaseString> assignments;
+ int assignmentCount = existing.split(assignments, BaseString(" "));
+
+ for (int i=0; i < assignmentCount; i++)
+ {
+ Vector<BaseString> terms;
+ int termCount = assignments[i].split(terms, BaseString("="));
+
+ if (termCount)
+ {
+ if (strcmp(name.c_str(), terms[0].c_str()) == 0)
+ {
+ /* Found element */
+ newEnv.append(name);
+ newEnv.append('=');
+ newEnv.append(value);
+ }
+ else
+ {
+ newEnv.append(assignments[i]);
+ }
+ }
+ newEnv.append(' ');
+ }
+
+ return newEnv;
+}
+
+
Vector<atrt_process> g_saved_procs;
static
@@ -85,6 +125,29 @@ do_change_version(atrt_config& config, S
}
atrt_process& proc= *config.m_processes[process_id];
+ const char* new_prefix= g_prefix1 ? g_prefix1 : g_prefix;
+ const char* old_prefix= g_prefix;
+ const char *start= strstr(proc.m_proc.m_path.c_str(), old_prefix);
+ if (!start){
+ /* Process path does not contain old prefix.
+ * Perhaps it contains the new prefix - e.g. is already
+ * upgraded?
+ */
+ if (strstr(proc.m_proc.m_path.c_str(), new_prefix))
+ {
+ /* Process is already upgraded, *assume* that this
+ * is ok
+ * Alternatives could be - error, or downgrade.
+ */
+ g_logger.info("Process already upgraded");
+ return true;
+ }
+
+ g_logger.critical("Could not find '%s' in '%s'",
+ old_prefix, proc.m_proc.m_path.c_str());
+ return false;
+ }
+
// Save current proc state
if (proc.m_save.m_saved == false)
{
@@ -95,20 +158,15 @@ do_change_version(atrt_config& config, S
g_logger.info("stopping process...");
if (!stop_process(proc))
return false;
-
- const char* new_prefix= g_prefix1 ? g_prefix1 : g_prefix;
- const char* old_prefix= g_prefix;
- proc.m_proc.m_env.appfmt(" MYSQL_BASE_DIR=%s", new_prefix);
- const char *start= strstr(proc.m_proc.m_path.c_str(), old_prefix);
- if (!start){
- g_logger.critical("Could not find '%s' in '%s'",
- old_prefix, proc.m_proc.m_path.c_str());
- return false;
- }
+ BaseString newEnv = set_env_var(proc.m_proc.m_env,
+ BaseString("MYSQL_BASE_DIR"),
+ BaseString(new_prefix));
+ proc.m_proc.m_env.assign(newEnv);
BaseString suffix(proc.m_proc.m_path.substr(strlen(old_prefix)));
proc.m_proc.m_path.assign(new_prefix).append(suffix);
if (process_args && strlen(process_args))
{
+ /* Beware: appended args may make the command line too long */
proc.m_proc.m_args.append(" ");
proc.m_proc.m_args.append(process_args);
}
=== modified file 'storage/ndb/test/run-test/db.cpp'
--- a/storage/ndb/test/run-test/db.cpp 2009-11-13 04:54:11 +0000
+++ b/storage/ndb/test/run-test/db.cpp 2010-02-18 23:50:31 +0000
@@ -195,13 +195,13 @@ connect_mysqld(atrt_process* proc)
return false;
}
- if (port)
- {
- mysql_protocol_type val = MYSQL_PROTOCOL_TCP;
- mysql_options(&proc->m_mysql, MYSQL_OPT_PROTOCOL, &val);
- }
for (size_t i = 0; i<20; i++)
{
+ if (port)
+ {
+ mysql_protocol_type val = MYSQL_PROTOCOL_TCP;
+ mysql_options(&proc->m_mysql, MYSQL_OPT_PROTOCOL, &val);
+ }
if (mysql_real_connect(&proc->m_mysql,
proc->m_host->m_hostname.c_str(),
"root", "", "test",
=== modified file 'storage/ndb/test/run-test/upgrade-tests.txt'
--- a/storage/ndb/test/run-test/upgrade-tests.txt 2009-05-25 19:53:07 +0000
+++ b/storage/ndb/test/run-test/upgrade-tests.txt 2010-02-18 23:01:15 +0000
@@ -22,3 +22,19 @@ cmd: testUpgrade
args: -n Upgrade_Traffic_FS T1
max-time: 1200
+cmd: testUpgrade
+args: -n Upgrade_Api_Only T1
+max-time: 1200
+
+cmd: testUpgrade
+args: -n Upgrade_Api_Before_NR1 T2
+max-time: 1200
+
+cmd: testUpgrade
+args: -n Upgrade_Api_NDBD_MGMD T1
+max-time: 1200
+
+cmd: testUpgrade
+args: -n Upgrade_Mixed_MGMD_API_NDBD T2
+max-time: 1200
+
=== modified file 'storage/ndb/test/src/CpcClient.cpp'
--- a/storage/ndb/test/src/CpcClient.cpp 2010-01-18 19:20:01 +0000
+++ b/storage/ndb/test/src/CpcClient.cpp 2010-02-18 23:50:31 +0000
@@ -441,6 +441,15 @@ SimpleCpcClient::cpc_send(const char *cm
break;
case PropertiesType_char:
args.get(name, val_s);
+ if (strlen(val_s.c_str()) > Parser_t::Context::MaxParseBytes)
+ {
+ ndbout << "Argument " << name << " at "
+ << strlen(val_s.c_str())
+ << " longer than max of "
+ << Parser_t::Context::MaxParseBytes
+ << endl;
+ abort();
+ }
cpc_out.println("%s: %s", name, val_s.c_str());
break;
default:
=== modified file 'storage/ndb/test/src/HugoTransactions.cpp'
--- a/storage/ndb/test/src/HugoTransactions.cpp 2010-01-28 15:16:46 +0000
+++ b/storage/ndb/test/src/HugoTransactions.cpp 2010-02-18 23:50:31 +0000
@@ -1501,6 +1501,144 @@ HugoTransactions::pkDelRecords(Ndb* pNdb
return NDBT_OK;
}
+int
+HugoTransactions::pkReadUnlockRecords(Ndb* pNdb,
+ int records,
+ int batch,
+ NdbOperation::LockMode lm)
+{
+ int reads = 0;
+ int r = 0;
+ int retryAttempt = 0;
+ int check;
+
+ if (batch == 0) {
+ g_info << "ERROR: Argument batch == 0 in pkReadRecords(). Not allowed." << endl;
+ return NDBT_FAILED;
+ }
+
+ if (idx != NULL) {
+ g_info << "ERROR: Cannot call pkReadUnlockRecords for index" << endl;
+ return NDBT_FAILED;
+ }
+
+ while (r < records){
+ if(r + batch > records)
+ batch = records - r;
+
+ if (retryAttempt >= m_retryMax){
+ g_info << "ERROR: has retried this operation " << retryAttempt
+ << " times, failing!" << endl;
+ return NDBT_FAILED;
+ }
+
+ pTrans = pNdb->startTransaction();
+ if (pTrans == NULL) {
+ const NdbError err = pNdb->getNdbError();
+
+ if (err.status == NdbError::TemporaryError){
+ ERR(err);
+ NdbSleep_MilliSleep(50);
+ retryAttempt++;
+ continue;
+ }
+ ERR(err);
+ return NDBT_FAILED;
+ }
+
+ MicroSecondTimer timer_start;
+ MicroSecondTimer timer_stop;
+ bool timer_active =
+ m_stats_latency != 0 &&
+ r >= batch && // first batch is "warmup"
+ r + batch != records; // last batch is usually partial
+
+ if (timer_active)
+ NdbTick_getMicroTimer(&timer_start);
+
+ Vector<const NdbLockHandle*> lockHandles;
+
+ NdbOperation::LockMode lmused;
+ if(pkReadRecordLockHandle(pNdb, lockHandles, r, batch, lm, &lmused) != NDBT_OK)
+ {
+ ERR(pTrans->getNdbError());
+ closeTransaction(pNdb);
+ return NDBT_FAILED;
+ }
+
+ check = pTrans->execute(NoCommit, AbortOnError);
+
+ if( check == -1 ) {
+ const NdbError err = pTrans->getNdbError();
+
+ if (err.status == NdbError::TemporaryError){
+ ERR(err);
+ closeTransaction(pNdb);
+ NdbSleep_MilliSleep(50);
+ retryAttempt++;
+ continue;
+ }
+ switch(err.code){
+ case 626: // Tuple did not exist
+ g_info << r << ": " << err.code << " " << err.message << endl;
+ r++;
+ break;
+
+ default:
+ ERR(err);
+ closeTransaction(pNdb);
+ return NDBT_FAILED;
+ }
+ } else {
+ /* Execute succeeded ok */
+ for (int b=0; (b<batch) && (r+b<records); b++){
+ if (calc.verifyRowValues(rows[b]) != 0){
+ closeTransaction(pNdb);
+ return NDBT_FAILED;
+ }
+ reads++;
+ r++;
+ }
+
+ if (pkUnlockRecord(pNdb,
+ lockHandles) != NDBT_OK)
+ {
+ closeTransaction(pNdb);
+ return NDBT_FAILED;
+ }
+
+ check = pTrans->execute(Commit, AbortOnError);
+
+ if (check == -1 )
+ {
+ const NdbError err = pTrans->getNdbError();
+
+ if (err.status == NdbError::TemporaryError){
+ ERR(err);
+ closeTransaction(pNdb);
+ NdbSleep_MilliSleep(50);
+ retryAttempt++;
+ continue;
+ }
+ ERR(err);
+ closeTransaction(pNdb);
+ return NDBT_FAILED;
+ }
+ }
+
+ closeTransaction(pNdb);
+
+ if (timer_active) {
+ NdbTick_getMicroTimer(&timer_stop);
+ NDB_TICKS ticks = NdbTick_getMicrosPassed(timer_start, timer_stop);
+ m_stats_latency->addObservation((double)ticks);
+ }
+ }
+ deallocRows();
+ g_info << reads << " records read" << endl;
+ return NDBT_OK;
+}
+
int
HugoTransactions::lockRecords(Ndb* pNdb,
=== modified file 'storage/ndb/test/src/NdbRestarter.cpp'
--- a/storage/ndb/test/src/NdbRestarter.cpp 2009-08-11 08:10:20 +0000
+++ b/storage/ndb/test/src/NdbRestarter.cpp 2010-02-18 23:50:31 +0000
@@ -898,4 +898,77 @@ loop:
return 0;
}
+int
+NdbRestarter::getMasterNodeVersion(int& version)
+{
+ int masterNodeId = getMasterNodeId();
+ if (masterNodeId != -1)
+ {
+ for(size_t i = 0; i < ndbNodes.size(); i++)
+ {
+ if (ndbNodes[i].node_id == masterNodeId)
+ {
+ version = ndbNodes[i].version;
+ return 0;
+ }
+ }
+ }
+
+ g_err << "Could not find node info for master node id "
+ << masterNodeId << endl;
+ return -1;
+}
+
+int
+NdbRestarter::getNodeTypeVersionRange(ndb_mgm_node_type type,
+ int& minVer,
+ int& maxVer)
+{
+ if (!isConnected())
+ return -1;
+
+ if (getStatus() != 0)
+ return -1;
+
+ Vector<ndb_mgm_node_state>* nodeVec = NULL;
+
+ switch (type)
+ {
+ case NDB_MGM_NODE_TYPE_API:
+ nodeVec = &apiNodes;
+ break;
+ case NDB_MGM_NODE_TYPE_NDB:
+ nodeVec = &ndbNodes;
+ break;
+ case NDB_MGM_NODE_TYPE_MGM:
+ nodeVec = &mgmNodes;
+ break;
+ default:
+ g_err << "Bad node type : " << type << endl;
+ return -1;
+ }
+
+ if (nodeVec->size() == 0)
+ {
+ g_err << "No nodes of type " << type << " online" << endl;
+ return -1;
+ }
+
+ minVer = 0;
+ maxVer = 0;
+
+ for(size_t i = 0; i < nodeVec->size(); i++)
+ {
+ int nodeVer = (*nodeVec)[i].version;
+ if ((minVer == 0) ||
+ (nodeVer < minVer))
+ minVer = nodeVer;
+
+ if (nodeVer > maxVer)
+ maxVer = nodeVer;
+ }
+
+ return 0;
+}
+
template class Vector<ndb_mgm_node_state>;
=== modified file 'storage/ndb/tools/CMakeLists.txt'
--- a/storage/ndb/tools/CMakeLists.txt 2010-01-04 00:00:52 +0000
+++ b/storage/ndb/tools/CMakeLists.txt 2010-02-18 10:45:20 +0000
@@ -56,6 +56,7 @@ SET_TARGET_PROPERTIES(ndb_config PROPERT
# Build ndbinfo_sql and run it to create ndbinfo.sql
ADD_EXECUTABLE(ndbinfo_sql ndbinfo_sql.cpp)
+TARGET_LINK_LIBRARIES(ndbinfo_sql ndbclient)
GET_TARGET_PROPERTY(NDBINFO_SQL_EXE ndbinfo_sql LOCATION)
ADD_CUSTOM_COMMAND(OUTPUT ${PROJECT_SOURCE_DIR}/storage/ndb/tools/ndbinfo.sql
COMMAND ${NDBINFO_SQL_EXE} ARGS > ndbinfo.sql
=== modified file 'storage/ndb/tools/desc.cpp'
--- a/storage/ndb/tools/desc.cpp 2009-09-04 11:33:38 +0000
+++ b/storage/ndb/tools/desc.cpp 2010-02-18 16:37:48 +0000
@@ -33,6 +33,7 @@ int desc_hashmap(Ndb_cluster_connection
static const char* _dbname = "TEST_DB";
static int _unqualified = 0;
static int _partinfo = 0;
+static int _blobinfo = 0;
const char *load_default_groups[]= { "mysql_cluster",0 };
@@ -53,6 +54,9 @@ static struct my_option my_long_options[
{ "retries", 'r', "Retry every second for # retries",
(uchar**) &_retries, (uchar**) &_retries, 0,
GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
+ { "blob-info", 'b', "Show information for hidden blob tables (requires -p)",
+ (uchar**) &_blobinfo, (uchar**) &_blobinfo, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
@@ -281,7 +285,24 @@ int desc_table(Ndb *myndb, char* name)
ndbout << endl;
if (_partinfo)
+ {
print_part_info(myndb, pTab);
+ ndbout << endl;
+ if (_blobinfo)
+ {
+ int noOfAttributes = pTab->getNoOfColumns();
+ for (int i = 0; i < noOfAttributes; i++)
+ {
+ const NdbDictionary::Column* column = pTab->getColumn(i);
+ if ((column->getType() == NdbDictionary::Column::Blob) ||
+ (column->getType() == NdbDictionary::Column::Text))
+ {
+ print_part_info(myndb, (NDBT_Table*) column->getBlobTable());
+ ndbout << endl;
+ }
+ }
+ }
+ }
return 1;
}
@@ -309,7 +330,12 @@ void print_part_info(Ndb* pNdb, NDBT_Tab
{ 0, 0, 0 }
};
- ndbout << "-- Per partition info -- " << endl;
+ ndbout << "-- Per partition info";
+
+ if (_blobinfo && _partinfo)
+ ndbout << " for " << pTab->getName();
+
+ ndbout << " -- " << endl;
const Uint32 codeWords= 1;
Uint32 codeSpace[ codeWords ];
@@ -380,7 +406,6 @@ void print_part_info(Ndb* pNdb, NDBT_Tab
printf("\n");
}
} while(0);
-
pTrans->close();
}
@@ -406,6 +431,5 @@ int desc_hashmap(Ndb_cluster_connection
delete [] tmp;
return 1;
}
-
return 0;
}
Thread |
---|
• bzr commit into mysql-5.1-telco-7.0 branch (Martin.Skold:3407) Bug#46914Bug#47929 Bug#50599 Bug#51027 Bug#51048 Bug#51256 Bug#51273 | Martin Skold | 24 Feb |