List: Commits
From: Mauritz Sundell
Date: June 21 2012 12:43pm
Subject: bzr push into mysql-5.1-telco-7.0 branch (mauritz.sundell:4945 to 4946)
 4946 Mauritz Sundell	2012-06-21 [merge]
      ndb - increase of hashmap size and supporting 1024 partitions
      
      This increased the hashmap size from 240 to 3840 to ensure that we don't get unbalanced
      access to the partitions in a large cluster setup. This improved performance at 16 nodes
      and beyond by a fairly significant factor.
      
      Part of Mikael Ronstrom's "Patches used in benchmark tree with Intel"

    modified:
      storage/ndb/include/kernel/ndb_limits.h
      storage/ndb/include/kernel/signaldata/CreateTable.hpp
      storage/ndb/include/kernel/signaldata/DictTabInfo.hpp
      storage/ndb/include/kernel/signaldata/ScanTab.hpp
      storage/ndb/include/ndb_version.h.in
      storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
      storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
      storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
      storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.hpp
      storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
      storage/ndb/src/kernel/blocks/suma/Suma.hpp
      storage/ndb/src/kernel/vm/SimulatedBlock.hpp
      storage/ndb/src/ndbapi/NdbDictionary.cpp
      storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
      storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
      storage/ndb/src/ndbapi/NdbScanOperation.cpp
 4945 Martin Skold	2012-06-21 [merge]
      Merge from 6.3

=== modified file 'storage/ndb/include/kernel/ndb_limits.h'
--- a/storage/ndb/include/kernel/ndb_limits.h	2012-02-02 11:01:13 +0000
+++ b/storage/ndb/include/kernel/ndb_limits.h	2012-06-21 12:38:01 +0000
@@ -96,8 +96,26 @@
 #define MAX_KEY_SIZE_IN_WORDS 1023
 #define MAX_FRM_DATA_SIZE 6000
 #define MAX_NULL_BITS 4096
-#define MAX_FRAGMENT_DATA_BYTES (4+(2 * 8 * MAX_REPLICAS * MAX_NDB_NODES))
+/*
+ * Fragmentation data are Uint16 values: the first two are
+ * #replicas and #fragments; then, for each fragment, first
+ * the log-part-id, then the nodeid of each replica.
+ * See creation in Dbdih::execCREATE_FRAGMENTATION_REQ()
+ * and read in Dbdih::execDIADDTABREQ()
+ */
+#define MAX_FRAGMENT_DATA_ENTRIES (2 + (1 + MAX_REPLICAS) * MAX_NDB_PARTITIONS)
+#define MAX_FRAGMENT_DATA_BYTES (2 * MAX_FRAGMENT_DATA_ENTRIES)
+#define MAX_FRAGMENT_DATA_WORDS ((MAX_FRAGMENT_DATA_BYTES + 3) / 4)
+
+#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
+#define MAX_NDB_PARTITIONS 240
+#else
 #define MAX_NDB_PARTITIONS 1024
+#endif
+
+#define NDB_PARTITION_BITS 16
+#define NDB_PARTITION_MASK ((Uint32)((1 << NDB_PARTITION_BITS) - 1))
+
 #define MAX_RANGE_DATA (131072+MAX_NDB_PARTITIONS) //0.5 MByte of list data
 
 #define MAX_WORDS_META_FILE 24576
@@ -191,7 +209,22 @@
  */
 #define LCP_RESTORE_BUFFER (4*32)
 
-#define NDB_DEFAULT_HASHMAP_BUCKTETS 240
+
+/**
+ * Support at least one partition per LDM, and also
+ * try to make the size a multiple of all possible
+ * data node counts, so that all partitions map to
+ * as nearly the same number of hashmap buckets as
+ * possible; otherwise some partitions will be
+ * bigger than others.
+ */
+
+#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
+#define NDB_DEFAULT_HASHMAP_BUCKETS 240
+#else
+#define NDB_DEFAULT_HASHMAP_BUCKETS (48 * 16 * 5) /* 3840 */
+#endif
+#define NDB_DEFAULT_HASHMAP_BUCKETS_BYTES (2 * NDB_DEFAULT_HASHMAP_BUCKETS)
 
 /**
  * Bits/mask used for coding/decoding blockno/blockinstance
@@ -220,6 +253,16 @@
 
 #define NDB_FILE_BUFFER_SIZE (256*1024)
 
+/*
+ * NDB_FS_RW_PAGES must be big enough for the biggest request,
+ * probably PACK_TABLE_PAGES (see Dbdih.hpp)
+ */
+#if NDB_VERSION_D < NDB_MAKE_VERSION(7,2,0)
+#define NDB_FS_RW_PAGES 32
+#else
+#define NDB_FS_RW_PAGES 134
+#endif
+
 /**
  * MAX_ATTRIBUTES_IN_TABLE old handling
  */
@@ -257,4 +300,22 @@
 #define MAX_INDEX_STAT_VALUE_CSIZE  512 /* Longvarbinary(2048) */
 #define MAX_INDEX_STAT_VALUE_FORMAT 1
 
+#ifdef NDB_STATIC_ASSERT
+
+static inline void ndb_limits_constraints()
+{
+  NDB_STATIC_ASSERT(MAX_NDB_PARTITIONS <= NDB_DEFAULT_HASHMAP_BUCKETS);
+
+  NDB_STATIC_ASSERT(MAX_NDB_PARTITIONS - 1 <= NDB_PARTITION_MASK);
+
+  // MAX_NDB_NODES should be 48, but code assumes it is 49
+  STATIC_CONST(MAX_NDB_DATA_NODES = MAX_DATA_NODE_ID);
+  NDB_STATIC_ASSERT(MAX_NDB_NODES == MAX_NDB_DATA_NODES + 1);
+
+  // Default partitioning is 1 partition per LDM
+  NDB_STATIC_ASSERT(MAX_NDB_DATA_NODES * MAX_NDBMT_LQH_THREADS <= MAX_NDB_PARTITIONS);
+}
+
+#endif
+
 #endif

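The new default 3840 = 48 * 16 * 5 gives one bucket per partition even at the maximum of one partition per LDM (48 data nodes times 16 LQH threads, if MAX_NDBMT_LQH_THREADS is 16 as the static assert above suggests). A minimal standalone sketch, not part of the patch, of the round-robin bucket-to-partition assignment that default hashmaps use (see the initDefaultHashMap hunk in NdbDictionary.cpp below), showing when the split is exactly even:

/* sketch.c - how evenly NDB_DEFAULT_HASHMAP_BUCKETS spreads over a
 * given partition count under round-robin assignment
 * (bucket i -> partition i % parts) */
#include <stdio.h>

#define NDB_DEFAULT_HASHMAP_BUCKETS (48 * 16 * 5) /* 3840 */

int main(void)
{
  for (unsigned parts = 1; parts <= 48; parts++)
  {
    unsigned per_part = NDB_DEFAULT_HASHMAP_BUCKETS / parts;
    unsigned extra = NDB_DEFAULT_HASHMAP_BUCKETS % parts;
    printf("%2u partitions: %u buckets each%s\n",
           parts, per_part, extra ? " (+1 for some)" : "");
  }
  return 0;
}
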
=== modified file 'storage/ndb/include/kernel/signaldata/CreateTable.hpp'
--- a/storage/ndb/include/kernel/signaldata/CreateTable.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/include/kernel/signaldata/CreateTable.hpp	2012-06-21 12:29:21 +0000
@@ -76,7 +76,8 @@ struct CreateTableRef {
     NoLoggingTemporaryTable = 778,
     InvalidHashMap = 790,
     TableDefinitionTooBig = 793,
-    FeatureRequiresUpgrade = 794
+    FeatureRequiresUpgrade = 794,
+    TooManyFragments = 1224
   };
 
   Uint32 senderRef;

=== modified file 'storage/ndb/include/kernel/signaldata/DictTabInfo.hpp'
--- a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp	2012-06-21 12:35:31 +0000
@@ -360,7 +360,7 @@ public:
     char   FrmData[MAX_FRM_DATA_SIZE];
     Uint32 FragmentCount;
     Uint32 ReplicaDataLen;
-    Uint16 ReplicaData[MAX_FRAGMENT_DATA_BYTES];
+    Uint16 ReplicaData[MAX_FRAGMENT_DATA_ENTRIES];
     Uint32 FragmentDataLen;
     Uint16 FragmentData[3*MAX_NDB_PARTITIONS];
 
@@ -802,7 +802,7 @@ struct DictHashMapInfo {
   struct HashMap {
     char   HashMapName[MAX_TAB_NAME_SIZE];
     Uint32 HashMapBuckets;
-    Uint16 HashMapValues[512];
+    Uint16 HashMapValues[NDB_DEFAULT_HASHMAP_BUCKETS];
     Uint32 HashMapObjectId;
     Uint32 HashMapVersion;
     HashMap() {}

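For concreteness, the new ReplicaData bound works out as follows at 1024 partitions (MAX_REPLICAS == 4 is an assumption here, not shown in this diff):

/* worked sizing of the fragmentation data, mirroring the new
 * macros in ndb_limits.h; ASSUMED_MAX_REPLICAS is hypothetical */
enum {
  ASSUMED_MAX_REPLICAS = 4,
  ENTRIES = 2 + (1 + ASSUMED_MAX_REPLICAS) * 1024, /* 5122 Uint16 entries */
  BYTES   = 2 * ENTRIES,                           /* 10244 bytes */
  WORDS   = (BYTES + 3) / 4                        /* 2561 32-bit words */
};
typedef char check_entries[ENTRIES == 5122 ? 1 : -1]; /* compile-time check */

The old declaration used MAX_FRAGMENT_DATA_BYTES (a byte count) as an element count for a Uint16 array; sizing by MAX_FRAGMENT_DATA_ENTRIES makes the units consistent.
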
=== modified file 'storage/ndb/include/kernel/signaldata/ScanTab.hpp'
--- a/storage/ndb/include/kernel/signaldata/ScanTab.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/include/kernel/signaldata/ScanTab.hpp	2012-06-21 12:33:15 +0000
@@ -131,7 +131,14 @@ private:
 /**
  * Request Info
  *
- p = Parallelism           - 8  Bits -> Max 256 (Bit 0-7)
+ p = Parallelism           - 8  Bits -> Max 255 (Bit 0-7).
+                                        Note: these bits are ignored since
+                                        7.0.34, 7.1.23, 7.2.7 and should be
+                                        zero-filled until future reuse.
+                                        For signals sent to old nodes they
+                                        must still be filled in.
+                                        Check version with
+                                        ndbd_scan_tabreq_implicit_parallelism().
  l = Lock mode             - 1  Bit 8
  h = Hold lock mode        - 1  Bit 10
  c = Read Committed        - 1  Bit 11

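The eight parallelism bits remain in the wire format, so senders must still know whether to fill them. A small sketch of the two cases (the 0xFF mask for the 8-bit field is an assumption based on the layout above; the real accessor is ScanTabReq::setParallelism()):

typedef unsigned int Uint32;

/* fill requestInfo bits 0-7 only for peers that still need them */
static void sketch_fill_parallelism(Uint32 * ri, Uint32 parallel,
                                    int peer_has_implicit)
{
  if (peer_has_implicit)
    return;                    /* new node: leave bits zero-filled */
  *ri |= (parallel & 0xFFu);   /* old node: store explicit value */
}
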
=== modified file 'storage/ndb/include/ndb_version.h.in'
--- a/storage/ndb/include/ndb_version.h.in	2012-06-11 10:23:01 +0000
+++ b/storage/ndb/include/ndb_version.h.in	2012-06-21 12:33:15 +0000
@@ -750,4 +750,26 @@ ndbd_128_instances_address(Uint32 x)
   return x >= NDBD_128_INSTANCES_ADDRESS_72;
 }
 
+#define NDBD_SCAN_TABREQ_IMPLICIT_PARALLELISM_70 NDB_MAKE_VERSION(7,0,34)
+#define NDBD_SCAN_TABREQ_IMPLICIT_PARALLELISM_71 NDB_MAKE_VERSION(7,1,23)
+#define NDBD_SCAN_TABREQ_IMPLICIT_PARALLELISM_72 NDB_MAKE_VERSION(7,2,7)
+
+static
+inline
+int
+ndbd_scan_tabreq_implicit_parallelism(Uint32 x)
+{
+  const Uint32 major = (x >> 16) & 0xFF;
+  const Uint32 minor = (x >>  8) & 0xFF;
+
+  if (major == 7 && minor < 2)
+  {
+    if (minor == 0)
+      return x >= NDBD_SCAN_TABREQ_IMPLICIT_PARALLELISM_70;
+    else if (minor == 1)
+      return x >= NDBD_SCAN_TABREQ_IMPLICIT_PARALLELISM_71;
+  }
+  return x >= NDBD_SCAN_TABREQ_IMPLICIT_PARALLELISM_72;
+}
+
 #endif

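The branch-aware gate is easy to misread, so a few spot checks (a standalone sketch; assumes the generated ndb_version.h is on the include path):

#include <assert.h>
#include <ndb_version.h> /* generated from ndb_version.h.in */

int main(void)
{
  /* first fixed version in each branch supports implicit parallelism */
  assert( ndbd_scan_tabreq_implicit_parallelism(NDB_MAKE_VERSION(7,0,34)));
  assert(!ndbd_scan_tabreq_implicit_parallelism(NDB_MAKE_VERSION(7,0,33)));
  assert( ndbd_scan_tabreq_implicit_parallelism(NDB_MAKE_VERSION(7,1,23)));
  assert(!ndbd_scan_tabreq_implicit_parallelism(NDB_MAKE_VERSION(7,1,22)));
  assert( ndbd_scan_tabreq_implicit_parallelism(NDB_MAKE_VERSION(7,2,7)));
  assert(!ndbd_scan_tabreq_implicit_parallelism(NDB_MAKE_VERSION(7,2,6)));
  return 0;
}
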
=== modified file 'storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp'
--- a/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp	2012-06-21 12:35:31 +0000
@@ -48,8 +48,8 @@ DictTabInfo::TableMapping[] = {
   DTIMAP2(Table, FrmLen, FrmLen, 0, MAX_FRM_DATA_SIZE),
   DTIMAPB(Table, FrmData, FrmData, 0, MAX_FRM_DATA_SIZE, FrmLen),
   DTIMAP2(Table, FragmentCount, FragmentCount, 0, MAX_NDB_PARTITIONS),
-  DTIMAP2(Table, ReplicaDataLen, ReplicaDataLen, 0, 2*MAX_FRAGMENT_DATA_BYTES),
-  DTIMAPB(Table, ReplicaData, ReplicaData, 0, 2*MAX_FRAGMENT_DATA_BYTES, ReplicaDataLen),
+  DTIMAP2(Table, ReplicaDataLen, ReplicaDataLen, 0, MAX_FRAGMENT_DATA_BYTES),
+  DTIMAPB(Table, ReplicaData, ReplicaData, 0, MAX_FRAGMENT_DATA_BYTES, ReplicaDataLen),
   DTIMAP2(Table, FragmentDataLen, FragmentDataLen, 0, 6*MAX_NDB_PARTITIONS),
   DTIMAPB(Table, FragmentData, FragmentData, 0, 6*MAX_NDB_PARTITIONS, FragmentDataLen),
   DTIMAP2(Table, TablespaceDataLen, TablespaceDataLen, 0, 8*MAX_NDB_PARTITIONS),
@@ -337,14 +337,15 @@ const
 SimpleProperties::SP2StructMapping
 DictHashMapInfo::Mapping[] = {
   DHMIMAPS(HashMap, HashMapName, HashMapName, 0, MAX_TAB_NAME_SIZE),
-  DHMIMAP2(HashMap, HashMapBuckets, HashMapBuckets, 0, 256),
+  DHMIMAP2(HashMap, HashMapBuckets, HashMapBuckets, 0, NDB_DEFAULT_HASHMAP_BUCKETS),
   DTIMAP(HashMap, HashMapObjectId, HashMapObjectId),
   DTIMAP(HashMap, HashMapVersion, HashMapVersion),
 
   /**
    * This *should* change to Uint16 or similar once endian is pushed
    */
-  DHMIMAPB(HashMap, HashMapValues, HashMapValues, 0, 256*2, HashMapBuckets)
+  DHMIMAPB(HashMap, HashMapValues, HashMapValues, 0,
+           NDB_DEFAULT_HASHMAP_BUCKETS_BYTES, HashMapBuckets)
 };
 
 //static

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2012-01-23 08:00:45 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2012-06-21 12:35:31 +0000
@@ -4931,9 +4931,12 @@ void Dbdict::handleTabInfoInit(Signal *
       tablePtr.p->fragmentCount = fragments = get_default_fragments(signal);
     }
 
+    tabRequire(fragments <= MAX_NDB_PARTITIONS,
+               CreateTableRef::TooManyFragments);
+
     char buf[MAX_TAB_NAME_SIZE+1];
     BaseString::snprintf(buf, sizeof(buf), "DEFAULT-HASHMAP-%u-%u",
-                         NDB_DEFAULT_HASHMAP_BUCKTETS,
+                         NDB_DEFAULT_HASHMAP_BUCKETS,
                          fragments);
     DictObject* dictObj = get_object(buf);
     if (dictObj && dictObj->m_type == DictTabInfo::HashMap)
@@ -6452,17 +6455,18 @@ Dbdict::createTab_dih(Signal* signal, Sc
 
   // fragmentation in long signal section
   {
-    Uint32 page[1024];
+    Uint32 page[MAX_FRAGMENT_DATA_WORDS];
     LinearSectionPtr ptr[3];
     Uint32 noOfSections = 0;
 
     const Uint32 size = fragSec.getSize();
+    ndbrequire(size <= NDB_ARRAY_SIZE(page));
 
     // wl3600_todo add ndbrequire on SR, NR
     if (size != 0) {
       jam();
       LocalArenaPoolImpl op_sec_pool(op_ptr.p->m_trans_ptr.p->m_arena,c_opSectionBufferPool);
-      bool ok = copyOut(op_sec_pool, fragSec, page, 1024);
+      bool ok = copyOut(op_sec_pool, fragSec, page, size);
       ndbrequire(ok);
       ptr[noOfSections].sz = size;
       ptr[noOfSections].p = page;
@@ -23053,7 +23057,7 @@ Dbdict::createNodegroup_subOps(Signal* s
      *   and still continue transaction
      *   but that i dont know how
      */
-    Uint32 buckets = 240;
+    Uint32 buckets = NDB_DEFAULT_HASHMAP_BUCKETS;
     Uint32 fragments = get_default_fragments(signal, 1);
     char buf[MAX_TAB_NAME_SIZE+1];
     BaseString::snprintf(buf, sizeof(buf), "DEFAULT-HASHMAP-%u-%u",
@@ -28615,7 +28619,7 @@ Dbdict::createHashMap_parse(Signal* sign
     if (impl_req->requestType & CreateHashMapReq::CreateDefault)
     {
       jam();
-      impl_req->buckets = NDB_DEFAULT_HASHMAP_BUCKTETS;
+      impl_req->buckets = NDB_DEFAULT_HASHMAP_BUCKETS;
       impl_req->fragments = 0;
     }
 
@@ -28628,6 +28632,13 @@ Dbdict::createHashMap_parse(Signal* sign
       fragments = get_default_fragments(signal);
     }
 
+    if (fragments > MAX_NDB_PARTITIONS)
+    {
+      jam();
+      setError(error, CreateTableRef::TooManyFragments, __LINE__);
+      return;
+    }
+
     BaseString::snprintf(hm.HashMapName, sizeof(hm.HashMapName),
                          "DEFAULT-HASHMAP-%u-%u",
                          buckets,
@@ -28858,13 +28869,18 @@ Dbdict::createHashMap_parse(Signal* sign
     Uint32 tmp = 0;
     for (Uint32 i = 0; i<hm.HashMapBuckets; i++)
     {
-      ndbrequire(hm.HashMapValues[i] < 256);
-      map_ptr.p->m_map[i] = (Uint8)hm.HashMapValues[i];
+      map_ptr.p->m_map[i] = hm.HashMapValues[i];
       if (hm.HashMapValues[i] > tmp)
         tmp = hm.HashMapValues[i];
     }
     map_ptr.p->m_fragments = tmp + 1;
   }
+  if (map_ptr.p->m_fragments > MAX_NDB_PARTITIONS)
+  {
+    jam();
+    setError(error, CreateTableRef::TooManyFragments, __LINE__);
+    goto error;
+  }
 
   if (ERROR_INSERTED(6211))
   {

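Why the hard-coded page[1024] had to grow: at 1024 partitions the fragmentation section no longer fits in 1024 words. Worked numbers (same MAX_REPLICAS == 4 assumption as in the sizing sketch after the DictTabInfo.hpp section):

enum {
  ENTRIES = 2 + (1 + 4 /* assumed MAX_REPLICAS */) * 1024, /* 5122 */
  WORDS   = (2 * ENTRIES + 3) / 4                          /* 2561 */
};
/* 2561 > 1024, so the old fixed buffer could overflow; hence
 * page[MAX_FRAGMENT_DATA_WORDS] plus the added
 * ndbrequire(size <= NDB_ARRAY_SIZE(page)) guard */
typedef char check_words[WORDS == 2561 ? 1 : -1];
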
=== modified file 'storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp	2012-02-03 12:30:24 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp	2012-06-21 12:33:15 +0000
@@ -100,12 +100,30 @@
 /*#########*/
 /* SIZES   */
 /*#########*/
-#define ZPAGEREC 100
+/*
+ * Only enough pages for one table are needed, since only
+ * one metadata change at a time is allowed.
+ */
+#define ZPAGEREC PACK_TABLE_PAGES
 #define ZCREATE_REPLICA_FILE_SIZE 4
 #define ZPROXY_MASTER_FILE_SIZE 10
 #define ZPROXY_FILE_SIZE 10
 #endif
 
+/*
+ * Packing a table into pages.
+ * See the use of writePageWord() in
+ * packTableIntoPagesLab() and its helper
+ * functions to determine the constants
+ * below.
+ */
+#define MAX_CRASHED_REPLICAS 8
+#define PACK_REPLICAS_WORDS (4 + 4 * MAX_LCP_STORED + 2 * MAX_CRASHED_REPLICAS)
+#define PACK_FRAGMENT_WORDS (6 + 2 * MAX_REPLICAS * PACK_REPLICAS_WORDS)
+#define PACK_TABLE_WORDS (10 + MAX_NDB_PARTITIONS * PACK_FRAGMENT_WORDS)
+#define PACK_TABLE_PAGE_WORDS (2048 - 32)
+#define PACK_TABLE_PAGES ((PACK_TABLE_WORDS + PACK_TABLE_PAGE_WORDS - 1) / PACK_TABLE_PAGE_WORDS)
+
 class Dbdih: public SimulatedBlock {
 #ifdef ERROR_INSERT
   typedef void (Dbdih::* SendFunction)(Signal*, Uint32, Uint32);
@@ -515,12 +533,12 @@ public:
     Method method;
     Storage tabStorage;
 
-    Uint32 pageRef[32];
+    Uint32 pageRef[PACK_TABLE_PAGES]; // TODO: makedynamic
 //-----------------------------------------------------------------------------
 // Each entry in this array contains a reference to 16 fragment records in a
 // row. Thus finding the correct record is very quick provided the fragment id.
 //-----------------------------------------------------------------------------
-    Uint32 startFid[MAX_NDB_NODES * MAX_FRAG_PER_LQH / NO_OF_FRAGS_PER_CHUNK];
+    Uint32 startFid[(MAX_NDB_PARTITIONS - 1) / NO_OF_FRAGS_PER_CHUNK + 1];
 
     Uint32 tabFile[2];
     Uint32 connectrec;                                    
@@ -547,7 +565,7 @@ public:
 
     Uint8 kvalue;
     Uint8 noOfBackups;
-    Uint8 noPages;
+    Uint16 noPages;
     Uint16 tableType;
     Uint16 primaryTableId;
 

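Plugging in plausible values makes the new constants concrete (MAX_LCP_STORED == 3 and MAX_REPLICAS == 4 are assumptions, not shown in this diff, but they line up exactly with NDB_FS_RW_PAGES == 134 in ndb_limits.h above):

enum {
  SKETCH_REPLICAS_WORDS = 4 + 4 * 3 /* MAX_LCP_STORED */ + 2 * 8, /* 32 */
  SKETCH_FRAGMENT_WORDS = 6 + 2 * 4 /* MAX_REPLICAS */
                          * SKETCH_REPLICAS_WORDS,                /* 262 */
  SKETCH_TABLE_WORDS = 10 + 1024 * SKETCH_FRAGMENT_WORDS,         /* 268298 */
  SKETCH_PAGE_WORDS = 2048 - 32,                                  /* 2016 */
  SKETCH_TABLE_PAGES = (SKETCH_TABLE_WORDS + SKETCH_PAGE_WORDS - 1)
                       / SKETCH_PAGE_WORDS                        /* 134 */
};
typedef char check_pages[SKETCH_TABLE_PAGES == 134 ? 1 : -1];
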
=== modified file 'storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2012-04-24 13:17:43 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2012-06-21 12:35:31 +0000
@@ -85,7 +85,6 @@
 extern EventLogger * g_eventLogger;
 
 #define SYSFILE ((Sysfile *)&sysfileData[0])
-#define MAX_CRASHED_REPLICAS 8
 #define ZINIT_CREATE_GCI Uint32(0)
 #define ZINIT_REPLICA_LAST_GCI Uint32(-1)
 
@@ -7981,7 +7980,7 @@ void Dbdih::execDIADDTABREQ(Signal* sign
   }
 
   union {
-    Uint16 fragments[2 + MAX_FRAG_PER_LQH*MAX_REPLICAS*MAX_NDB_NODES];
+    Uint16 fragments[MAX_FRAGMENT_DATA_ENTRIES];
     Uint32 align;
   };
   (void)align; // kill warning
@@ -17457,6 +17456,7 @@ void Dbdih::writeTabfile(Signal* signal,
   signal->theData[4] = ZVAR_NO_WORD;
   signal->theData[5] = tab->noPages;
 
+  NDB_STATIC_ASSERT(NDB_ARRAY_SIZE(tab->pageRef) <= NDB_FS_RW_PAGES);
   Uint32 section[2 * NDB_ARRAY_SIZE(tab->pageRef)];
   for (Uint32 i = 0; i < tab->noPages; i++)
   {

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2012-05-03 09:47:51 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2012-06-21 12:33:15 +0000
@@ -10423,9 +10423,7 @@ void Dbtc::execSCAN_TABREQ(Signal* signa
   const Uint32 buddyPtr = (tmpXX == 0xFFFFFFFF ? RNIL : tmpXX);
   Uint32 currSavePointId = 0;
   
-  Uint32 scanConcurrency = scanTabReq->getParallelism(ri);
   Uint32 noOprecPerFrag = ScanTabReq::getScanBatch(ri);
-  Uint32 scanParallel = scanConcurrency;
   Uint32 errCode;
   ScanRecordPtr scanptr;
 
@@ -10441,6 +10439,9 @@ void Dbtc::execSCAN_TABREQ(Signal* signa
   SectionHandle handle(this, signal);
   SegmentedSectionPtr api_op_ptr;
   handle.getSection(api_op_ptr, 0);
+  // Scan parallelism is determined by the number of receiver ids sent
+  Uint32 scanParallel = api_op_ptr.sz;
+  Uint32 scanConcurrency = scanParallel;
   Uint32 * apiPtr = signal->theData+25; // temp storage
   copy(apiPtr, api_op_ptr);
 

=== modified file 'storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp	2011-11-25 12:18:23 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp	2012-06-21 12:26:14 +0000
@@ -2182,7 +2182,7 @@ Ndbcntr::createHashMap(Signal* signal, U
   req->requestInfo = 0;
   req->transId = c_schemaTransId;
   req->transKey = c_schemaTransKey;
-  req->buckets = 240;
+  req->buckets = NDB_DEFAULT_HASHMAP_BUCKETS;
   req->fragments = 0;
   sendSignal(DBDICT_REF, GSN_CREATE_HASH_MAP_REQ, signal,
 	     CreateHashMapReq::SignalLength, JBB);

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.hpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.hpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncIoThread.hpp	2012-06-21 12:33:15 +0000
@@ -82,7 +82,7 @@ public:
 	char *buf;
 	size_t size;
 	off_t offset;
-      } pages[32];
+      } pages[NDB_FS_RW_PAGES];
     } readWrite;
     struct {
       const char * buf;

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp	2011-11-16 11:05:46 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp	2012-06-21 12:33:15 +0000
@@ -585,7 +585,7 @@ Ndbfs::execFSCLOSEREQ(Signal * signal)
 void 
 Ndbfs::readWriteRequest(int action, Signal * signal)
 {
-  Uint32 theData[25 + 2 * 32];
+  Uint32 theData[25 + 2 * NDB_FS_RW_PAGES];
   memcpy(theData, signal->theData, 4 * signal->getLength());
   SectionHandle handle(this, signal);
   if (handle.m_cnt > 0)

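A quick size check on the enlarged on-stack copy (simple arithmetic from the constants above, assuming 4-byte Uint32):

/* old: 25 + 2 * 32  =  89 words =  356 bytes of stack
 * new: 25 + 2 * 134 = 293 words = 1172 bytes of stack */
enum { THE_DATA_WORDS = 25 + 2 * 134 };
typedef char check_the_data[THE_DATA_WORDS == 293 ? 1 : -1];
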
=== modified file 'storage/ndb/src/kernel/blocks/suma/Suma.hpp'
--- a/storage/ndb/src/kernel/blocks/suma/Suma.hpp	2011-12-02 12:22:02 +0000
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.hpp	2012-06-21 12:29:21 +0000
@@ -119,11 +119,13 @@ public:
   void suma_ndbrequire(bool v);
 
   // wl4391_todo big enough for now
+  // Keep m_fragDesc within 32 bits;
+  // m_dummy is used to pass the value.
   union FragmentDescriptor { 
     struct  {
-      Uint8 m_fragmentNo;
+      Uint16 m_fragmentNo;
       Uint8 m_lqhInstanceKey;
-      Uint16 m_nodeId;
+      Uint8 m_nodeId;
     } m_fragDesc;
     Uint32 m_dummy;
   };

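A standalone sketch of the rearranged descriptor (stdint types substituted so it compiles outside the kernel tree), checking that it still overlays the 32-bit m_dummy with no padding:

#include <stdint.h>

typedef union {
  struct {
    uint16_t m_fragmentNo;   /* widened: fragment ids may now exceed 255 */
    uint8_t  m_lqhInstanceKey;
    uint8_t  m_nodeId;       /* narrowed: node ids still fit in 8 bits */
  } m_fragDesc;
  uint32_t m_dummy;          /* whole descriptor passed as one value */
} SketchFragmentDescriptor;

typedef char check_fd[sizeof(SketchFragmentDescriptor) == 4 ? 1 : -1];
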
=== modified file 'storage/ndb/src/kernel/vm/SimulatedBlock.hpp'
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp	2012-02-02 11:01:13 +0000
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp	2012-06-21 12:29:21 +0000
@@ -1362,10 +1362,10 @@ SectionHandle::~SectionHandle()
 
 struct Hash2FragmentMap
 {
-  STATIC_CONST( MAX_MAP = 240 );
+  STATIC_CONST( MAX_MAP = NDB_DEFAULT_HASHMAP_BUCKETS );
   Uint32 m_cnt;
   Uint32 m_fragments;
-  Uint8 m_map[MAX_MAP];
+  Uint16 m_map[MAX_MAP];
   Uint32 nextPool;
   Uint32 m_object_id;
 };

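The cost side of widening Hash2FragmentMap (simple arithmetic; each pooled record now stores 3840 Uint16 values instead of 240 Uint8 values):

enum {
  OLD_MAP_BYTES = 240 * 1,  /* Uint8  m_map[240]  ->  240 bytes */
  NEW_MAP_BYTES = 3840 * 2  /* Uint16 m_map[3840] -> 7680 bytes */
};
/* a 32x growth per map record */
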
=== modified file 'storage/ndb/src/ndbapi/NdbDictionary.cpp'
--- a/storage/ndb/src/ndbapi/NdbDictionary.cpp	2012-01-07 11:29:10 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp	2012-06-21 12:24:52 +0000
@@ -1896,7 +1896,7 @@ NdbDictionary::Dictionary::getDefaultHas
 {
   BaseString tmp;
   tmp.assfmt("DEFAULT-HASHMAP-%u-%u",
-             NDB_DEFAULT_HASHMAP_BUCKTETS, fragments);
+             NDB_DEFAULT_HASHMAP_BUCKETS, fragments);
 
   return getHashMap(dst, tmp.c_str());
 }
@@ -1928,12 +1928,12 @@ NdbDictionary::Dictionary::initDefaultHa
 {
   BaseString tmp;
   tmp.assfmt("DEFAULT-HASHMAP-%u-%u",
-             NDB_DEFAULT_HASHMAP_BUCKTETS, fragments);
+             NDB_DEFAULT_HASHMAP_BUCKETS, fragments);
 
   dst.setName(tmp.c_str());
 
   Vector<Uint32> map;
-  for (Uint32 i = 0; i<NDB_DEFAULT_HASHMAP_BUCKTETS; i++)
+  for (Uint32 i = 0; i < NDB_DEFAULT_HASHMAP_BUCKETS; i++)
   {
     map.push_back(i % fragments);
   }
@@ -2057,14 +2057,14 @@ retry:
     if (cnt == 0)
     {
       newmap.m_name.assfmt("HASHMAP-%u-%u-%u",
-                           NDB_DEFAULT_HASHMAP_BUCKTETS,
+                           NDB_DEFAULT_HASHMAP_BUCKETS,
                            oldcnt,
                            newcnt);
     }
     else
     {
       newmap.m_name.assfmt("HASHMAP-%u-%u-%u-#%u",
-                           NDB_DEFAULT_HASHMAP_BUCKTETS,
+                           NDB_DEFAULT_HASHMAP_BUCKETS,
                            oldcnt,
                            newcnt,
                            cnt);

=== modified file 'storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2012-03-19 09:44:55 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2012-06-21 12:29:21 +0000
@@ -2539,7 +2539,7 @@ NdbDictInterface::getTable(class NdbApiS
       }
       for (Uint32 i = 0; i<tmp.m_map.size(); i++)
       {
-        assert(tmp.m_map[i] <= 255);
+        assert(tmp.m_map[i] <= NDB_PARTITION_MASK);
         rt->m_hash_map.push_back(tmp.m_map[i]);
       }
     }
@@ -8216,6 +8216,7 @@ NdbDictInterface::create_hashmap(const N
   hm.HashMapBuckets = src.getMapLen();
   for (Uint32 i = 0; i<hm.HashMapBuckets; i++)
   {
+    assert(NdbHashMapImpl::getImpl(src).m_map[i] <= NDB_PARTITION_MASK);
     hm.HashMapValues[i] = NdbHashMapImpl::getImpl(src).m_map[i];
   }
 

=== modified file 'storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp	2011-07-04 13:37:56 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp	2012-06-21 12:29:21 +0000
@@ -219,7 +219,7 @@ public:
   Uint32 m_hashValueMask;
   Uint32 m_hashpointerValue;
   Vector<Uint16> m_fragments;
-  Vector<Uint8> m_hash_map;
+  Vector<Uint16> m_hash_map;
 
   Uint64 m_max_rows;
   Uint64 m_min_rows;

=== modified file 'storage/ndb/src/ndbapi/NdbQueryOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-11-09 13:10:53 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2012-06-21 12:33:15 +0000
@@ -3071,7 +3071,21 @@ NdbQueryImpl::doSend(int nodeId, bool la
 
     ScanTabReq::setViaSPJFlag(reqInfo, 1);
     ScanTabReq::setPassAllConfsFlag(reqInfo, 1);
-    ScanTabReq::setParallelism(reqInfo, getRootFragCount());
+
+    Uint32 nodeVersion = impl->getNodeNdbVersion(nodeId);
+    if (!ndbd_scan_tabreq_implicit_parallelism(nodeVersion))
+    {
+      // Implicit parallelism implies support for greater
+      // parallelism than storable explicitly in old reqInfo.
+      Uint32 fragments = getRootFragCount();
+      if (fragments > PARALLEL_MASK)
+      {
+        setErrorCode(Err_SendFailed /* TODO: TooManyFragments, to too old cluster version */);
+        return -1;
+      }
+      ScanTabReq::setParallelism(reqInfo, fragments);
+    }
+
     ScanTabReq::setRangeScanFlag(reqInfo, rangeScan);
     ScanTabReq::setDescendingFlag(reqInfo, descending);
     ScanTabReq::setTupScanFlag(reqInfo, tupScan);
@@ -5164,7 +5178,7 @@ int NdbQueryOperationImpl::setParallelis
     getQuery().setErrorCode(Err_FunctionNotImplemented);
     return -1;
   }
-  else if (parallelism < 1 || parallelism > MAX_NDB_PARTITIONS)
+  else if (parallelism < 1 || parallelism > NDB_PARTITION_MASK)
   {
     getQuery().setErrorCode(Err_ParameterError);
     return -1;

=== modified file 'storage/ndb/src/ndbapi/NdbScanOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp	2011-11-09 13:10:53 +0000
+++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp	2012-06-21 12:33:15 +0000
@@ -1408,6 +1408,9 @@ NdbScanOperation::processTableScanDefs(N
     return -1;
   }//if
   
+  NdbImpl* impl = theNdb->theImpl;
+  Uint32 nodeId = theNdbCon->theDBnode;
+  Uint32 nodeVersion = impl->getNodeNdbVersion(nodeId);
   theSCAN_TABREQ->setSignal(GSN_SCAN_TABREQ, refToBlock(theNdbCon->m_tcRef));
   ScanTabReq * req = CAST_PTR(ScanTabReq, theSCAN_TABREQ->getDataPtrSend());
   req->apiConnectPtr = theNdbCon->theTCConPtr;
@@ -1419,7 +1422,17 @@ NdbScanOperation::processTableScanDefs(N
   req->first_batch_size = batch; // Save user specified batch size
   
   Uint32 reqInfo = 0;
-  ScanTabReq::setParallelism(reqInfo, parallel);
+  if (!ndbd_scan_tabreq_implicit_parallelism(nodeVersion))
+  {
+    // Implicit parallelism implies support for greater
+    // parallelism than storable explicitly in old reqInfo.
+    if (parallel > PARALLEL_MASK)
+    {
+      setErrorCodeAbort(4000 /* TODO: TooManyFragments, to too old cluster version */);
+      return -1;
+    }
+    ScanTabReq::setParallelism(reqInfo, parallel);
+  }
   ScanTabReq::setScanBatch(reqInfo, 0);
   ScanTabReq::setRangeScanFlag(reqInfo, rangeScan);
   ScanTabReq::setTupScanFlag(reqInfo, tupScan);

No bundle (reason: useless for push emails).