List: Commits
From: Mauritz Sundell  Date: September 21 2012 12:36pm
Subject: bzr push into mysql-5.1-telco-7.0 branch (mauritz.sundell:4984 to 4987)
 4987 Mauritz Sundell	2012-09-21
      ndb - make online reorg change hashmap size if appropriate
      
      Bug #14645319 ONLINE REORGANIZE CAN NOT USE BIGGER HASHMAP ON OLD TABLES
      
      Previously, online reorg never changed the hashmap size.
      
      Now it either keeps the old size or uses the hardcoded
      default hashmap size.
      
      The hashmap size changes only if the number of
      fragments has increased and the old hashmap size
      is not a multiple of the new fragment count.
      In addition, the bigger hashmap size must be a multiple of
      the old hashmap size to guarantee that data is moved
      from old fragments to new fragments only; if that is not
      the case, the old hashmap size is kept.
      
      This means that after an upgrade from an ndb version
      supporting a smaller hashmap size to an ndb version
      supporting a bigger hashmap size, the cluster remains
      downgradable as long as no new tables are created and no
      online reorg has been run after changing the number of
      fragments (implicitly by adding nodes or changing maxrows,
      or by explicitly adding partitions).
      
      NOTE: neither unique index tables nor blob tables are reorganized.
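      
      As an illustration only (not code from this patch), the size
      selection described above boils down to a rule like the following
      sketch, where defaultBuckets stands for the hardcoded default
      hashmap size (NDB_DEFAULT_HASHMAP_BUCKETS); the actual logic lives
      in NdbDictionary::Dictionary::prepareHashMap() below:
      
      unsigned chooseHashMapSize(unsigned oldMapSize,
                                 unsigned oldFragments,
                                 unsigned newFragments,
                                 unsigned defaultBuckets)
      {
        if (newFragments <= oldFragments)
          return oldMapSize;        // fragment count not increased: keep old size
        if (oldMapSize % newFragments == 0)
          return oldMapSize;        // old size already a multiple of new fragment count
        if (defaultBuckets % oldMapSize != 0)
          return oldMapSize;        // extending would move data between old fragments
        return defaultBuckets;      // safe to switch to the bigger default size
      }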

    added:
      storage/ndb/include/util/ndb_math.h
    modified:
      storage/ndb/include/ndbapi/NdbDictionary.hpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/ndbapi/NdbDictionary.cpp
      storage/ndb/test/ndbapi/testDict.cpp
 4986 Mauritz Sundell	2012-09-21
      ndb - testcase to check that online reorg can extend hashmap

    modified:
      storage/ndb/include/ndbapi/NdbDictionary.hpp
      storage/ndb/src/ndbapi/NdbDictionary.cpp
      storage/ndb/test/ndbapi/testDict.cpp
      storage/ndb/test/run-test/daily-basic-tests.txt
 4985 Mauritz Sundell	2012-09-21
      ndb - let ndb_desc show hashmap for table and index
      
      Added the option --table/-t <tablename> to make ndb_desc look up
      an index on the given base table.
      
      Example:
      ndb_desc -d test -t mytable 'myindex$unique'
      
      The printing of Tables is moved from NDBT_Table.cpp into
      NdbDictionary.cpp, and methods for printing other
      NdbDictionary objects are added (using operator<<).
      
      ndb_desc can now print Index information.
      
      The HashMap is printed for Tables.
      
      The value of FragmentType is now printed as text instead of a number.
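      
      A minimal sketch (not part of this patch) of how the new printing
      API could be used from an NDB API program; it assumes the internal
      NdbOut/ndbout stream used inside the NDB source tree:
      
      #include <NdbApi.hpp>
      #include <NdbOut.hpp>
      
      void describe(Ndb* ndb, const char* tableName)
      {
        NdbDictionary::Dictionary* dict = ndb->getDictionary();
        const NdbDictionary::Table* tab = dict->getTable(tableName);
        if (tab == NULL)
          return;                      // table not found; error handling omitted
        ndbout << *tab << endl;        // basic table info via the new operator<<
        dict->print(ndbout, *tab);     // verbose info, including HashMap and indexes
      }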

    modified:
      mysql-test/suite/ndb/r/ndb_native_default_support.result
      storage/ndb/include/ndbapi/NdbDictionary.hpp
      storage/ndb/src/ndbapi/NdbDictionary.cpp
      storage/ndb/test/include/NDBT_Table.hpp
      storage/ndb/test/src/NDBT_Table.cpp
      storage/ndb/tools/desc.cpp
 4984 magnus.blaudd@stripped	2012-09-21 [merge]
      Merge

    modified:
      sql/ha_ndb_index_stat.cc
      sql/ha_ndb_index_stat.h
=== modified file 'mysql-test/suite/ndb/r/ndb_native_default_support.result'
--- a/mysql-test/suite/ndb/r/ndb_native_default_support.result	2011-05-17 23:29:55 +0000
+++ b/mysql-test/suite/ndb/r/ndb_native_default_support.result	2012-09-21 12:25:04 +0000
@@ -127,7 +127,7 @@ bit1
 t1
 -- t1 --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -143,7 +143,8 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 i Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY AUTO_INCR
 j Int NULL AT=FIXED ST=MEMORY DEFAULT 6
 f Float NOT NULL AT=FIXED ST=MEMORY DEFAULT 6.600000
@@ -157,17 +158,15 @@ blob1 Blob(256,2000,0) NULL AT=MEDIUM_VA
 text1 Text(256,2000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=MEMORY BV=2 BT=NDB$BLOB_XX_10
 timestamp_c Timestamp NOT NULL AT=FIXED ST=MEMORY
 ch2 Char(30;latin1_swedish_ci) NULL AT=FIXED ST=MEMORY DEFAULT "alter table"
-
 -- Indexes -- 
 PRIMARY KEY(i) - UniqueHashIndex
 Index
 
-
 NDBT_ProgramExit: 0 - OK
 
 -- bit1 --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -183,19 +182,18 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 pk Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY AUTO_INCR
 b1 Bit(3) NULL AT=FIXED ST=MEMORY DEFAULT H'0x7
 b2 Bit(9) NULL AT=FIXED ST=MEMORY DEFAULT H'0x5
 b3 Bit(23) NULL AT=FIXED ST=MEMORY DEFAULT H'0x6
 b4 Bit(37) NULL AT=FIXED ST=MEMORY DEFAULT H'0x3
 b5 Bit(63) NULL AT=FIXED ST=MEMORY DEFAULT H'0x2B
-
 -- Indexes -- 
 PRIMARY KEY(pk) - UniqueHashIndex
 PRIMARY(pk) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 SELECT pk,BIN(b1),BIN(b2),BIN(b3),BIN(b4),BIN(b5) FROM bit1 ORDER BY pk;
@@ -237,7 +235,7 @@ DROP DATABASE mysqltest;
 ******************************************************************************
 -- t1 --
 Version: Any
-Fragment type: 5
+Fragment type: DistrKeyHash
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -253,7 +251,7 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+-- Attributes --
 i Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY AUTO_INCR
 j Int NULL AT=FIXED ST=MEMORY
 f Float NOT NULL AT=FIXED ST=MEMORY
@@ -266,17 +264,15 @@ vb Varbinary(19) NULL AT=SHORT_VAR ST=ME
 blob1 Blob(256,2000,0) NULL AT=MEDIUM_VAR ST=MEMORY BV=2 BT=NDB$BLOB_XX_9
 text1 Text(256,2000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=MEMORY BV=2 BT=NDB$BLOB_XX_10
 timestamp_c Timestamp NOT NULL AT=FIXED ST=MEMORY
-
 -- Indexes -- 
 PRIMARY KEY(i) - UniqueHashIndex
 Index
 
-
 NDBT_ProgramExit: 0 - OK
 
 -- bit1 --
 Version: Any
-Fragment type: 5
+Fragment type: DistrKeyHash
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -292,19 +288,17 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+-- Attributes --
 pk Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY AUTO_INCR
 b1 Bit(3) NULL AT=FIXED ST=MEMORY
 b2 Bit(9) NULL AT=FIXED ST=MEMORY
 b3 Bit(23) NULL AT=FIXED ST=MEMORY
 b4 Bit(37) NULL AT=FIXED ST=MEMORY
 b5 Bit(63) NULL AT=FIXED ST=MEMORY
-
 -- Indexes -- 
 PRIMARY KEY(pk) - UniqueHashIndex
 PRIMARY(pk) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 USE test;
@@ -355,7 +349,7 @@ t1	CREATE TABLE `t1` (
 ) ENGINE=ndbcluster AUTO_INCREMENT=12 DEFAULT CHARSET=latin1
 -- t1 --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -371,7 +365,8 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 i Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY AUTO_INCR
 j Int NULL AT=FIXED ST=MEMORY DEFAULT 6
 f Float NOT NULL AT=FIXED ST=MEMORY DEFAULT 6.600000
@@ -384,12 +379,10 @@ vb Varbinary(19) NULL AT=SHORT_VAR ST=ME
 blob1 Blob(256,2000,0) NULL AT=MEDIUM_VAR ST=MEMORY BV=2 BT=NDB$BLOB_XX_9
 text1 Text(256,2000,0;latin1_swedish_ci) NULL AT=MEDIUM_VAR ST=MEMORY BV=2 BT=NDB$BLOB_XX_10
 timestamp_c Timestamp NOT NULL AT=FIXED ST=MEMORY
-
 -- Indexes -- 
 PRIMARY KEY(i) - UniqueHashIndex
 Index
 
-
 NDBT_ProgramExit: 0 - OK
 
 INSERT INTO t1 VALUES();
@@ -447,7 +440,7 @@ i	j	f	d
 ALTER TABLE t2 MODIFY COLUMN j INT DEFAULT 666;
 -- t2 --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -463,19 +456,18 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 i Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY AUTO_INCR
 j Int NULL AT=FIXED ST=MEMORY DEFAULT 666
 f Float NOT NULL AT=FIXED ST=MEMORY DEFAULT 6.600000
 d Double NULL AT=FIXED ST=MEMORY DEFAULT 8.800000
-
 -- Indexes -- 
 PRIMARY KEY(i) - UniqueHashIndex
 Index
 Index
 Index
 
-
 NDBT_ProgramExit: 0 - OK
 
 INSERT INTO t2 VALUES();
@@ -496,7 +488,7 @@ DROP TABLE IF EXISTS t2;
 CREATE TABLE t1 (a int primary key, b int default 12, c char not null) engine=ndb;
 -- t1 --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -512,16 +504,15 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
 b Int NULL AT=FIXED ST=MEMORY DEFAULT 12
 c Char(1;latin1_swedish_ci) NOT NULL AT=FIXED ST=MEMORY
-
 -- Indexes -- 
 PRIMARY KEY(a) - UniqueHashIndex
 PRIMARY(a) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 DROP TABLE t1;
@@ -539,7 +530,7 @@ h enum('Pig','Lion') not null,
 i char(2) default '66') engine=ndb;
 -- t1 --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -555,7 +546,8 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
 b Int NULL AT=FIXED ST=MEMORY DEFAULT 12
 c Char(1;latin1_swedish_ci) NOT NULL AT=FIXED ST=MEMORY
@@ -565,12 +557,10 @@ f Char(1;binary) NULL AT=FIXED ST=MEMORY
 g Char(1;binary) NULL AT=FIXED ST=MEMORY DEFAULT "0x05"
 h Char(1;binary) NOT NULL AT=FIXED ST=MEMORY
 i Char(2;latin1_swedish_ci) NULL AT=FIXED ST=MEMORY DEFAULT "66"
-
 -- Indexes -- 
 PRIMARY KEY(a) - UniqueHashIndex
 PRIMARY(a) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 DROP TABLE t1;
@@ -590,7 +580,7 @@ t1	CREATE TABLE `t1` (
 ) ENGINE=ndbcluster DEFAULT CHARSET=latin1
 -- t1 --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -606,16 +596,15 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
 b Binary(10) NULL AT=FIXED ST=MEMORY DEFAULT 0x4142430045464748494A
 c Varbinary(100) NULL AT=SHORT_VAR ST=MEMORY DEFAULT 0x4142430045464748494A
-
 -- Indexes -- 
 PRIMARY KEY(a) - UniqueHashIndex
 PRIMARY(a) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 DROP TABLE t1;
@@ -635,7 +624,7 @@ variant	CREATE TABLE `variant` (
 ) ENGINE=ndbcluster DEFAULT CHARSET=latin1
 -- variant --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -651,15 +640,14 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
 b Timestamp NOT NULL AT=FIXED ST=MEMORY
-
 -- Indexes -- 
 PRIMARY KEY(a) - UniqueHashIndex
 PRIMARY(a) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 INSERT INTO variant (a) VALUES (1);
@@ -679,7 +667,7 @@ variant	CREATE TABLE `variant` (
 ) ENGINE=ndbcluster DEFAULT CHARSET=latin1
 -- variant --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -695,15 +683,14 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
 b Timestamp NOT NULL AT=FIXED ST=MEMORY
-
 -- Indexes -- 
 PRIMARY KEY(a) - UniqueHashIndex
 PRIMARY(a) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 INSERT INTO variant (a) VALUES (1);
@@ -723,7 +710,7 @@ variant	CREATE TABLE `variant` (
 ) ENGINE=ndbcluster DEFAULT CHARSET=latin1
 -- variant --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -739,15 +726,14 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
 b Timestamp NOT NULL AT=FIXED ST=MEMORY
-
 -- Indexes -- 
 PRIMARY KEY(a) - UniqueHashIndex
 PRIMARY(a) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 INSERT INTO variant (a) VALUES (1);
@@ -769,7 +755,7 @@ variant	CREATE TABLE `variant` (
 ) ENGINE=ndbcluster DEFAULT CHARSET=latin1
 -- variant --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -785,16 +771,15 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
 b Timestamp NOT NULL AT=FIXED ST=MEMORY
 c Int NULL AT=FIXED ST=MEMORY
-
 -- Indexes -- 
 PRIMARY KEY(a) - UniqueHashIndex
 PRIMARY(a) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 INSERT INTO variant (a,c) VALUES (1,1);
@@ -818,7 +803,7 @@ variant	CREATE TABLE `variant` (
 ) ENGINE=ndbcluster DEFAULT CHARSET=latin1
 -- variant --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -834,15 +819,14 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
 b Timestamp NOT NULL AT=FIXED ST=MEMORY DEFAULT 0
-
 -- Indexes -- 
 PRIMARY KEY(a) - UniqueHashIndex
 PRIMARY(a) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 INSERT INTO variant (a) VALUES (1);
@@ -862,7 +846,7 @@ variant	CREATE TABLE `variant` (
 ) ENGINE=ndbcluster DEFAULT CHARSET=latin1
 -- variant --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -878,15 +862,14 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
 b Timestamp NOT NULL AT=FIXED ST=MEMORY DEFAULT 235861201
-
 -- Indexes -- 
 PRIMARY KEY(a) - UniqueHashIndex
 PRIMARY(a) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 INSERT INTO variant (a) VALUES (1);
@@ -908,7 +891,7 @@ variant	CREATE TABLE `variant` (
 ) ENGINE=ndbcluster DEFAULT CHARSET=latin1
 -- variant --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -924,16 +907,15 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
 b Timestamp NOT NULL AT=FIXED ST=MEMORY DEFAULT 235861201
 c Timestamp NOT NULL AT=FIXED ST=MEMORY
-
 -- Indexes -- 
 PRIMARY KEY(a) - UniqueHashIndex
 PRIMARY(a) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 INSERT INTO variant (a) VALUES (1);
@@ -955,7 +937,7 @@ variant	CREATE TABLE `variant` (
 ) ENGINE=ndbcluster DEFAULT CHARSET=latin1
 -- variant --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -971,16 +953,15 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
 b Timestamp NOT NULL AT=FIXED ST=MEMORY DEFAULT 235861201
 c Timestamp NOT NULL AT=FIXED ST=MEMORY
-
 -- Indexes -- 
 PRIMARY KEY(a) - UniqueHashIndex
 PRIMARY(a) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 INSERT INTO variant (a) VALUES (1);
@@ -1006,7 +987,7 @@ variant	CREATE TABLE `variant` (
 ) ENGINE=ndbcluster DEFAULT CHARSET=latin1
 -- variant --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -1022,16 +1003,15 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
 b Timestamp NOT NULL AT=FIXED ST=MEMORY DEFAULT 235861201
 c Timestamp NOT NULL AT=FIXED ST=MEMORY
-
 -- Indexes -- 
 PRIMARY KEY(a) - UniqueHashIndex
 PRIMARY(a) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 INSERT INTO variant (a) VALUES (1);
@@ -1051,7 +1031,7 @@ variant	CREATE TABLE `variant` (
 ) ENGINE=ndbcluster DEFAULT CHARSET=latin1
 -- variant --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -1067,15 +1047,14 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
 b Timestamp NULL AT=FIXED ST=MEMORY
-
 -- Indexes -- 
 PRIMARY KEY(a) - UniqueHashIndex
 PRIMARY(a) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 INSERT INTO variant (a) VALUES (1);
@@ -1095,7 +1074,7 @@ variant	CREATE TABLE `variant` (
 ) ENGINE=ndbcluster DEFAULT CHARSET=latin1
 -- variant --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -1111,15 +1090,14 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
 b Timestamp NULL AT=FIXED ST=MEMORY DEFAULT 0
-
 -- Indexes -- 
 PRIMARY KEY(a) - UniqueHashIndex
 PRIMARY(a) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 INSERT INTO variant (a) VALUES (1);
@@ -1139,7 +1117,7 @@ variant	CREATE TABLE `variant` (
 ) ENGINE=ndbcluster DEFAULT CHARSET=latin1
 -- variant --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -1155,15 +1133,14 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
 b Timestamp NULL AT=FIXED ST=MEMORY DEFAULT 235861201
-
 -- Indexes -- 
 PRIMARY KEY(a) - UniqueHashIndex
 PRIMARY(a) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 INSERT INTO variant (a) VALUES (1);
@@ -1183,7 +1160,7 @@ variant	CREATE TABLE `variant` (
 ) ENGINE=ndbcluster DEFAULT CHARSET=latin1
 -- variant --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -1199,15 +1176,14 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
 b Timestamp NULL AT=FIXED ST=MEMORY
-
 -- Indexes -- 
 PRIMARY KEY(a) - UniqueHashIndex
 PRIMARY(a) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 INSERT INTO variant (a) VALUES (1);
@@ -1229,7 +1205,7 @@ variant	CREATE TABLE `variant` (
 ) ENGINE=ndbcluster DEFAULT CHARSET=latin1
 -- variant --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -1245,16 +1221,15 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
 b Timestamp NULL AT=FIXED ST=MEMORY
 c Int NULL AT=FIXED ST=MEMORY
-
 -- Indexes -- 
 PRIMARY KEY(a) - UniqueHashIndex
 PRIMARY(a) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 INSERT INTO variant (a,c) VALUES (1,1);
@@ -1278,7 +1253,7 @@ variant	CREATE TABLE `variant` (
 ) ENGINE=ndbcluster DEFAULT CHARSET=latin1
 -- variant --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -1294,15 +1269,14 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 a Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
 b Timestamp NULL AT=FIXED ST=MEMORY
-
 -- Indexes -- 
 PRIMARY KEY(a) - UniqueHashIndex
 PRIMARY(a) - OrderedIndex
 
-
 NDBT_ProgramExit: 0 - OK
 
 INSERT INTO variant (a) VALUES (1);
@@ -1368,7 +1342,7 @@ t1	CREATE TABLE `t1` (
 ) ENGINE=ndbcluster AUTO_INCREMENT=18446744073709551615 DEFAULT CHARSET=latin1
 -- t1 --
 Version: Any
-Fragment type: 9
+Fragment type: HashMapPartition
 K Value: 6
 Min load factor: 78
 Max load factor: 80
@@ -1384,7 +1358,8 @@ FragmentCount: 2
 ExtraRowGciBits: 0
 ExtraRowAuthorBits: 0
 TableStatus: Retrieved
--- Attributes -- 
+HashMap: DEFAULT-HASHMAP-3840-2
+-- Attributes --
 i Int PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY AUTO_INCR
 j Bigint NOT NULL AT=FIXED ST=MEMORY DEFAULT 6006
 f Float NOT NULL AT=FIXED ST=MEMORY DEFAULT 6.600000
@@ -1398,12 +1373,10 @@ text1 Text(256,2000,0;latin1_swedish_ci)
 timestamp_c Timestamp NOT NULL AT=FIXED ST=MEMORY DEFAULT 1275905182
 newOne Varchar(255;latin1_swedish_ci) NULL AT=SHORT_VAR ST=MEMORY DEFAULT "Comment field default"
 newTwo Bigint NULL AT=FIXED ST=MEMORY
-
 -- Indexes -- 
 PRIMARY KEY(i) - UniqueHashIndex
 Index
 
-
 NDBT_ProgramExit: 0 - OK
 
 SELECT i, j, f, d, d2, ch, HEX(b), HEX(vb), HEX(blob1), text1, timestamp_c, newOne, newTwo from t1 order by i;

=== modified file 'storage/ndb/include/ndbapi/NdbDictionary.hpp'
--- a/storage/ndb/include/ndbapi/NdbDictionary.hpp	2011-07-04 13:37:56 +0000
+++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp	2012-09-21 12:34:28 +0000
@@ -2168,6 +2168,8 @@ public:
      */
     const Index * getIndex(const char * indexName,
 			   const char * tableName) const;
+    const Index * getIndex(const char * indexName,
+                           const Table& base) const;
 
     /**
      * Fetch list of indexes of given table.
@@ -2458,18 +2460,21 @@ public:
      * Get default HashMap
      */
     int getDefaultHashMap(HashMap& dst, Uint32 fragments);
+    int getDefaultHashMap(HashMap& dst, Uint32 buckets, Uint32 fragments);
 
 
     /**
      * Init a default HashMap
      */
     int initDefaultHashMap(HashMap& dst, Uint32 fragments);
+    int initDefaultHashMap(HashMap& dst, Uint32 buckets, Uint32 fragments);
 
     /**
      * create (or retreive) a HashMap suitable for alter
      * NOTE: Requires a started schema transaction
      */
     int prepareHashMap(const Table& oldTable, Table& newTable);
+    int prepareHashMap(const Table& oldTable, Table& newTable, Uint32 buckets);
 
     /** @} *******************************************************************/
 
@@ -2562,8 +2567,8 @@ public:
 #endif
     class NdbDictionaryImpl & m_impl;
     Dictionary(NdbDictionaryImpl&);
-    const Table * getIndexTable(const char * indexName, 
-				const char * tableName) const;
+    const Table * getIndexTable(const char * indexName,
+                                const char * tableName) const;
   public:
 #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
     const Table * getTable(const char * name, void **data) const;
@@ -2616,6 +2621,13 @@ public:
       createRecord
     */
     void releaseRecord(NdbRecord *rec);
+
+    /*
+      Methods to print objects more verbose than possible from
+      object itself.
+     */
+    void print(class NdbOut& out, NdbDictionary::Index const& idx);
+    void print(class NdbOut& out, NdbDictionary::Table const& tab);
   }; // class Dictionary
 
   class NdbDataPrintFormat
@@ -2640,9 +2652,14 @@ public:
                                     const NdbDictionary::Column* c,
                                     const void* val);
   
-
 }; // class NdbDictionary
 
 class NdbOut& operator <<(class NdbOut& out, const NdbDictionary::Column& col);
+class NdbOut& operator <<(class NdbOut& out, const NdbDictionary::Index& idx);
+class NdbOut& operator <<(class NdbOut& out, const NdbDictionary::Index::Type type);
+class NdbOut& operator <<(class NdbOut& out, const NdbDictionary::Object::FragmentType fragtype);
+class NdbOut& operator <<(class NdbOut& out, const NdbDictionary::Object::Status status);
+class NdbOut& operator <<(class NdbOut& out, const NdbDictionary::Object::Type type);
+class NdbOut& operator <<(class NdbOut& out, const NdbDictionary::Table& tab);
 
 #endif

=== added file 'storage/ndb/include/util/ndb_math.h'
--- a/storage/ndb/include/util/ndb_math.h	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/include/util/ndb_math.h	2012-09-21 12:34:28 +0000
@@ -0,0 +1,49 @@
+/*
+   Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#ifndef NDB_MATH_H
+#define NDB_MATH_H
+
+/**
+ * Greatest common divisor, gcd.
+ * Arguments should be positive integers.
+ */
+
+template<typename Int>
+inline Int gcd(Int x, Int y)
+{
+  do {
+    Int t = y;
+    y = x % y;
+    x = t;
+  } while (y != 0);
+  return x;
+}
+
+/**
+ * Least common multiple, lcm.
+ * Arguments should be positive integers.
+ * Result may be overflowed.
+ */
+
+template<typename Int>
+inline Int lcm(Int x, Int y)
+{
+  return (x / gcd(x, y)) * y;
+}
+
+#endif
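
As a hedged illustration (the function name here is hypothetical, not part of the
patch), the lcm() helper above is what lets the reorg checks below walk exactly
one common period of the old and new hashmaps: data may only move from old
fragments to new fragments, never between two old fragments.

  // assumes ndb_math.h from above is included
  bool reorgSupported(const unsigned* oldMap, unsigned oldCnt,
                      const unsigned* newMap, unsigned newCnt,
                      unsigned oldFragments)
  {
    const unsigned period = lcm(oldCnt, newCnt);
    for (unsigned i = 0; i < period; i++)
    {
      const unsigned oldFrag = oldMap[i % oldCnt];
      const unsigned newFrag = newMap[i % newCnt];
      if (oldFrag != newFrag && newFrag < oldFragments)
        return false;   // bucket would move between two old fragments
    }
    return true;
  }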

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2012-09-19 06:37:24 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2012-09-21 12:34:28 +0000
@@ -23,6 +23,7 @@
 #include "diskpage.hpp"
 
 #include <ndb_limits.h>
+#include <ndb_math.h>
 #include <NdbOut.hpp>
 #include <OutputStream.hpp>
 #include <Properties.hpp>
@@ -8500,19 +8501,16 @@ Dbdict::check_supported_reorg(Uint32 org
   Ptr<Hash2FragmentMap> newptr;
   g_hash_map.getPtr(newptr, newmap_ptr.p->m_map_ptr_i);
 
-  if (newptr.p->m_cnt < orgptr.p->m_cnt)
-  {
-    jam();
-    return AlterTableRef::UnsupportedChange;
-  }
-
-  for (Uint32 i = 0; i<orgptr.p->m_cnt; i++)
+  /*
+   * check that old fragments maps to same old fragment
+   * or to a new fragment.
+   * allow both extending and shrinking hashmap.
+   */
+  Uint32 period = lcm(orgptr.p->m_cnt, newptr.p->m_cnt);
+  for (Uint32 i = 0; i < period; i++)
   {
-    jam();
-    if (orgptr.p->m_map[i] == newptr.p->m_map[i])
-      continue;
-
-    if (newptr.p->m_map[i] < orgptr.p->m_fragments)
+    if (orgptr.p->m_map[i % orgptr.p->m_cnt] != newptr.p->m_map[i % newptr.p->m_cnt] &&
+        newptr.p->m_map[i % newptr.p->m_cnt] < orgptr.p->m_fragments)
     {
       /**
        * Moving data from "old" fragment into "old" fragment

=== modified file 'storage/ndb/src/ndbapi/NdbDictionary.cpp'
--- a/storage/ndb/src/ndbapi/NdbDictionary.cpp	2012-06-21 12:24:52 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp	2012-09-21 12:34:28 +0000
@@ -15,6 +15,7 @@
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
 */
 
+#include <ndb_math.h>
 #include <NdbDictionary.hpp>
 #include "NdbDictionaryImpl.hpp"
 #include <NdbOut.hpp>
@@ -1894,9 +1895,17 @@ int
 NdbDictionary::Dictionary::getDefaultHashMap(NdbDictionary::HashMap& dst,
                                              Uint32 fragments)
 {
+  return getDefaultHashMap(dst, NDB_DEFAULT_HASHMAP_BUCKETS, fragments);
+}
+
+int
+NdbDictionary::Dictionary::getDefaultHashMap(NdbDictionary::HashMap& dst,
+                                             Uint32 buckets,
+                                             Uint32 fragments)
+{
   BaseString tmp;
   tmp.assfmt("DEFAULT-HASHMAP-%u-%u",
-             NDB_DEFAULT_HASHMAP_BUCKETS, fragments);
+             buckets, fragments);
 
   return getHashMap(dst, tmp.c_str());
 }
@@ -1926,14 +1935,22 @@ int
 NdbDictionary::Dictionary::initDefaultHashMap(NdbDictionary::HashMap& dst,
                                               Uint32 fragments)
 {
+  return initDefaultHashMap(dst, NDB_DEFAULT_HASHMAP_BUCKETS, fragments);
+}
+
+int
+NdbDictionary::Dictionary::initDefaultHashMap(NdbDictionary::HashMap& dst,
+                                              Uint32 buckets,
+                                              Uint32 fragments)
+{
   BaseString tmp;
   tmp.assfmt("DEFAULT-HASHMAP-%u-%u",
-             NDB_DEFAULT_HASHMAP_BUCKETS, fragments);
+             buckets, fragments);
 
   dst.setName(tmp.c_str());
 
   Vector<Uint32> map;
-  for (Uint32 i = 0; i < NDB_DEFAULT_HASHMAP_BUCKETS; i++)
+  for (Uint32 i = 0; i < buckets; i++)
   {
     map.push_back(i % fragments);
   }
@@ -1946,6 +1963,14 @@ int
 NdbDictionary::Dictionary::prepareHashMap(const Table& oldTableF,
                                           Table& newTableF)
 {
+  return prepareHashMap(oldTableF, newTableF, NDB_DEFAULT_HASHMAP_BUCKETS);
+}
+
+int
+NdbDictionary::Dictionary::prepareHashMap(const Table& oldTableF,
+                                          Table& newTableF,
+                                          Uint32 buckets)
+{
   if (!hasSchemaTrans())
   {
     return -1;
@@ -1968,8 +1993,6 @@ NdbDictionary::Dictionary::prepareHashMa
     }
 
     HashMap newmapF;
-    NdbHashMapImpl& newmap = NdbHashMapImpl::getImpl(newmapF);
-    newmap.assign(NdbHashMapImpl::getImpl(oldmap));
 
     Uint32 oldcnt = oldTable.getFragmentCount();
     Uint32 newcnt = newTable.getFragmentCount();
@@ -2016,12 +2039,86 @@ NdbDictionary::Dictionary::prepareHashMa
       newTable.setFragmentCount(newcnt);
     }
 
-    for (Uint32 i = 0; i<newmap.m_map.size(); i++)
+    /*
+     * if fragment count has not changed,
+     * dont move data and keep old hashmap.
+     */
+
+    if (newcnt == oldcnt)
+    {
+      newTable.m_hash_map_id = oldTable.m_hash_map_id;
+      newTable.m_hash_map_version = oldTable.m_hash_map_version;
+      return 0;
+    }
+
+    Uint32 newmapsize = buckets;
+    Uint32 oldmapsize = oldmap.getMapLen();
+
+    /**
+     * if old hashmap size is smaller than new hashmap size
+     * and new fragment count is a multiple of old hashmap
+     * size, no need to extend map, keep old hashmap size
+     */
+
+    if (oldmapsize < newmapsize &&
+        oldmapsize % newcnt == 0)
+    {
+      newmapsize = oldmapsize;
+    }
+
+    NdbHashMapImpl& newmap = NdbHashMapImpl::getImpl(newmapF);
+    NdbHashMapImpl const& oldmapimpl = NdbHashMapImpl::getImpl(oldmap);
+
+    newmap.m_map.expand(newmapsize);
+    for (Uint32 i = 0; i < newmapsize; i++)
     {
       Uint32 newval = i % newcnt;
-      if (newval >= oldcnt)
+      if (newval < oldcnt)
+      {
+         newval = oldmapimpl.m_map[i % oldmapsize];
+      }
+      newmap.m_map.push_back(newval);
+    }
+
+    /**
+     * check that new map do not imply data movement
+     * from old fragment to another old fragment.
+     * in such case, fall back to use old hashmap size
+     */
+
+    if (oldmapsize != newmapsize)
+    {
+      Uint32 period = lcm(oldmapsize, newmapsize);
+      Uint32 i;
+
+      for (i = 0; i < period; i++)
+      {
+        if (oldmapimpl.m_map[i % oldmapsize] != newmap.m_map[i % newmapsize] &&
+            newmap.m_map[i % newmapsize] < oldcnt)
+        {
+          /**
+           * move from old fragment to another old fragment
+           * not supported - keep old hashmap size
+           */
+          break;
+        }
+      }
+
+      /* keep old hashmap size, recreate newmap */
+      if (i < period)
       {
-        newmap.m_map[i] = newval;
+        newmapsize = oldmapsize;
+        newmap.m_map.clear();
+        newmap.m_map.expand(newmapsize);
+        for (Uint32 i = 0; i < newmapsize; i++)
+        {
+          Uint32 newval = i % newcnt;
+          if (newval < oldcnt)
+          {
+             newval = oldmapimpl.m_map[i % oldmapsize];
+          }
+          newmap.m_map.push_back(newval);
+        }
       }
     }
 
@@ -2029,7 +2126,7 @@ NdbDictionary::Dictionary::prepareHashMa
      * Check if this accidently became a "default" map
      */
     HashMap def;
-    if (getDefaultHashMap(def, newcnt) == 0)
+    if (getDefaultHashMap(def, newmapsize, newcnt) == 0)
     {
       if (def.equal(newmapF))
       {
@@ -2039,7 +2136,7 @@ NdbDictionary::Dictionary::prepareHashMa
       }
     }
 
-    initDefaultHashMap(def, newcnt);
+    initDefaultHashMap(def, newmapsize, newcnt);
     if (def.equal(newmapF))
     {
       ObjectId tmp;
@@ -2057,14 +2154,14 @@ retry:
     if (cnt == 0)
     {
       newmap.m_name.assfmt("HASHMAP-%u-%u-%u",
-                           NDB_DEFAULT_HASHMAP_BUCKETS,
+                           newmapsize,
                            oldcnt,
                            newcnt);
     }
     else
     {
       newmap.m_name.assfmt("HASHMAP-%u-%u-%u-#%u",
-                           NDB_DEFAULT_HASHMAP_BUCKETS,
+                           newmapsize,
                            oldcnt,
                            newcnt,
                            cnt);
@@ -2654,6 +2751,16 @@ NdbDictionary::Dictionary::getIndex(cons
   return 0;
 }
 
+const NdbDictionary::Index *
+NdbDictionary::Dictionary::getIndex(const char * indexName,
+                                    const NdbDictionary::Table& base) const
+{
+  NdbIndexImpl * i = m_impl.getIndex(indexName, NdbTableImpl::getImpl(base));
+  if (i)
+    return i->m_facade;
+  return 0;
+}
+
 void
 NdbDictionary::Dictionary::invalidateIndex(const Index *index){
   DBUG_ENTER("NdbDictionary::Dictionary::invalidateIndex");
@@ -3631,3 +3738,255 @@ NdbDictionary::Dictionary::createHashMap
                                             0));
   return ret;
 }
+
+NdbOut& operator <<(NdbOut& ndbout, NdbDictionary::Object::FragmentType const fragtype)
+{
+  switch (fragtype)
+  {
+  case NdbDictionary::Object::FragUndefined:
+    ndbout << "FragUndefined";
+    break;
+  case NdbDictionary::Object::FragSingle:
+    ndbout << "FragSingle";
+    break;
+  case NdbDictionary::Object::FragAllSmall:
+    ndbout << "FragAllSmall";
+    break;
+  case NdbDictionary::Object::FragAllMedium:
+    ndbout << "FragAllMedium";
+    break;
+  case NdbDictionary::Object::FragAllLarge:
+    ndbout << "FragAllLarge";
+    break;
+  case NdbDictionary::Object::DistrKeyHash:
+    ndbout << "DistrKeyHash";
+    break;
+  case NdbDictionary::Object::DistrKeyLin:
+    ndbout << "DistrKeyLin";
+    break;
+  case NdbDictionary::Object::UserDefined:
+    ndbout << "UserDefined";
+    break;
+  case NdbDictionary::Object::HashMapPartition:
+    ndbout << "HashMapPartition";
+    break;
+  default:
+    ndbout << "Unknown(" << (unsigned) fragtype << ")";
+  }
+  return ndbout;
+}
+
+NdbOut& operator <<(NdbOut& ndbout, NdbDictionary::Object::Type const type)
+{
+  switch (type)
+  {
+  case NdbDictionary::Object::TypeUndefined:
+    ndbout << "Undefined";
+    break;
+  case NdbDictionary::Object::SystemTable:
+    ndbout << "SystemTable";
+    break;
+  case NdbDictionary::Object::UserTable:
+    ndbout << "UserTable";
+    break;
+  case NdbDictionary::Object::UniqueHashIndex:
+    ndbout << "UniqueHashIndex";
+    break;
+  case NdbDictionary::Object::OrderedIndex:
+    ndbout << "OrderedIndex";
+    break;
+  case NdbDictionary::Object::HashIndexTrigger:
+    ndbout << "HashIndexTrigger";
+    break;
+  case NdbDictionary::Object::IndexTrigger:
+    ndbout << "IndexTrigger";
+    break;
+  case NdbDictionary::Object::SubscriptionTrigger:
+    ndbout << "SubscriptionTrigger";
+    break;
+  case NdbDictionary::Object::ReadOnlyConstraint:
+    ndbout << "ReadOnlyConstraint";
+    break;
+  case NdbDictionary::Object::TableEvent:
+    ndbout << "TableEvent";
+    break;
+  case NdbDictionary::Object::Tablespace:
+    ndbout << "Tablespace";
+    break;
+  case NdbDictionary::Object::LogfileGroup:
+    ndbout << "LogfileGroup";
+    break;
+  case NdbDictionary::Object::Datafile:
+    ndbout << "Datafile";
+    break;
+  case NdbDictionary::Object::Undofile:
+    ndbout << "Undofile";
+    break;
+  case NdbDictionary::Object::ReorgTrigger:
+    ndbout << "ReorgTrigger";
+    break;
+  case NdbDictionary::Object::HashMap:
+    ndbout << "HashMap";
+    break;
+  default:
+    ndbout << "Type " << (unsigned) type;
+  }
+  return ndbout;
+}
+
+NdbOut& operator <<(NdbOut& ndbout, NdbDictionary::Index::Type const type)
+{
+  switch (type)
+  {
+  case NdbDictionary::Index::Undefined:
+    ndbout << "Undefined";
+    break;
+  case NdbDictionary::Index::UniqueHashIndex:
+    ndbout << "UniqueHashIndex";
+    break;
+  case NdbDictionary::Index::OrderedIndex:
+    ndbout << "OrderedIndex";
+    break;
+  default:
+    ndbout << "Type " << (unsigned) type;
+  }
+  return ndbout;
+}
+
+NdbOut& operator <<(NdbOut& ndbout, NdbDictionary::Object::Status const status)
+{
+  switch (status)
+  {
+  case NdbDictionary::Object::New:
+    ndbout << "New";
+    break;
+  case NdbDictionary::Object::Changed:
+    ndbout << "Changed";
+    break;
+  case NdbDictionary::Object::Retrieved:
+    ndbout << "Retrieved";
+    break;
+  case NdbDictionary::Object::Invalid:
+    ndbout << "Invalid";
+    break;
+  case NdbDictionary::Object::Altered:
+    ndbout << "Altered";
+    break;
+  default:
+    ndbout << "Undefined(" << (unsigned) status << ")";
+  }
+  return ndbout;
+}
+
+NdbOut& operator <<(NdbOut& ndbout, NdbDictionary::Index const& idx)
+{
+  ndbout << "Version: " << idx.getObjectVersion() << endl;
+  ndbout << "Base table: " << idx.getTable() << endl;
+  ndbout << "Number of attributes: " << idx.getNoOfColumns() << endl;
+  ndbout << "Logging: " << idx.getLogging() << endl;
+  ndbout << "Index type: " << idx.getType() << endl;
+  ndbout << "Index status: " << idx.getObjectStatus() << endl;
+
+  return ndbout;
+}
+
+void NdbDictionary::Dictionary::print(NdbOut& ndbout, NdbDictionary::Index const& idx)
+{
+  ndbout << idx;
+
+  ndbout << "-- Attributes --" << endl;
+  for (unsigned col = 0; col < idx.getNoOfColumns() ; col++)
+  {
+    ndbout << *idx.getColumn(col) << endl;
+  }
+
+  Table const& indexTable = *NdbIndexImpl::getImpl(idx).getIndexTable();
+  ndbout << "-- IndexTable " << indexTable.getName() << " --" << endl;
+
+  print(ndbout, indexTable);
+}
+
+NdbOut& operator <<(class NdbOut&, NdbDictionary::Table const& tab)
+{
+  ndbout << "Version: " <<  tab.getObjectVersion() << endl;
+  ndbout << "Fragment type: " <<  tab.getFragmentType() << endl;
+  ndbout << "K Value: " <<  tab.getKValue()<< endl;
+  ndbout << "Min load factor: " <<  tab.getMinLoadFactor()<< endl;
+  ndbout << "Max load factor: " <<  tab.getMaxLoadFactor()<< endl;
+  ndbout << "Temporary table: " <<  (tab.getStoredTable() ? "no" : "yes") << endl;
+  ndbout << "Number of attributes: " <<  tab.getNoOfColumns() << endl;
+  ndbout << "Number of primary keys: " <<  tab.getNoOfPrimaryKeys() << endl;
+  ndbout << "Length of frm data: " << tab.getFrmLength() << endl;
+  ndbout << "Row Checksum: " << tab.getRowChecksumIndicator() << endl;
+  ndbout << "Row GCI: " << tab.getRowGCIIndicator() << endl;
+  ndbout << "SingleUserMode: " << (Uint32) tab.getSingleUserMode() << endl;
+  ndbout << "ForceVarPart: " << tab.getForceVarPart() << endl;
+  ndbout << "FragmentCount: " << tab.getFragmentCount() << endl;
+  ndbout << "ExtraRowGciBits: " << tab.getExtraRowGciBits() << endl;
+  ndbout << "ExtraRowAuthorBits: " << tab.getExtraRowAuthorBits() << endl;
+  ndbout << "TableStatus: " << tab.getObjectStatus() << endl;
+  return ndbout;
+}
+
+void NdbDictionary::Dictionary::print(NdbOut& ndbout, NdbDictionary::Table const& tab)
+{
+  ndbout << tab;
+
+  HashMap hashmap;
+  if (getHashMap(hashmap, &tab) != -1)
+  {
+    ndbout << "HashMap: " << hashmap.getName() << endl;
+  }
+
+  ndbout << "-- Attributes --" << endl;
+  for (int col = 0; col < tab.getNoOfColumns() ; col++)
+  {
+    ndbout << *tab.getColumn(col) << endl;
+  }
+
+  ndbout << "-- Indexes -- " << endl;
+  ndbout << "PRIMARY KEY(";
+  unsigned j;
+  for (j= 0; (int)j < tab.getNoOfPrimaryKeys(); j++)
+  {
+    const Column * col= tab.getColumn(tab.getPrimaryKey(j));
+    ndbout << col->getName();
+    if ((int)j < tab.getNoOfPrimaryKeys()-1)
+      ndbout << ", ";
+  }
+  ndbout << ") - UniqueHashIndex" << endl;
+
+  List list;
+  if (listIndexes(list, tab) == 0)
+  {
+    for (j= 0; j < list.count; j++) {
+      List::Element& elt = list.elements[j];
+      const Index *pIdx = getIndex(elt.name, tab);
+      if (!pIdx)
+      {
+#ifdef VM_TRACE
+        assert(false);
+#endif
+        continue;
+      }
+
+      ndbout << pIdx->getName();
+      ndbout << "(";
+      unsigned noOfAttributes = pIdx->getNoOfColumns();
+      for (unsigned i = 0; i < noOfAttributes; i++)
+      {
+        const Column *col = pIdx->getColumn(i);
+        ndbout << col->getName();
+        if (i < noOfAttributes - 1)
+          ndbout << ", ";
+      }
+      ndbout << ")";
+      ndbout << " - " << pIdx->getType();
+      ndbout << endl;
+    }
+  }
+#ifdef VM_TRACE
+  else assert(false);
+#endif
+}
+

=== modified file 'storage/ndb/test/include/NDBT_Table.hpp'
--- a/storage/ndb/test/include/NDBT_Table.hpp	2011-10-22 09:47:36 +0000
+++ b/storage/ndb/test/include/NDBT_Table.hpp	2012-09-21 12:25:04 +0000
@@ -58,7 +58,6 @@ class NDBT_Table : public NdbDictionary:
    * Print meta information about table 
    * (information on how it is strored, what the attributes look like etc.)
    */
-  friend class NdbOut& operator <<(class NdbOut&, const NDBT_Table &);
 public: 
   
   NDBT_Table(const char* name, 
@@ -107,13 +106,4 @@ NDBT_Table::discoverTableFromDb(Ndb* ndb
   return ndb->getDictionary()->getTable(name);
 }
 
-
-/**
- * Print meta information about index
- * (information on how it is strored, what the attributes look like etc.)
- */
-class NdbOut& operator <<(class NdbOut&, const NdbDictionary::Index &);
-
-
-
 #endif

=== modified file 'storage/ndb/test/ndbapi/testDict.cpp'
--- a/storage/ndb/test/ndbapi/testDict.cpp	2012-08-30 08:40:49 +0000
+++ b/storage/ndb/test/ndbapi/testDict.cpp	2012-09-21 12:34:28 +0000
@@ -9174,6 +9174,184 @@ runIndexStatCreate(NDBT_Context* ctx, ND
   return NDBT_OK;
 }
 
+int
+getOrCreateDefaultHashMap(NdbDictionary::Dictionary& dict, NdbDictionary::HashMap& hm, Uint32 buckets, Uint32 fragments)
+{
+  if (dict.getDefaultHashMap(hm, buckets, fragments) == 0)
+  {
+    return 0;
+  }
+
+  dict.initDefaultHashMap(hm, buckets, fragments);
+  if (dict.createHashMap(hm, NULL) == -1)
+  {
+    return -1;
+  }
+
+  if (dict.getDefaultHashMap(hm, buckets, fragments) == 0)
+  {
+    return 0;
+  }
+
+  return -1;
+}
+
+struct Bug14645319_createTable_args
+{
+  char const* template_name;
+  char const* name;
+  Uint32 buckets;
+  Uint32 fragments;
+};
+
+int Bug14645319_createTable(Ndb* pNdb, NdbDictionary::Table& tab, int when,
+                                    void* arg)
+{
+  Bug14645319_createTable_args& args = *static_cast<Bug14645319_createTable_args*>(arg);
+  NdbDictionary::Dictionary* pDic = pNdb->getDictionary();
+  if (when == 0)
+  {
+    tab.setName(args.name);
+    tab.setFragmentCount(args.fragments);
+    if (args.fragments == 0)
+    {
+      tab.setFragmentData(0, 0);
+    }
+    NdbDictionary::HashMap hm;
+    getOrCreateDefaultHashMap(*pDic, hm, args.buckets, args.fragments);
+    tab.setHashMap(hm);
+  }
+  return 0;
+}
+
+int
+runBug14645319(NDBT_Context* ctx, NDBT_Step* step)
+{
+  Ndb* pNdb = GETNDB(step);
+  NdbDictionary::Dictionary* pDic = pNdb->getDictionary();
+  int failures = 0;
+
+  struct test_case {
+    char const* description;
+    int old_fragments;
+    int old_buckets;
+    int new_fragments;
+    int new_buckets;
+    int expected_buckets;
+  };
+
+  STATIC_ASSERT(NDB_DEFAULT_HASHMAP_BUCKETS % 240 == 0);
+  STATIC_ASSERT(NDB_DEFAULT_HASHMAP_BUCKETS % 260 != 0);
+  test_case test_cases[] = {
+    { "Simulate online reorg, may or may not change hashmap depending on default fragment count",
+      3, 120, 0, NDB_DEFAULT_HASHMAP_BUCKETS, 0 },
+    { "Keep old hashmap since no new fragments",
+      3, 120, 3, NDB_DEFAULT_HASHMAP_BUCKETS, 120 },
+    { "Keep old hashmap size since old size a multiple of new fragment count",
+      3, 120, 6, NDB_DEFAULT_HASHMAP_BUCKETS, 120 },
+    { "Keep old hashmap size since new size not a multiple of old",
+      3, 130, 6, NDB_DEFAULT_HASHMAP_BUCKETS, 130 },
+    { "Extend hashmap",
+      3, 120, 7, NDB_DEFAULT_HASHMAP_BUCKETS, NDB_DEFAULT_HASHMAP_BUCKETS },
+    { "Keep old hashmap size since old size not multiple of old fragment count",
+      7, 120, 10, 60, 120 },
+    { "Shrink hashmap",
+      3, 120, 6, 60, 60 },
+  };
+
+  Bug14645319_createTable_args args;
+  args.template_name = ctx->getTab()->getName();
+  args.name = "Bug14645319";
+
+  for (size_t testi = 0; testi < NDB_ARRAY_SIZE(test_cases); testi++)
+  {
+    test_case const& test = test_cases[testi];
+    int result = NDBT_FAILED;
+
+    int old_fragments = 0;
+    int old_buckets = 0;
+    int new_fragments = 0;
+    int new_buckets = 0;
+
+    do {
+      /* setup old table */
+      args.buckets = test.old_buckets;
+      args.fragments = test.old_fragments;
+      result = NDBT_Tables::createTable(pNdb, args.template_name, false, false, Bug14645319_createTable, &args);
+      if (result != 0) break;
+
+      NdbDictionary::Table const& old_tab = *pDic->getTable(args.name);
+
+      /* check old table properties */
+      NdbDictionary::HashMap old_hm;
+      result = pDic->getHashMap(old_hm, &old_tab);
+      if (result != 0) break;
+
+      old_fragments = old_tab.getFragmentCount();
+      old_buckets = old_hm.getMapLen();
+      if (old_fragments != test.old_fragments)
+      {
+        result = NDBT_FAILED;
+        break;
+      }
+      if (old_buckets != test.old_buckets)
+      {
+        result = NDBT_FAILED;
+        break;
+      }
+
+      /* alter table */
+      NdbDictionary::Table new_tab = old_tab;
+      new_tab.setFragmentCount(test.new_fragments);
+      if (test.new_fragments == 0)
+        new_tab.setFragmentData(0, 0);
+
+      result = pDic->beginSchemaTrans();
+      if (result != 0) break;
+
+      result = pDic->prepareHashMap(old_tab, new_tab, test.new_buckets);
+
+      result |= pDic->endSchemaTrans();
+      if (result != 0) break;
+
+      result = pDic->alterTable(old_tab, new_tab);
+      if (result != 0) break;
+
+      /* check */
+      NdbDictionary::HashMap new_hm;
+      result = pDic->getHashMap(new_hm, &new_tab);
+      if (result != 0) break;
+
+      new_fragments = new_tab.getFragmentCount();
+      new_buckets = new_hm.getMapLen();
+
+      if (test.expected_buckets > 0 &&
+          new_buckets != test.expected_buckets)
+      {
+        result = NDBT_FAILED;
+        break;
+      }
+      result = 0;
+    } while (false);
+
+    result |= pDic->dropTable(args.name);
+
+    if (result == 0)
+    {
+      ndbout << "Test#" << (testi + 1) << " '" << test_cases[testi].description << "' passed" <<
+        " (" << old_buckets << " => " << test_cases[testi].new_buckets << " => " << test_cases[testi].expected_buckets << ")" << endl;
+    }
+    else
+    {
+      ndbout << "Test#" << (testi + 1) << " '" << test_cases[testi].description << "' failed" <<
+        " (" << old_buckets << " => " << test_cases[testi].new_buckets << " => " << new_buckets << " expected: " << test_cases[testi].expected_buckets << ")" << endl;
+      failures++;
+    }
+  }
+
+  return failures > 0 ? NDBT_FAILED : NDBT_OK;
+}
+
 NDBT_TESTSUITE(testDict);
 TESTCASE("testDropDDObjects",
          "* 1. start cluster\n"
@@ -9488,6 +9666,10 @@ TESTCASE("IndexStatCreate", "")
 {
   STEPS(runIndexStatCreate, 10);
 }
+TESTCASE("Bug14645319", "")
+{
+  STEP(runBug14645319);
+}
 NDBT_TESTSUITE_END(testDict);
 
 int main(int argc, const char** argv){

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2012-09-13 08:05:29 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2012-09-21 12:26:26 +0000
@@ -1875,3 +1875,7 @@ max-time : 300
 cmd: testScan
 args: -n ScanKeyInfoExhaust T1
 
+max-time : 300
+cmd: testDict
+args: -n Bug14645319 T1
+

=== modified file 'storage/ndb/test/src/NDBT_Table.cpp'
--- a/storage/ndb/test/src/NDBT_Table.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/test/src/NDBT_Table.cpp	2012-09-21 12:25:04 +0000
@@ -19,78 +19,11 @@
 #include <NdbTimer.hpp>
 #include <NDBT.hpp>
 
-class NdbOut& 
+class NdbOut&
 operator <<(class NdbOut& ndbout, const NDBT_Table & tab)
 {
   ndbout << "-- " << tab.getName() << " --" << endl;
-  
-  ndbout << "Version: " <<  tab.getObjectVersion() << endl; 
-  ndbout << "Fragment type: " <<  (unsigned) tab.getFragmentType() << endl; 
-  ndbout << "K Value: " <<  tab.getKValue()<< endl; 
-  ndbout << "Min load factor: " <<  tab.getMinLoadFactor()<< endl;
-  ndbout << "Max load factor: " <<  tab.getMaxLoadFactor()<< endl; 
-  ndbout << "Temporary table: " <<  (tab.getStoredTable() ? "no" : "yes") << endl;
-  ndbout << "Number of attributes: " <<  tab.getNoOfColumns() << endl;
-  ndbout << "Number of primary keys: " <<  tab.getNoOfPrimaryKeys() << endl;
-  ndbout << "Length of frm data: " << tab.getFrmLength() << endl;
-  ndbout << "Row Checksum: " << tab.getRowChecksumIndicator() << endl;
-  ndbout << "Row GCI: " << tab.getRowGCIIndicator() << endl;
-  ndbout << "SingleUserMode: " << (Uint32) tab.getSingleUserMode() << endl;
-  ndbout << "ForceVarPart: " << tab.getForceVarPart() << endl;
-  ndbout << "FragmentCount: " << tab.getFragmentCount() << endl;
-  ndbout << "ExtraRowGciBits: " << tab.getExtraRowGciBits() << endl;
-  ndbout << "ExtraRowAuthorBits: " << tab.getExtraRowAuthorBits() << endl;
-
-  //<< ((tab.getTupleKey() == TupleId) ? " tupleid" : "") <<endl;
-  ndbout << "TableStatus: ";
-  switch(tab.getObjectStatus()){
-  case NdbDictionary::Object::New:
-    ndbout << "New" << endl;
-    break;
-  case NdbDictionary::Object::Changed:
-    ndbout << "Changed" << endl;
-    break;
-  case NdbDictionary::Object::Retrieved:
-    ndbout << "Retrieved" << endl;
-    break;
-  default:
-    ndbout << "Unknown(" << (unsigned) tab.getObjectStatus() << ")" << endl;
-  }
-  
-  ndbout << "-- Attributes -- " << endl;
-  int noOfAttributes = tab.getNoOfColumns();
-  for(int i = 0; i<noOfAttributes; i++){
-    ndbout << (* (const NDBT_Attribute*)tab.getColumn(i)) << endl;
-  }
+  ndbout << NdbDictionary::Table(tab);
   
   return ndbout;
 }
-
-class NdbOut& operator <<(class NdbOut&, const NdbDictionary::Index & idx)
-{
-  ndbout << idx.getName();
-  ndbout << "(";
-  for (unsigned i=0; i < idx.getNoOfColumns(); i++)
-  {
-    const NdbDictionary::Column *col = idx.getColumn(i);
-    ndbout << col->getName();
-    if (i < idx.getNoOfColumns()-1)
-      ndbout << ", ";
-  }
-  ndbout << ")";
-  
-  ndbout << " - ";
-  switch (idx.getType()) {
-  case NdbDictionary::Object::UniqueHashIndex:
-    ndbout << "UniqueHashIndex";
-    break;
-  case NdbDictionary::Object::OrderedIndex:
-    ndbout << "OrderedIndex";
-    break;
-  default:
-    ndbout << "Type " << (unsigned) idx.getType();
-    break;
-  }
-  return ndbout;
-}
-

=== modified file 'storage/ndb/tools/desc.cpp'
--- a/storage/ndb/tools/desc.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/tools/desc.cpp	2012-09-21 12:25:04 +0000
@@ -26,10 +26,12 @@ int desc_logfilegroup(Ndb *myndb, char*
 int desc_undofile(Ndb_cluster_connection &con, Ndb *myndb, char* name);
 int desc_datafile(Ndb_cluster_connection &con, Ndb *myndb, char* name);
 int desc_tablespace(Ndb *myndb,char* name);
+int desc_index(Ndb *myndb, char* name);
 int desc_table(Ndb *myndb,char* name);
 int desc_hashmap(Ndb_cluster_connection &con, Ndb *myndb, char* name);
 
 static const char* _dbname = "TEST_DB";
+static const char* _tblname = NULL;
 static int _unqualified = 0;
 static int _partinfo = 0;
 static int _blobinfo = 0;
@@ -60,6 +62,9 @@ static struct my_option my_long_options[
   { "extra-node-info", 'n', "Print node info for partitions (requires -p)",
     (uchar**) &_nodeinfo, (uchar**) &_nodeinfo, 0,
     GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+  { "table", 't', "Base table for index",
+    (uchar**) &_tblname, (uchar**) &_tblname, 0,
+    GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
   { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
 };
 
@@ -73,7 +78,7 @@ static void usage()
   ndb_usage(short_usage_sub, load_default_groups, my_long_options);
 }
 
-static void print_part_info(Ndb* pNdb, NDBT_Table* pTab);
+static void print_part_info(Ndb* pNdb, NdbDictionary::Table const* pTab);
 
 int main(int argc, char** argv){
   NDB_INIT(argv[0]);
@@ -109,7 +114,9 @@ int main(int argc, char** argv){
 
   for(int i= 0; i<argc;i++)
   {
-    if(desc_table(&MyNdb,argv[i]))
+    if (desc_index(&MyNdb, argv[i]))
+      ;
+    else if(desc_table(&MyNdb, argv[i]))
       ;
     else if(desc_tablespace(&MyNdb,argv[i]))
       ;
@@ -248,44 +255,36 @@ int desc_datafile(Ndb_cluster_connection
   return 1;
 }
 
-int desc_table(Ndb *myndb, char* name)
+int desc_index(Ndb *myndb, char* name)
 {
   NdbDictionary::Dictionary * dict= myndb->getDictionary();
-  NDBT_Table* pTab;
-  while ((pTab = (NDBT_Table*)dict->getTable(name)) == NULL && --_retries >= 0) NdbSleep_SecSleep(1);
-  if (!pTab)
+  NdbDictionary::Index const* pIndex;
+
+  /* need to know base table */
+  if (_tblname == NULL)
     return 0;
 
-  ndbout << (* pTab) << endl;
+  while ((pIndex = dict->getIndex(name, _tblname)) == NULL && --_retries >= 0)
+    NdbSleep_SecSleep(1);
+  if (pIndex == NULL)
+    return 0;
 
-  NdbDictionary::Dictionary::List list;
-  if (dict->listIndexes(list, name) != 0){
-    ndbout << name << ": " << dict->getNdbError() << endl;
-    return NDBT_ProgramExit(NDBT_FAILED);
-  }
+  ndbout << "-- " << pIndex->getName() << " --" << endl;
+  dict->print(ndbout, *pIndex);
 
-  ndbout << "-- Indexes -- " << endl;
-  ndbout << "PRIMARY KEY(";
-  unsigned j;
-  for (j= 0; (int)j < pTab->getNoOfPrimaryKeys(); j++)
-  {
-    const NdbDictionary::Column * col= pTab->getColumn(pTab->getPrimaryKey(j));
-    ndbout << col->getName();
-    if ((int)j < pTab->getNoOfPrimaryKeys()-1)
-      ndbout << ", ";
-  }
-  ndbout << ") - UniqueHashIndex" << endl;
-  for (j= 0; j < list.count; j++) {
-    NdbDictionary::Dictionary::List::Element& elt = list.elements[j];
-    const NdbDictionary::Index *pIdx = dict->getIndex(elt.name, name);
-    if (!pIdx){
-      ndbout << name << ": " << dict->getNdbError() << endl;
-      return NDBT_ProgramExit(NDBT_FAILED);
-    }
+  return 1;
+}
 
-    ndbout << (*pIdx) << endl;
-  }
-  ndbout << endl;
+int desc_table(Ndb *myndb, char* name)
+{
+  NdbDictionary::Dictionary * dict= myndb->getDictionary();
+  NdbDictionary::Table const* pTab;
+  while ((pTab = dict->getTable(name)) == NULL && --_retries >= 0) NdbSleep_SecSleep(1);
+  if (!pTab)
+    return 0;
+
+  ndbout << "-- " << pTab->getName() << " --" << endl;
+  dict->print(ndbout, *pTab);
 
   if (_partinfo)
   {
@@ -319,7 +318,7 @@ struct InfoInfo
 
 
 static 
-void print_part_info(Ndb* pNdb, NDBT_Table* pTab)
+void print_part_info(Ndb* pNdb, NdbDictionary::Table const* pTab)
 {
   InfoInfo g_part_info[] = {
     { "Partition", 0, NdbDictionary::Column::FRAGMENT },

No bundle (reason: useless for push emails).